Skip to content
This repository was archived by the owner on Mar 20, 2023. It is now read-only.

Commit 29a7a96

Browse files
committed
Fixed issues with missing parentheses
1 parent 5126c53 commit 29a7a96

7 files changed

Lines changed: 18 additions & 14 deletions

File tree

coreneuron/mechanism/capac.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ void nrn_jacob_capacitance(NrnThread* _nt, Memb_list* ml, int /* type */) {
7070
nrn_pragma_acc(parallel loop present(vdata [0:_cntml_padded * nparm],
7171
ni [0:_cntml_actual],
7272
_vec_d [0:_nt->end]) if (_nt->compute_gpu)
73-
async(_nt->streams[_nt->stream_id])
73+
async(_nt->streams[_nt->stream_id]))
7474
nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu))
7575
for (_iml = 0; _iml < _cntml_actual; _iml++) {
7676
_vec_d[ni[_iml]] += cfac * cm;
@@ -116,7 +116,7 @@ void nrn_cur_capacitance(NrnThread* _nt, Memb_list* ml, int /* type */) {
116116
nrn_pragma_acc(parallel loop present(vdata [0:_cntml_padded * nparm],
117117
ni [0:_cntml_actual],
118118
_vec_rhs [0:_nt->end]) if (_nt->compute_gpu)
119-
async(_nt->streams[_nt->stream_id])
119+
async(_nt->streams[_nt->stream_id]))
120120
nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu))
121121
for (int _iml = 0; _iml < _cntml_actual; _iml++) {
122122
i_cap = cfac * cm * _vec_rhs[ni[_iml]];

coreneuron/mechanism/eion.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -338,7 +338,7 @@ void second_order_cur(NrnThread* _nt, int secondorder) {
338338
nrn_pragma_acc(parallel loop present(pd [0:_cntml_padded * 5],
339339
ni [0:_cntml_actual],
340340
_vec_rhs [0:_nt->end]) if (_nt->compute_gpu)
341-
async(_nt->streams[_nt->stream_id])
341+
async(_nt->streams[_nt->stream_id]))
342342
nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu))
343343
for (int _iml = 0; _iml < _cntml_actual; ++_iml) {
344344
cur += dcurdv * (_vec_rhs[ni[_iml]]);

coreneuron/network/partrans.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ void nrnthread_v_transfer(NrnThread* _nt) {
123123
nrn_pragma_acc(parallel loop present(insrc_indices [0:ntar],
124124
tar_data [0:ndata],
125125
insrc_buf_ [0:n_insrc_buf]) if (_nt->compute_gpu)
126-
async(_nt->streams[_nt->stream_id])
126+
async(_nt->streams[_nt->stream_id]))
127127
nrn_pragma_omp(target teams distribute parallel for simd map(to: tar_indices[0:ntar]) if(_nt->compute_gpu))
128128
for (size_t i = 0; i < ntar; ++i) {
129129
tar_data[tar_indices[i]] = insrc_buf_[insrc_indices[i]];

coreneuron/sim/fadvance_core.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -206,14 +206,14 @@ void update(NrnThread* _nt) {
206206
/* do not need to worry about linmod or extracellular*/
207207
if (secondorder) {
208208
nrn_pragma_acc(parallel loop present(vec_v [0:i2], vec_rhs [0:i2]) if (_nt->compute_gpu)
209-
async(_nt->streams[_nt->stream_id])
209+
async(_nt->streams[_nt->stream_id]))
210210
nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu))
211211
for (int i = 0; i < i2; ++i) {
212212
vec_v[i] += 2. * vec_rhs[i];
213213
}
214214
} else {
215215
nrn_pragma_acc(parallel loop present(vec_v [0:i2], vec_rhs [0:i2]) if (_nt->compute_gpu)
216-
async(_nt->streams[_nt->stream_id])
216+
async(_nt->streams[_nt->stream_id]))
217217
nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu))
218218
for (int i = 0; i < i2; ++i) {
219219
vec_v[i] += vec_rhs[i];
@@ -319,7 +319,7 @@ void nrncore2nrn_send_values(NrnThread* nth) {
319319
async(nth->streams[nth->stream_id]))
320320
nrn_pragma_omp(target update from(gather_i [0:1]) if (nth->compute_gpu))
321321
}
322-
nrn_pragma_acc(wait(nth->streams[nth->stream_id)))
322+
nrn_pragma_acc(wait(nth->streams[nth->stream_id]))
323323
nrn_pragma_omp(taskwait)
324324
for (int i = 0; i < tr->n_trajec; ++i) {
325325
*(tr->scatter[i]) = *(tr->gather[i]);

coreneuron/sim/multicore.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,10 @@
1515
#include "coreneuron/coreneuron.hpp"
1616
#include "coreneuron/utils/nrnoc_aux.hpp"
1717

18+
#if defined(_OPENMP)
19+
#include <omp.h>
20+
#endif
21+
1822
/*
1923
Now that threads have taken over the actual_v, v_node, etc, it might
2024
be a good time to regularize the method of freeing, allocating, and

coreneuron/sim/solve_core.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ static void bksub(NrnThread* _nt) {
7575
}
7676

7777
if (_nt->compute_gpu) {
78-
nrn_pragma_acc(wait(_nt->streams[_nth->stream_id]))
78+
nrn_pragma_acc(wait(_nt->streams[_nt->stream_id]))
7979
}
8080
}
8181
} // namespace coreneuron

coreneuron/sim/treeset_core.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ static void nrn_rhs(NrnThread* _nt) {
3333
int* parent_index = _nt->_v_parent_index;
3434

3535
nrn_pragma_acc(parallel loop present(vec_rhs [0:i3], vec_d [0:i3]) if (_nt->compute_gpu)
36-
async(_nt->streams[_nt->stream_id])
36+
async(_nt->streams[_nt->stream_id]))
3737
nrn_pragma_omp(target teams distribute parallel for if(_nt->compute_gpu))
3838
for (int i = i1; i < i3; ++i) {
3939
vec_rhs[i] = 0.;
@@ -45,7 +45,7 @@ static void nrn_rhs(NrnThread* _nt) {
4545
double* fast_imem_rhs = _nt->nrn_fast_imem->nrn_sav_rhs;
4646
nrn_pragma_acc(
4747
parallel loop present(fast_imem_d [i1:i3], fast_imem_rhs [i1:i3]) if (_nt->compute_gpu)
48-
async(_nt->streams[_nt->stream_id])
48+
async(_nt->streams[_nt->stream_id]))
4949
nrn_pragma_omp(target teams distribute parallel for if(_nt->compute_gpu))
5050
for (int i = i1; i < i3; ++i) {
5151
fast_imem_d[i] = 0.;
@@ -75,7 +75,7 @@ static void nrn_rhs(NrnThread* _nt) {
7575
*/
7676
double* p = _nt->nrn_fast_imem->nrn_sav_rhs;
7777
nrn_pragma_acc(parallel loop present(p, vec_rhs) if (_nt->compute_gpu)
78-
async(_nt->streams[_nt->stream_id])
78+
async(_nt->streams[_nt->stream_id]))
7979
nrn_pragma_omp(target teams distribute parallel for if(_nt->compute_gpu))
8080
for (int i = i1; i < i3; ++i) {
8181
p[i] -= vec_rhs[i];
@@ -92,7 +92,7 @@ static void nrn_rhs(NrnThread* _nt) {
9292
vec_b [0:i3],
9393
vec_v [0:i3],
9494
parent_index [0:i3]) if (_nt->compute_gpu)
95-
async(_nt->streams[_nt->stream_id])
95+
async(_nt->streams[_nt->stream_id]))
9696
nrn_pragma_omp(target teams distribute parallel for if(_nt->compute_gpu))
9797
for (int i = i2; i < i3; ++i) {
9898
double dv = vec_v[parent_index[i]] - vec_v[i];
@@ -152,7 +152,7 @@ static void nrn_lhs(NrnThread* _nt) {
152152
so here we transform so it only has membrane current contribution
153153
*/
154154
double* p = _nt->nrn_fast_imem->nrn_sav_d;
155-
nrn_pragma_acc(parallel loop present(p, vec_d) if (_nt->compute_gpu) async(_nt->streams[_nt->stream_id])
155+
nrn_pragma_acc(parallel loop present(p, vec_d) if (_nt->compute_gpu) async(_nt->streams[_nt->stream_id]))
156156
nrn_pragma_omp(target teams distribute parallel for if(_nt->compute_gpu))
157157
for (int i = i1; i < i3; ++i) {
158158
p[i] += vec_d[i];
@@ -162,7 +162,7 @@ static void nrn_lhs(NrnThread* _nt) {
162162
/* now add the axial currents */
163163
nrn_pragma_acc(parallel loop present(
164164
vec_d [0:i3], vec_a [0:i3], vec_b [0:i3], parent_index [0:i3]) if (_nt->compute_gpu)
165-
async(_nt->streams[_nt->stream_id])
165+
async(_nt->streams[_nt->stream_id]))
166166
nrn_pragma_omp(target teams distribute parallel for if(_nt->compute_gpu))
167167
for (int i = i2; i < i3; ++i) {
168168
nrn_pragma_acc(atomic update)

0 commit comments

Comments
 (0)