@@ -446,16 +446,16 @@ cdef gpucontext *kernel_context(GpuKernel k) except NULL:
         raise GpuArrayException, "Invalid kernel or destroyed context"
     return res
 
-cdef int kernel_sched(GpuKernel k, size_t n, size_t *ls, size_t *gs) except -1:
+cdef int kernel_sched(GpuKernel k, size_t n, size_t *gs, size_t *ls) except -1:
     cdef int err
-    err = GpuKernel_sched(&k.k, n, ls, gs)
+    err = GpuKernel_sched(&k.k, n, gs, ls)
     if err != GA_NO_ERROR:
         raise get_exc(err), kernel_error(k, err)
 
-cdef int kernel_call(GpuKernel k, unsigned int n, const size_t *ls,
-                     const size_t *gs, size_t shared, void **args) except -1:
+cdef int kernel_call(GpuKernel k, unsigned int n, const size_t *gs,
+                     const size_t *ls, size_t shared, void **args) except -1:
     cdef int err
-    err = GpuKernel_call(&k.k, n, ls, gs, shared, args)
+    err = GpuKernel_call(&k.k, n, gs, ls, shared, args)
     if err != GA_NO_ERROR:
         raise get_exc(err), kernel_error(k, err)
 
@@ -2113,10 +2113,10 @@ cdef class GpuKernel:
     sure to test against the size of your data.
 
     If you want more control over thread allocation you can use the
-    `ls` and `gs` parameters like so::
+    `gs` and `ls` parameters like so::
 
         k = GpuKernel(...)
-        k(param1, param2, ls=ls, gs=gs)
+        k(param1, param2, gs=gs, ls=ls)
 
     If you choose to use this interface, make sure to stay within the
     limits of `k.maxlsize` and `ctx.maxgsize` or the call will fail.
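A usage sketch of the convention documented above: the `GpuKernel(...)` arguments are elided exactly as in the docstring, `param1`/`param2` are placeholder names carried over from it, and the concrete sizes are illustrative assumptions, not values from the library.

    k = GpuKernel(...)

    # Give only a total work size and let libgpuarray pick the launch geometry.
    k(param1, param2, n=4096)

    # Or pass the geometry explicitly: grid size (gs) first, then local/block
    # size (ls).  Tuples of up to three sizes are assumed to be accepted, per
    # the 3-element gs/ls buffers in do_call below.  Keep ls within k.maxlsize
    # and gs within ctx.maxgsize or the call will fail.
    k(param1, param2, gs=(32,), ls=(128,))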
@@ -2200,12 +2200,12 @@ cdef class GpuKernel:
         finally:
             free(_types)
 
-    def __call__(self, *args, n=None, ls=None, gs=None, shared=0):
+    def __call__(self, *args, n=None, gs=None, ls=None, shared=0):
         if n == None and (ls == None or gs == None):
             raise ValueError, "Must specify size (n) or both gs and ls"
-        self.do_call(n, ls, gs, args, shared)
+        self.do_call(n, gs, ls, args, shared)
 
-    cdef do_call(self, py_n, py_ls, py_gs, py_args, size_t shared):
+    cdef do_call(self, py_n, py_gs, py_ls, py_args, size_t shared):
         cdef size_t n
         cdef size_t gs[3]
         cdef size_t ls[3]
@@ -2272,8 +2272,8 @@ cdef class GpuKernel:
             if nd != 1:
                 raise ValueError, "n is specified and nd != 1"
             n = py_n
-            kernel_sched(self, n, &ls[0], &gs[0])
-        kernel_call(self, nd, ls, gs, shared, self.callbuf)
+            kernel_sched(self, n, &gs[0], &ls[0])
+        kernel_call(self, nd, gs, ls, shared, self.callbuf)
 
     cdef _setarg(self, unsigned int index, int typecode, object o):
         if typecode == GA_BUFFER:
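To make the last hunk easier to follow, here is a hedged sketch of the n-only path through `do_call` after the reorder; it is not the library's code. The zero-initialised gs/ls sentinel and the reuse of `self.callbuf` for the marshalled kernel arguments are assumptions inferred from the diff above.

    # Sketch only: assumed shape of do_call's n-only branch.
    cdef size_t gs[3]
    cdef size_t ls[3]
    gs[0] = 0    # assumed sentinel: let the scheduler choose the grid size
    ls[0] = 0    # assumed sentinel: let the scheduler choose the block size
    n = py_n
    # kernel_sched fills gs[0]/ls[0] with a grid/block pair covering n items.
    kernel_sched(self, n, &gs[0], &ls[0])
    # Launch in one dimension: grid sizes first, then block sizes, with no
    # dynamic shared memory and the previously marshalled arguments.
    kernel_call(self, 1, gs, ls, 0, self.callbuf)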