@@ -1185,46 +1185,87 @@ end
11851185
11861186# ###########################################################################################
11871187
using Random

@testset "random numbers" begin

n = 256

@testset "basic rand($T), seed $seed" for T in (Int32, UInt32, Int64, UInt64,
                                                Int128, UInt128, Float32, Float64),
                                          seed in (nothing, #=missing,=# 1234)

    # Seed the device RNG according to the test parameter. Called from inside
    # the kernels so the seed applies to the per-launch RNG state.
    function apply_seed(seed)
        if seed === missing
            # should result in different numbers across launches
            Random.seed!()
            # XXX: this currently doesn't work, because of the definition in Base,
            # `seed!(r::MersenneTwister=default_rng())`, which breaks overriding
            # `default_rng` with a non-MersenneTwister RNG.
        elseif seed !== nothing
            # should result in the same numbers
            Random.seed!(seed)
        elseif seed === nothing
            # should result in different numbers across launches,
            # as determined by the seed set during module loading.
        end
    end

    # different kernel invocations should get different numbers
    @testset "across launches" begin
        function kernel(A::AbstractArray{T}, seed) where {T}
            apply_seed(seed)
            tid = threadIdx().x
            A[tid] = rand(T)
            return nothing
        end

        a = CUDA.zeros(T, n)
        b = CUDA.zeros(T, n)

        @cuda threads=n kernel(a, seed)
        @cuda threads=n kernel(b, seed)

        if seed === nothing || seed === missing
            @test all(Array(a) .!= Array(b))
        else
            @test Array(a) == Array(b)
        end
    end

    # multiple calls to rand should get different numbers
    @testset "across calls" begin
        function kernel(A::AbstractArray{T}, B::AbstractArray{T}, seed) where {T}
            apply_seed(seed)
            tid = threadIdx().x
            A[tid] = rand(T)
            B[tid] = rand(T)
            return nothing
        end

        a = CUDA.zeros(T, n)
        b = CUDA.zeros(T, n)

        @cuda threads=n kernel(a, b, seed)

        @test all(Array(a) .!= Array(b))
    end

    # different threads should get different numbers
    @testset "across threads" for active_dim in 1:6
        function kernel(A::AbstractArray{T}, seed) where {T}
            apply_seed(seed)
            # with only one dimension > 1, this product yields a unique id (1 or 2)
            id = threadIdx().x * threadIdx().y * threadIdx().z *
                 blockIdx().x * blockIdx().y * blockIdx().z
            A[id] = rand(T)
            return nothing
        end

        # launch a 2-wide grid/block along exactly one of the six dimensions
        tx, ty, tz, bx, by, bz = [dim == active_dim ? 2 : 1 for dim in 1:6]
        a = CUDA.zeros(T, 2)

        @cuda threads=(tx, ty, tz) blocks=(bx, by, bz) kernel(a, seed)

        @test Array(a)[1] != Array(a)[2]
    end
end
12301271
0 commit comments