diff --git a/examples/README.md b/examples/README.md index caf1c065..e8c908c4 100644 --- a/examples/README.md +++ b/examples/README.md @@ -8,3 +8,6 @@ If you are new to Exo, we recommend going through the examples in the following 2. [Cursor](./cursors/README.md): This example shows how to use Cursors to efficiently write schedules and define a new scheduling operator. 3. [RVM](./rvm_conv1d/README.md): This example illustrates how to use Exo to define and target a new hardware accelerator entirely in the user code. + +4. Quizzes ([quiz1](./quiz1/README.md), [quiz2](./quiz2/README.md), [quiz3](./quiz3/README.md)) contain common scheduling mistakes in Exo and solutions to fix them. The best way to learn a programming language is by debugging code. + diff --git a/examples/quiz1/.gitignore b/examples/quiz1/.gitignore new file mode 100644 index 00000000..d10776ef --- /dev/null +++ b/examples/quiz1/.gitignore @@ -0,0 +1 @@ +quiz1/ diff --git a/examples/quiz1/README.md b/examples/quiz1/README.md new file mode 100644 index 00000000..b233a9b3 --- /dev/null +++ b/examples/quiz1/README.md @@ -0,0 +1,59 @@ +# Quiz 1 + +Throughout the quiz, we provide incorrect code and the correct output as a reference. Your goal is to understand the code and fix the bug to match the correct output! + +You can execute `quiz1.py` by running `exocc quiz1.py`. Without modification, it will show the incorrect output. + +## Incorrect Output + +The following output is incorrect because it does not make calls to vector intrinsics. While it matches the structure of SIMD vector code, it is still being executed one element at a time: + +```python +def double(N: size, inp: f32[N] @ DRAM, out: f32[N] @ DRAM): + assert N % 8 == 0 + two_vec: R[8] @ DRAM + for ii in seq(0, 8): + two_vec[ii] = 2.0 + for io in seq(0, N / 8): + out_vec: f32[8] @ DRAM + inp_vec: f32[8] @ DRAM + for i0 in seq(0, 8): + inp_vec[i0] = inp[i0 + 8 * io] + for ii in seq(0, 8): + out_vec[ii] = two_vec[ii] * inp_vec[ii] + for i0 in seq(0, 8): + out[i0 + 8 * io] = out_vec[i0] +``` + +## Correct Output + +The correct output optimizes the function to use vectorized arithmetic operations to compute the result over the entire array: + +```python +def double(N: size, inp: f32[N] @ DRAM, out: f32[N] @ DRAM): + assert N % 8 == 0 + two_vec: R[8] @ AVX2 + vector_assign_two(two_vec[0:8]) + for io in seq(0, N / 8): + out_vec: f32[8] @ AVX2 + inp_vec: f32[8] @ AVX2 + vector_load(inp_vec[0:8], inp[8 * io + 0:8 * io + 8]) + vector_multiply(out_vec[0:8], two_vec[0:8], inp_vec[0:8]) + vector_store(out[8 * io + 0:8 * io + 8], out_vec[0:8]) +``` + +--- + +## Solution + +Before calling `replace_all(p, avx_instrs)`, you need to set buffer memory annotations to AVX2, because `replace_all` is memory-aware and will only replace code chunks with instructions that have matching memory annotations. + +Add the following code before the call to `replace_all`: + +```python + # Set the memory types to be AVX2 vectors + for name in ["two", "out", "inp"]: + p = set_memory(p, f"{name}_vec", AVX2) +``` + +This will ensure that the memory annotations are correctly set to AVX2 before replacing the code with vector intrinsics. 
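For reference, here is a minimal sketch of how the tail of `wrong_schedule` in `quiz1.py` reads once the fix is in place; apart from the added `set_memory` loop, the calls are unchanged from the quiz file:

```python
    # Set the memory types to be AVX2 vectors so replace_all can match them
    for name in ["two", "out", "inp"]:
        p = set_memory(p, f"{name}_vec", AVX2)

    # Replace with AVX instructions
    avx_instrs = [vector_assign_two, vector_multiply, vector_load, vector_store]
    p = replace_all(p, avx_instrs)

    return p
```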
diff --git a/examples/quiz1/quiz1.py b/examples/quiz1/quiz1.py new file mode 100644 index 00000000..d8475c60 --- /dev/null +++ b/examples/quiz1/quiz1.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from exo import * +from exo.libs.memories import AVX2 +from exo.stdlib.scheduling import * + + +@instr("{dst_data} = _mm256_loadu_ps(&{src_data});") +def vector_load(dst: [f32][8] @ AVX2, src: [f32][8] @ DRAM): + assert stride(src, 0) == 1 + assert stride(dst, 0) == 1 + + for i in seq(0, 8): + dst[i] = src[i] + + +@instr("_mm256_storeu_ps(&{dst_data}, {src_data});") +def vector_store(dst: [f32][8] @ DRAM, src: [f32][8] @ AVX2): + assert stride(src, 0) == 1 + assert stride(dst, 0) == 1 + + for i in seq(0, 8): + dst[i] = src[i] + + +@instr("{out_data} = _mm256_mul_ps({x_data}, {y_data});") +def vector_multiply(out: [f32][8] @ AVX2, x: [f32][8] @ AVX2, y: [f32][8] @ AVX2): + assert stride(out, 0) == 1 + assert stride(x, 0) == 1 + assert stride(y, 0) == 1 + + for i in seq(0, 8): + out[i] = x[i] * y[i] + + +@instr("{out_data} = _mm256_broadcast_ss(2.0);") +def vector_assign_two(out: [f32][8] @ AVX2): + assert stride(out, 0) == 1 + + for i in seq(0, 8): + out[i] = 2.0 + + +@proc +def vec_double(N: size, inp: f32[N], out: f32[N]): + assert N % 8 == 0 + for i in seq(0, N): + out[i] = 2.0 * inp[i] + + +def wrong_schedule(p): + """ + Forgot to set the memory types to be AVX2 vectors, so replace instruction + does not work as intended. + """ + p = rename(p, "vec_double_optimized") + p = divide_loop(p, "i", 8, ["io", "ii"], perfect=True) + + # Create a vector of twos + p = bind_expr(p, "2.0", "two_vec") + two_alloc = p.find("two_vec: _") + two_assign = p.find("two_vec = _") + p = expand_dim(p, two_alloc, 8, "ii") + + # Hoist the allocation and assignment of two vector + p = lift_alloc(p, two_alloc, 2) + p = fission(p, two_assign.after(), 2) + p = remove_loop(p, two_assign.parent().parent()) + + # Create vectors for the input and output values + innermost_loop = p.find_loop("ii #1") + p = stage_mem(p, innermost_loop, "out[8*io:8*io+8]", "out_vec") + p = stage_mem(p, innermost_loop, "inp[8*io:8*io+8]", "inp_vec") + p = simplify(p) + + # Replace with AVX instructions + avx_instrs = [vector_assign_two, vector_multiply, vector_load, vector_store] + p = replace_all(p, avx_instrs) + + return p + + +w = wrong_schedule(vec_double) +print(w) diff --git a/examples/quiz2/.gitignore b/examples/quiz2/.gitignore new file mode 100644 index 00000000..99d0c64f --- /dev/null +++ b/examples/quiz2/.gitignore @@ -0,0 +1 @@ +quiz2/ diff --git a/examples/quiz2/README.md b/examples/quiz2/README.md new file mode 100644 index 00000000..c0bf51e2 --- /dev/null +++ b/examples/quiz2/README.md @@ -0,0 +1,101 @@ +# Quiz2! + +This quiz is about loop fission bugs and debugging via printing cursors. + +## Incorrect Output (compiler error) +As written, the schedule has a bug: it attempts to fission a loop at an invalid point.
+```bash +Traceback (most recent call last): + File "/home/yuka/.local/bin/exocc", line 8, in <module> + sys.exit(main()) + File "/home/yuka/.local/lib/python3.9/site-packages/exo/main.py", line 55, in main + library = [ + File "/home/yuka/.local/lib/python3.9/site-packages/exo/main.py", line 58, in <listcomp> + for proc in get_procs_from_module(load_user_code(mod)) + File "/home/yuka/.local/lib/python3.9/site-packages/exo/main.py", line 107, in load_user_code + loader.exec_module(user_module) + File "<frozen importlib._bootstrap_external>", line 790, in exec_module + File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed + File "/home/yuka/exo/examples/quiz2/quiz2.py", line 42, in <module> + w = wrong_schedule(scaled_add) + File "/home/yuka/exo/examples/quiz2/quiz2.py", line 38, in wrong_schedule + p = fission(p, vector_assign.after()) + File "/home/yuka/.local/lib/python3.9/site-packages/exo/API_scheduling.py", line 100, in __call__ + return self.func(*bound_args.args, **bound_args.kwargs) + File "/home/yuka/.local/lib/python3.9/site-packages/exo/API_scheduling.py", line 2066, in fission + ir, fwd = scheduling.DoFissionAfterSimple( + File "/home/yuka/.local/lib/python3.9/site-packages/exo/rewrite/LoopIR_scheduling.py", line 2385, in DoFissionAfterSimple + alloc_check(pre, post) + File "/home/yuka/.local/lib/python3.9/site-packages/exo/rewrite/LoopIR_scheduling.py", line 2352, in alloc_check + raise SchedulingError( +exo.rewrite.new_eff.SchedulingError: <<>>: Will not fission here, because doing so will hide the allocation of vec from a later use site. +``` + +## Correct Output +The correct output will divide the computation into individual, vectorizable loops. +```python +def scaled_add_scheduled(N: size, a: f32[N] @ DRAM, b: f32[N] @ DRAM, + c: f32[N] @ DRAM): + assert N % 8 == 0 + for io in seq(0, N / 8): + vec: R[8] @ DRAM + vec_1: R[8] @ DRAM + vec_2: f32[8] @ DRAM + vec_3: R[8] @ DRAM + vec_4: R[8] @ DRAM + vec_5: f32[8] @ DRAM + for ii in seq(0, 8): + vec_1[ii] = 2 + for ii in seq(0, 8): + vec_2[ii] = a[8 * io + ii] + for ii in seq(0, 8): + vec[ii] = vec_1[ii] * vec_2[ii] + for ii in seq(0, 8): + vec_4[ii] = 3 + for ii in seq(0, 8): + vec_5[ii] = b[8 * io + ii] + for ii in seq(0, 8): + vec_3[ii] = vec_4[ii] * vec_5[ii] + for ii in seq(0, 8): + c[8 * io + ii] = vec[ii] + vec_3[ii] +``` + +--- + +## Solution + +To understand the bug, let's first try printing right before the error. Add the following line after line 37: +```python +print(vector_assign.after()) +``` + +This will output: +```python + for io in seq(0, N / 8): + vec: R[8] @ DRAM + for ii in seq(0, 8): + vec_1: R @ DRAM + vec_1 = 2 + [GAP - After] + ... +``` + +The code is attempting to perform fission at the `[GAP - After]` location. +However, this is unsafe because the `vec_1: R` allocation is within the `ii` loop and before the fission point. +If `vec_1` is used after the fission point, the code will no longer be valid Exo. + +To fix this issue, modify the code as follows: + +```python + for i in range(num_vectors): + vector_reg = p.find(f"vec: _ #{i}") + p = expand_dim(p, vector_reg, 8, "ii") + p = lift_alloc(p, vector_reg) + + for i in range(num_vectors): + vector_assign = p.find(f"vec = _ #{i}") + p = fission(p, vector_assign.after()) +``` + +By separating the allocation lifting and fission operations into two separate loops, you ensure that all the allocations are lifted out of the loop before performing fission. This resolves the issue of unsafe fission due to the allocation being within the loop.
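Putting the pieces together, here is a sketch of the corrected scheduling function (hypothetically renamed `fixed_schedule`; the `stage_exprs` helper and all other calls are exactly as in `quiz2.py` below):

```python
def fixed_schedule(p):
    p = rename(p, "scaled_add_scheduled")
    num_vectors = 0

    p = divide_loop(p, "i", 8, ["io", "ii"], perfect=True)
    p, num_vectors = stage_exprs(p, num_vectors, p.find("c[_] = _"))

    # First lift every "vec" allocation out of the ii loop...
    for i in range(num_vectors):
        vector_reg = p.find(f"vec: _ #{i}")
        p = expand_dim(p, vector_reg, 8, "ii")
        p = lift_alloc(p, vector_reg)

    # ...then fission, now that no allocation sits between a fission point
    # and a later use of that buffer.
    for i in range(num_vectors):
        vector_assign = p.find(f"vec = _ #{i}")
        p = fission(p, vector_assign.after())

    return p
```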
+ diff --git a/examples/quiz2/quiz2.py b/examples/quiz2/quiz2.py new file mode 100644 index 00000000..f2a8c379 --- /dev/null +++ b/examples/quiz2/quiz2.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from exo import * +from exo.stdlib.scheduling import * + + +@proc +def scaled_add(N: size, a: f32[N], b: f32[N], c: f32[N]): + assert N % 8 == 0 + for i in seq(0, N): + c[i] = 2 * a[i] + 3 * b[i] + + +def stage_exprs(p, num_vectors, assign): + if isinstance(assign.rhs(), BinaryOpCursor): + p = bind_expr(p, assign.rhs().lhs(), "vec") + num_vectors += 1 + p, num_vectors = stage_exprs(p, num_vectors, p.forward(assign).prev()) + + p = bind_expr(p, assign.rhs().rhs(), "vec") + num_vectors += 1 + p, num_vectors = stage_exprs(p, num_vectors, p.forward(assign).prev()) + return p, num_vectors + + +def wrong_schedule(p): + p = rename(p, "scaled_add_scheduled") + num_vectors = 0 + + p = divide_loop(p, "i", 8, ["io", "ii"], perfect=True) + + p, num_vectors = stage_exprs(p, num_vectors, p.find("c[_] = _")) + + for i in range(num_vectors): + vector_reg = p.find(f"vec: _ #{i}") + p = expand_dim(p, vector_reg, 8, "ii") + p = lift_alloc(p, vector_reg) + + vector_assign = p.find(f"vec = _ #{i}") + p = fission(p, vector_assign.after()) + + return p + + +w = wrong_schedule(scaled_add) +print(w) diff --git a/examples/quiz3/.gitignore b/examples/quiz3/.gitignore new file mode 100644 index 00000000..9f05cb89 --- /dev/null +++ b/examples/quiz3/.gitignore @@ -0,0 +1 @@ +quiz3/ diff --git a/examples/quiz3/README.md b/examples/quiz3/README.md new file mode 100644 index 00000000..82fc54b2 --- /dev/null +++ b/examples/quiz3/README.md @@ -0,0 +1,89 @@ +# Quiz3!! + +This quiz explores fixing subtle cursor navigation bugs. + +## Correct Output +This code makes the optimization of shrinking the `blur_x` memory allocation from (H+2, W) to (34, 256). Since the code has been tiled, we don't need to store the entire intermediate `blur_x` buffer in memory. Instead, we can just reuse the same intermediate buffer for each tile. + +To do so, the schedule tries to sink the allocation within the tile, reduce the memory size to the bare minimum necessary for computing that tile, and then lift the allocation back up to the top level scope. +```python +def tile_and_fused_blur(W: size, H: size, blur_y: ui16[H, W] @ DRAM, + inp: ui16[H + 2, W + 2] @ DRAM): + assert H % 32 == 0 + assert W % 256 == 0 + blur_x: ui16[34, 256] @ DRAM + for yo in seq(0, H / 32): + for xo in seq(0, W / 256): + for yi in seq(0, 34): + for xi in seq(0, 256): + blur_x[yi + 32 * yo - 32 * yo, xi + 256 * xo - 256 * + xo] = (inp[yi + 32 * yo, xi + 256 * xo] + + inp[yi + 32 * yo, 1 + xi + 256 * xo] + + inp[yi + 32 * yo, 2 + xi + 256 * xo]) / 3.0 + for yi in seq(0, 32): + for xi in seq(0, 256): + blur_y[yi + 32 * yo, xi + + 256 * xo] = (blur_x[yi + 32 * yo - 32 * yo, + xi + 256 * xo - 256 * xo] + + blur_x[1 + yi + 32 * yo - 32 * yo, + xi + 256 * xo - 256 * xo] + + blur_x[2 + yi + 32 * yo - 32 * yo, + xi + 256 * xo - 256 * xo]) / 3.0 +``` + +## Incorrect Output +This output is partially correct: it manages to reduce the height dimension from `H+2` to `34`. However, it fails to reduce the memory usage in the width direction. 
+```python +def tile_and_fused_blur(W: size, H: size, blur_y: ui16[H, W] @ DRAM, + inp: ui16[H + 2, W + 2] @ DRAM): + assert H % 32 == 0 + assert W % 256 == 0 + blur_x: ui16[34, W] @ DRAM + for yo in seq(0, H / 32): + for xo in seq(0, W / 256): + for yi in seq(0, 34): + for xi in seq(0, 256): + blur_x[yi + 32 * yo - 32 * yo, xi + 256 * + xo] = (inp[yi + 32 * yo, xi + 256 * xo] + + inp[yi + 32 * yo, 1 + xi + 256 * xo] + + inp[yi + 32 * yo, 2 + xi + 256 * xo]) / 3.0 + for yi in seq(0, 32): + for xi in seq(0, 256): + blur_y[yi + 32 * yo, xi + 256 * xo] = ( + blur_x[yi + 32 * yo - 32 * yo, xi + 256 * xo] + + blur_x[1 + yi + 32 * yo - 32 * yo, xi + 256 * xo] + + blur_x[2 + yi + 32 * yo - 32 * yo, + xi + 256 * xo]) / 3.0 +``` + +--- + +## Solution + +To understand the bug, let's insert print statements in these places: + +```python +print(xo_loop) +loops_to_lower_allocation_into = get_loops_at_or_above(xo_loop) +for i, loop in enumerate(loops_to_lower_allocation_into): + print(i, loop) + ... +``` + +The `xo_loop` points to: +```python +for yo in seq(0, H / 32): + for xo in seq(0, W / 256): # <-- NODE + ... +``` + +And on the first (and only) iteration, `loop` points to: +```python +for yo in seq(0, H / 32): # <-- NODE + for xo in seq(0, W / 256): + ... +``` + +This reveals the bug in `get_loops_at_or_above`: the returned list contains only the loops above `xo_loop` (here, just the `yo` loop) and does not include `xo_loop` itself. + +To fix this bug, change `loops = []` to `loops = [cursor]` in the implementation of `get_loops_at_or_above`. diff --git a/examples/quiz3/quiz3.py b/examples/quiz3/quiz3.py new file mode 100644 index 00000000..ef45174b --- /dev/null +++ b/examples/quiz3/quiz3.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +from exo import * +from exo.stdlib.scheduling import * + + +@proc +def tile_and_fused_blur( + W: size, H: size, blur_y: ui16[H, W] @ DRAM, inp: ui16[H + 2, W + 2] @ DRAM +): + assert H % 32 == 0 + assert W % 256 == 0 + blur_x: ui16[2 + H, W] @ DRAM + for yo in seq(0, H / 32): + for xo in seq(0, W / 256): + for yi in seq(0, 34): + for xi in seq(0, 256): + blur_x[yi + 32 * yo, xi + 256 * xo] = ( + inp[yi + 32 * yo, xi + 256 * xo] + + inp[yi + 32 * yo, 1 + xi + 256 * xo] + + inp[yi + 32 * yo, 2 + xi + 256 * xo] + ) / 3.0 + for yi in seq(0, 32): + for xi in seq(0, 256): + blur_y[yi + 32 * yo, xi + 256 * xo] = ( + blur_x[yi + 32 * yo, xi + 256 * xo] + + blur_x[1 + yi + 32 * yo, xi + 256 * xo] + + blur_x[2 + yi + 32 * yo, xi + 256 * xo] + ) / 3.0 + + +def get_loops_at_or_above(cursor): + loops = [] + while not isinstance((parent := cursor.parent()), InvalidCursor): + loops.append(parent) + cursor = parent + return list(reversed(loops)) + + +def wrong_schedule(p): + """ + Incorrect function get_loops_at_or_above is missing the initial loop + when initializing the loops array + """ + + p = rename(p, "tile_and_fused_blur_scheduled") + xo_loop = p.find_loop("xo") + producer_alloc = p.find("blur_x : _") + + # each output depends on 3 rows of blur_x, so computing a 32x256 subarray + # of output requires a 34x256 subarray of blur_x.
+ tile_size = [32, 256] + blur_x_tile_size = [34, 256] + + loops_to_lower_allocation_into = get_loops_at_or_above(xo_loop) + for i, loop in enumerate(loops_to_lower_allocation_into): + # Forward cursors before using + loop = p.forward(loop) + producer_alloc = p.forward(producer_alloc) + + # Sink the blur_x allocation into the next for loop + p = sink_alloc(p, producer_alloc) + + # Shrink blur_x size accordingly + offset_expr = f"{tile_size[i]} * {loop.name()}" + p = resize_dim(p, producer_alloc, i, blur_x_tile_size[i], offset_expr) + + p = lift_alloc(p, producer_alloc, 1) + + return p + + +w = wrong_schedule(tile_and_fused_blur) +print(w) diff --git a/tests/golden/test_examples/test_avx2_matmul.txt b/tests/golden/test_examples/test_avx2_matmul.txt new file mode 100644 index 00000000..2e16ce25 --- /dev/null +++ b/tests/golden/test_examples/test_avx2_matmul.txt @@ -0,0 +1,144 @@ + +#pragma once +#ifndef TEST_CASE_H +#define TEST_CASE_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#include +#include + +// Compiler feature macros adapted from Hedley (public domain) +// https://github.com/nemequ/hedley + +#if defined(__has_builtin) +# define EXO_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else +# define EXO_HAS_BUILTIN(builtin) (0) +#endif + +#if EXO_HAS_BUILTIN(__builtin_assume) +# define EXO_ASSUME(expr) __builtin_assume(expr) +#elif EXO_HAS_BUILTIN(__builtin_unreachable) +# define EXO_ASSUME(expr) \ + ((void)((expr) ? 1 : (__builtin_unreachable(), 1))) +#else +# define EXO_ASSUME(expr) ((void)(expr)) +#endif + + +#ifndef EXO_WIN_1F32 +#define EXO_WIN_1F32 +struct exo_win_1f32{ + float * const data; + const int_fast32_t strides[1]; +}; +#endif +#ifndef EXO_WIN_1F32C +#define EXO_WIN_1F32C +struct exo_win_1f32c{ + const float * const data; + const int_fast32_t strides[1]; +}; +#endif +// rank_k_reduce_6x16( +// K : size, +// A : f32[6, K] @DRAM, +// B : f32[K, 16] @DRAM, +// C : f32[6, 16] @DRAM +// ) +void rank_k_reduce_6x16( void *ctxt, int_fast32_t K, const float* A, const float* B, float* C ); + +// rank_k_reduce_6x16_scheduled( +// K : size, +// A : f32[6, K] @DRAM, +// B : f32[K, 16] @DRAM, +// C : f32[6, 16] @DRAM +// ) +void rank_k_reduce_6x16_scheduled( void *ctxt, int_fast32_t K, const float* A, const float* B, float* C ); + + + +#ifdef __cplusplus +} +#endif +#endif // TEST_CASE_H + +#include "test_case.h" + +#include +#include +#include + + +/* relying on the following instruction..." +mm256_broadcast_ss(out,val) +{out_data} = _mm256_broadcast_ss(&{val_data}); +*/ + +/* relying on the following instruction..." +mm256_fmadd_ps(dst,src1,src2) +{dst_data} = _mm256_fmadd_ps({src1_data}, {src2_data}, {dst_data}); +*/ + +/* relying on the following instruction..." +mm256_loadu_ps(dst,src) +{dst_data} = _mm256_loadu_ps(&{src_data}); +*/ + +/* relying on the following instruction..." 
+mm256_storeu_ps(dst,src) +_mm256_storeu_ps(&{dst_data}, {src_data}); +*/ +// rank_k_reduce_6x16( +// K : size, +// A : f32[6, K] @DRAM, +// B : f32[K, 16] @DRAM, +// C : f32[6, 16] @DRAM +// ) +void rank_k_reduce_6x16( void *ctxt, int_fast32_t K, const float* A, const float* B, float* C ) { +for (int_fast32_t i = 0; i < 6; i++) { + for (int_fast32_t j = 0; j < 16; j++) { + for (int_fast32_t k = 0; k < K; k++) { + C[i * 16 + j] += A[i * K + k] * B[k * 16 + j]; + } + } +} +} + +// rank_k_reduce_6x16_scheduled( +// K : size, +// A : f32[6, K] @DRAM, +// B : f32[K, 16] @DRAM, +// C : f32[6, 16] @DRAM +// ) +void rank_k_reduce_6x16_scheduled( void *ctxt, int_fast32_t K, const float* A, const float* B, float* C ) { +__m256 C_reg[6][2]; +for (int_fast32_t i0 = 0; i0 < 6; i0++) { + for (int_fast32_t i2 = 0; i2 < 2; i2++) { + C_reg[i0][i2] = _mm256_loadu_ps(&C[(i0) * (16) + 8 * i2]); + } +} +for (int_fast32_t k = 0; k < K; k++) { + __m256 B_reg[2]; + for (int_fast32_t io = 0; io < 2; io++) { + B_reg[io] = _mm256_loadu_ps(&B[(k) * (16) + 8 * io]); + } + for (int_fast32_t i = 0; i < 6; i++) { + __m256 A_reg; + A_reg = _mm256_broadcast_ss(&A[(i) * K + k]); + for (int_fast32_t jo = 0; jo < 2; jo++) { + C_reg[i][jo] = _mm256_fmadd_ps(A_reg, B_reg[jo], C_reg[i][jo]); + } + } +} +for (int_fast32_t i0 = 0; i0 < 6; i0++) { + for (int_fast32_t i2 = 0; i2 < 2; i2++) { + _mm256_storeu_ps(&C[(i0) * (16) + 8 * i2], C_reg[i0][i2]); + } +} +} + diff --git a/tests/golden/test_examples/test_cursors.txt b/tests/golden/test_examples/test_cursors.txt new file mode 100644 index 00000000..521ebb6b --- /dev/null +++ b/tests/golden/test_examples/test_cursors.txt @@ -0,0 +1,75 @@ + +#pragma once +#ifndef TEST_CASE_H +#define TEST_CASE_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#include +#include + +// Compiler feature macros adapted from Hedley (public domain) +// https://github.com/nemequ/hedley + +#if defined(__has_builtin) +# define EXO_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else +# define EXO_HAS_BUILTIN(builtin) (0) +#endif + +#if EXO_HAS_BUILTIN(__builtin_assume) +# define EXO_ASSUME(expr) __builtin_assume(expr) +#elif EXO_HAS_BUILTIN(__builtin_unreachable) +# define EXO_ASSUME(expr) \ + ((void)((expr) ? 
1 : (__builtin_unreachable(), 1))) +#else +# define EXO_ASSUME(expr) ((void)(expr)) +#endif + + + +// gemv( +// M : size, +// N : size, +// A : f32[M, N] @DRAM, +// x : f32[N] @DRAM, +// y : f32[M] @DRAM +// ) +void gemv( void *ctxt, int_fast32_t M, int_fast32_t N, const float* A, const float* x, float* y ); + + + +#ifdef __cplusplus +} +#endif +#endif // TEST_CASE_H + +#include "test_case.h" + +#include +#include + +// gemv( +// M : size, +// N : size, +// A : f32[M, N] @DRAM, +// x : f32[N] @DRAM, +// y : f32[M] @DRAM +// ) +void gemv( void *ctxt, int_fast32_t M, int_fast32_t N, const float* A, const float* x, float* y ) { +EXO_ASSUME(M % 8 == 0); +EXO_ASSUME(N % 8 == 0); +for (int_fast32_t io = 0; io < ((M) / (8)); io++) { + for (int_fast32_t jo = 0; jo < ((N) / (8)); jo++) { + for (int_fast32_t ii = 0; ii < 8; ii++) { + for (int_fast32_t ji = 0; ji < 8; ji++) { + y[8 * io + ii] += A[(8 * io + ii) * N + 8 * jo + ji] * x[8 * jo + ji]; + } + } + } +} +} + diff --git a/tests/golden/test_examples/test_quiz1.txt b/tests/golden/test_examples/test_quiz1.txt new file mode 100644 index 00000000..27e523b1 --- /dev/null +++ b/tests/golden/test_examples/test_quiz1.txt @@ -0,0 +1,100 @@ + +#pragma once +#ifndef TEST_CASE_H +#define TEST_CASE_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#include +#include + +// Compiler feature macros adapted from Hedley (public domain) +// https://github.com/nemequ/hedley + +#if defined(__has_builtin) +# define EXO_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else +# define EXO_HAS_BUILTIN(builtin) (0) +#endif + +#if EXO_HAS_BUILTIN(__builtin_assume) +# define EXO_ASSUME(expr) __builtin_assume(expr) +#elif EXO_HAS_BUILTIN(__builtin_unreachable) +# define EXO_ASSUME(expr) \ + ((void)((expr) ? 1 : (__builtin_unreachable(), 1))) +#else +# define EXO_ASSUME(expr) ((void)(expr)) +#endif + + + +// vec_double( +// N : size, +// inp : f32[N] @DRAM, +// out : f32[N] @DRAM +// ) +void vec_double( void *ctxt, int_fast32_t N, const float* inp, float* out ); + +// vec_double_optimized( +// N : size, +// inp : f32[N] @DRAM, +// out : f32[N] @DRAM +// ) +void vec_double_optimized( void *ctxt, int_fast32_t N, const float* inp, float* out ); + + + +#ifdef __cplusplus +} +#endif +#endif // TEST_CASE_H + +#include "test_case.h" + +#include +#include + +// vec_double( +// N : size, +// inp : f32[N] @DRAM, +// out : f32[N] @DRAM +// ) +void vec_double( void *ctxt, int_fast32_t N, const float* inp, float* out ) { +EXO_ASSUME(N % 8 == 0); +for (int_fast32_t i = 0; i < N; i++) { + out[i] = 2.0f * inp[i]; +} +} + +// vec_double_optimized( +// N : size, +// inp : f32[N] @DRAM, +// out : f32[N] @DRAM +// ) +void vec_double_optimized( void *ctxt, int_fast32_t N, const float* inp, float* out ) { +EXO_ASSUME(N % 8 == 0); +float *two_vec = (float*) malloc(8 * sizeof(*two_vec)); +for (int_fast32_t ii = 0; ii < 8; ii++) { + two_vec[ii] = 2.0f; +} +for (int_fast32_t io = 0; io < ((N) / (8)); io++) { + float *out_vec = (float*) malloc(8 * sizeof(*out_vec)); + float *inp_vec = (float*) malloc(8 * sizeof(*inp_vec)); + for (int_fast32_t i0 = 0; i0 < 8; i0++) { + inp_vec[i0] = inp[i0 + 8 * io]; + } + for (int_fast32_t ii = 0; ii < 8; ii++) { + out_vec[ii] = two_vec[ii] * inp_vec[ii]; + } + free(inp_vec); + for (int_fast32_t i0 = 0; i0 < 8; i0++) { + out[i0 + 8 * io] = out_vec[i0]; + } + free(out_vec); +} +free(two_vec); +} + diff --git a/tests/golden/test_examples/test_quiz3.txt b/tests/golden/test_examples/test_quiz3.txt new file mode 100644 index 00000000..78f57121 --- /dev/null +++ 
b/tests/golden/test_examples/test_quiz3.txt @@ -0,0 +1,115 @@ + +#pragma once +#ifndef TEST_CASE_H +#define TEST_CASE_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#include +#include + +// Compiler feature macros adapted from Hedley (public domain) +// https://github.com/nemequ/hedley + +#if defined(__has_builtin) +# define EXO_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else +# define EXO_HAS_BUILTIN(builtin) (0) +#endif + +#if EXO_HAS_BUILTIN(__builtin_assume) +# define EXO_ASSUME(expr) __builtin_assume(expr) +#elif EXO_HAS_BUILTIN(__builtin_unreachable) +# define EXO_ASSUME(expr) \ + ((void)((expr) ? 1 : (__builtin_unreachable(), 1))) +#else +# define EXO_ASSUME(expr) ((void)(expr)) +#endif + + + +// tile_and_fused_blur( +// W : size, +// H : size, +// blur_y : ui16[H, W] @DRAM, +// inp : ui16[H + 2, W + 2] @DRAM +// ) +void tile_and_fused_blur( void *ctxt, int_fast32_t W, int_fast32_t H, uint16_t* blur_y, const uint16_t* inp ); + +// tile_and_fused_blur_scheduled( +// W : size, +// H : size, +// blur_y : ui16[H, W] @DRAM, +// inp : ui16[H + 2, W + 2] @DRAM +// ) +void tile_and_fused_blur_scheduled( void *ctxt, int_fast32_t W, int_fast32_t H, uint16_t* blur_y, const uint16_t* inp ); + + + +#ifdef __cplusplus +} +#endif +#endif // TEST_CASE_H + +#include "test_case.h" + +#include +#include + +// tile_and_fused_blur( +// W : size, +// H : size, +// blur_y : ui16[H, W] @DRAM, +// inp : ui16[H + 2, W + 2] @DRAM +// ) +void tile_and_fused_blur( void *ctxt, int_fast32_t W, int_fast32_t H, uint16_t* blur_y, const uint16_t* inp ) { +EXO_ASSUME(H % 32 == 0); +EXO_ASSUME(W % 256 == 0); +uint16_t *blur_x = (uint16_t*) malloc((2 + H) * W * sizeof(*blur_x)); +for (int_fast32_t yo = 0; yo < ((H) / (32)); yo++) { + for (int_fast32_t xo = 0; xo < ((W) / (256)); xo++) { + for (int_fast32_t yi = 0; yi < 34; yi++) { + for (int_fast32_t xi = 0; xi < 256; xi++) { + blur_x[(yi + 32 * yo) * W + xi + 256 * xo] = (inp[(yi + 32 * yo) * (W + 2) + xi + 256 * xo] + inp[(yi + 32 * yo) * (W + 2) + 1 + xi + 256 * xo] + inp[(yi + 32 * yo) * (W + 2) + 2 + xi + 256 * xo]) / ((uint16_t) 3.0); + } + } + for (int_fast32_t yi = 0; yi < 32; yi++) { + for (int_fast32_t xi = 0; xi < 256; xi++) { + blur_y[(yi + 32 * yo) * W + xi + 256 * xo] = (blur_x[(yi + 32 * yo) * W + xi + 256 * xo] + blur_x[(1 + yi + 32 * yo) * W + xi + 256 * xo] + blur_x[(2 + yi + 32 * yo) * W + xi + 256 * xo]) / ((uint16_t) 3.0); + } + } + } +} +free(blur_x); +} + +// tile_and_fused_blur_scheduled( +// W : size, +// H : size, +// blur_y : ui16[H, W] @DRAM, +// inp : ui16[H + 2, W + 2] @DRAM +// ) +void tile_and_fused_blur_scheduled( void *ctxt, int_fast32_t W, int_fast32_t H, uint16_t* blur_y, const uint16_t* inp ) { +EXO_ASSUME(H % 32 == 0); +EXO_ASSUME(W % 256 == 0); +uint16_t *blur_x = (uint16_t*) malloc(34 * W * sizeof(*blur_x)); +for (int_fast32_t yo = 0; yo < ((H) / (32)); yo++) { + for (int_fast32_t xo = 0; xo < ((W) / (256)); xo++) { + for (int_fast32_t yi = 0; yi < 34; yi++) { + for (int_fast32_t xi = 0; xi < 256; xi++) { + blur_x[(yi + 32 * yo - (32 * yo)) * W + xi + 256 * xo] = (inp[(yi + 32 * yo) * (W + 2) + xi + 256 * xo] + inp[(yi + 32 * yo) * (W + 2) + 1 + xi + 256 * xo] + inp[(yi + 32 * yo) * (W + 2) + 2 + xi + 256 * xo]) / ((uint16_t) 3.0); + } + } + for (int_fast32_t yi = 0; yi < 32; yi++) { + for (int_fast32_t xi = 0; xi < 256; xi++) { + blur_y[(yi + 32 * yo) * W + xi + 256 * xo] = (blur_x[(yi + 32 * yo - (32 * yo)) * W + xi + 256 * xo] + blur_x[(1 + yi + 32 * yo - (32 * yo)) * W + xi + 256 * xo] + blur_x[(2 + yi + 32 * yo - (32 * 
yo)) * W + xi + 256 * xo]) / ((uint16_t) 3.0); + } + } + } +} +free(blur_x); +} + diff --git a/tests/golden/test_examples/test_rvm_conv1d.txt b/tests/golden/test_examples/test_rvm_conv1d.txt new file mode 100644 index 00000000..f67e7baa --- /dev/null +++ b/tests/golden/test_examples/test_rvm_conv1d.txt @@ -0,0 +1,146 @@ + +#pragma once +#ifndef TEST_CASE_H +#define TEST_CASE_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#include +#include + +// Compiler feature macros adapted from Hedley (public domain) +// https://github.com/nemequ/hedley + +#if defined(__has_builtin) +# define EXO_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else +# define EXO_HAS_BUILTIN(builtin) (0) +#endif + +#if EXO_HAS_BUILTIN(__builtin_assume) +# define EXO_ASSUME(expr) __builtin_assume(expr) +#elif EXO_HAS_BUILTIN(__builtin_unreachable) +# define EXO_ASSUME(expr) \ + ((void)((expr) ? 1 : (__builtin_unreachable(), 1))) +#else +# define EXO_ASSUME(expr) ((void)(expr)) +#endif + + +#ifndef EXO_WIN_2I32 +#define EXO_WIN_2I32 +struct exo_win_2i32{ + int32_t * const data; + const int_fast32_t strides[2]; +}; +#endif +#ifndef EXO_WIN_2I32C +#define EXO_WIN_2I32C +struct exo_win_2i32c{ + const int32_t * const data; + const int_fast32_t strides[2]; +}; +#endif +// exo_conv1d_tile_lt_kw( +// data : i32[4, 16] @DRAM, +// kernels : i32[16, 4, 4] @DRAM, +// out : i32[16, 16] @DRAM +// ) +void exo_conv1d_tile_lt_kw( void *ctxt, const int32_t* data, const int32_t* kernels, int32_t* out ); + + + +#ifdef __cplusplus +} +#endif +#endif // TEST_CASE_H + +#include "test_case.h" + +#include +#include + +#include +#include + + +// exo_conv1d_tile_lt_kw( +// data : i32[4, 16] @DRAM, +// kernels : i32[16, 4, 4] @DRAM, +// out : i32[16, 16] @DRAM +// ) +void exo_conv1d_tile_lt_kw( void *ctxt, const int32_t* data, const int32_t* kernels, int32_t* out ) { +for (int_fast32_t ioo = 0; ioo < 1; ioo++) { + for (int_fast32_t jo = 0; jo < 4; jo++) { + #define out_tile_0 "m7" + #define out_tile_1 "m6" + #define out_tile_2 "m5" + #define out_tile_3 "m4" + asm volatile("mzero "out_tile_0); + asm volatile("mzero "out_tile_1); + asm volatile("mzero "out_tile_2); + asm volatile("mzero "out_tile_3); + for (int_fast32_t c = 0; c < 4; c++) { + static int32_t y[4 * 4]; + for (int_fast32_t ji = 0; ji < 4; ji++) { + for (int_fast32_t r = 0; r < 4; r++) { + if (ji + r + 4 * jo < 16) { + y[ji * 4 + r] = data[c * 16 + ji + r + 4 * jo]; + } else { + y[ji * 4 + r] = ((int32_t) 0); + } + } + } + #define kernel_tile_0 "m3" + #define kernel_tile_1 "m2" + #define kernel_tile_2 "m1" + #define data_tile "m0" + asm volatile("mld.w "data_tile", (%1), %0" :: "r"(4*(((struct exo_win_2i32c){ &y[0], { 4, 1 } }).strides[0])), "r"(&y[0])); + asm volatile("mld.w "kernel_tile_0", (%1), %0" :: "r"(4*(((struct exo_win_2i32c){ &kernels[(16 * ioo) * (16) + (c) * 4], { 16, 1 } }).strides[0])), "r"(&kernels[(16 * ioo) * (16) + (c) * 4])); + asm volatile("mmasa.w "out_tile_0", "data_tile", "kernel_tile_0); + asm volatile("mld.w "kernel_tile_1", (%1), %0" :: "r"(4*(((struct exo_win_2i32c){ &kernels[(4 + 16 * ioo) * (16) + (c) * 4], { 16, 1 } }).strides[0])), "r"(&kernels[(4 + 16 * ioo) * (16) + (c) * 4])); + asm volatile("mmasa.w "out_tile_1", "data_tile", "kernel_tile_1); + #undef kernel_tile_1 + asm volatile("mld.w "kernel_tile_2", (%1), %0" :: "r"(4*(((struct exo_win_2i32c){ &kernels[(8 + 16 * ioo) * (16) + (c) * 4], { 16, 1 } }).strides[0])), "r"(&kernels[(8 + 16 * ioo) * (16) + (c) * 4])); + asm volatile("mmasa.w "out_tile_2", "data_tile", "kernel_tile_2); + #undef 
kernel_tile_2 + asm volatile("mld.w "kernel_tile_0", (%1), %0" :: "r"(4*(((struct exo_win_2i32c){ &kernels[(12 + 16 * ioo) * (16) + (c) * 4], { 16, 1 } }).strides[0])), "r"(&kernels[(12 + 16 * ioo) * (16) + (c) * 4])); + asm volatile("mmasa.w "out_tile_3", "data_tile", "kernel_tile_0); + #undef data_tile + #undef kernel_tile_0 + } + asm volatile("mst.w "out_tile_0", (%1), %0" :: "r"(4*(((struct exo_win_2i32){ &out[(16 * ioo) * (16) + 4 * jo], { 16, 1 } }).strides[0])), "r"(&out[(16 * ioo) * (16) + 4 * jo])); + #undef out_tile_0 + asm volatile("mst.w "out_tile_1", (%1), %0" :: "r"(4*(((struct exo_win_2i32){ &out[(4 + 16 * ioo) * (16) + 4 * jo], { 16, 1 } }).strides[0])), "r"(&out[(4 + 16 * ioo) * (16) + 4 * jo])); + #undef out_tile_1 + asm volatile("mst.w "out_tile_2", (%1), %0" :: "r"(4*(((struct exo_win_2i32){ &out[(8 + 16 * ioo) * (16) + 4 * jo], { 16, 1 } }).strides[0])), "r"(&out[(8 + 16 * ioo) * (16) + 4 * jo])); + #undef out_tile_2 + asm volatile("mst.w "out_tile_3", (%1), %0" :: "r"(4*(((struct exo_win_2i32){ &out[(12 + 16 * ioo) * (16) + 4 * jo], { 16, 1 } }).strides[0])), "r"(&out[(12 + 16 * ioo) * (16) + 4 * jo])); + #undef out_tile_3 + } +} +} + + +/* relying on the following instruction..." +rvm_mld(dst,src) +asm volatile("mld.w "{dst_int}", (%1), %0" :: "r"(4*({src}.strides[0])), "r"(&{src_data})); +*/ + +/* relying on the following instruction..." +rvm_mmasa(md,ms1,ms2) +asm volatile("mmasa.w "{md_int}", "{ms1_int}", "{ms2_int}); +*/ + +/* relying on the following instruction..." +rvm_mst(src,dst) +asm volatile("mst.w "{src_int}", (%1), %0" :: "r"(4*({dst}.strides[0])), "r"(&{dst_data})); +*/ + +/* relying on the following instruction..." +rvm_mzero(dst) +asm volatile("mzero "{dst_int}); +*/ diff --git a/tests/test_examples.py b/tests/test_examples.py new file mode 100644 index 00000000..bde09d98 --- /dev/null +++ b/tests/test_examples.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +import exo +import exo.main + +REPO_ROOT = Path(__file__).parent.parent.resolve() + + +def _test_app(module_file: Path): + module_file = module_file.resolve(strict=True) + mod = exo.main.load_user_code(module_file) + procs = exo.main.get_procs_from_module(mod) + + c_file, h_file = exo.compile_procs_to_strings(procs, "test_case.h") + + return f"{h_file}\n{c_file}" + + +# ---------------------------------------------------------------------------- # + + +def test_avx2_matmul(golden): + module_file = REPO_ROOT / "examples" / "avx2_matmul" / "x86_matmul.py" + assert _test_app(module_file) == golden + + +def test_cursors(golden): + module_file = REPO_ROOT / "examples" / "cursors" / "cursors.py" + assert _test_app(module_file) == golden + + +def test_rvm_conv1d(golden): + module_file = REPO_ROOT / "examples" / "rvm_conv1d" / "exo" / "conv1d.py" + assert _test_app(module_file) == golden + + +def test_quiz1(golden): + module_file = REPO_ROOT / "examples" / "quiz1" / "quiz1.py" + assert _test_app(module_file) == golden + + +def test_quiz3(golden): + module_file = REPO_ROOT / "examples" / "quiz3" / "quiz3.py" + assert _test_app(module_file) == golden
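As a small, hypothetical convenience for local debugging (not part of the test suite above), the same `_test_app` helper can be reused to dump the generated C for a single example; `quiz2.py` has no golden test because its schedule intentionally raises a `SchedulingError` as written:

```python
# Hypothetical local usage: print the C generated for quiz1.
if __name__ == "__main__":
    print(_test_app(REPO_ROOT / "examples" / "quiz1" / "quiz1.py"))
```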