From cec503d6922c16796452e8cac251fad0454a7565 Mon Sep 17 00:00:00 2001
From: paramthakkar123
Date: Fri, 17 Oct 2025 09:04:38 +0530
Subject: [PATCH 1/3] Added tests for random_ops and tensor_ops

---
 test/test_random_ops.py | 27 +++++++++++++++++++++++++++
 test/test_tensor_ops.py | 28 ++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)
 create mode 100644 test/test_random_ops.py
 create mode 100644 test/test_tensor_ops.py

diff --git a/test/test_random_ops.py b/test/test_random_ops.py
new file mode 100644
index 0000000..4a46941
--- /dev/null
+++ b/test/test_random_ops.py
@@ -0,0 +1,27 @@
+import torch
+import os
+from BackendBench.backends import LLMBackend
+from BackendBench.llm_client import LLMKernelGenerator
+from BackendBench.suite import OpInfoTestSuite
+
+class TestRandomOps:
+    suite = OpInfoTestSuite(
+        "random_ops_test",
+        "cuda",
+        torch.float32,
+        filter=["bernoulli"]
+    )
+
+    def test_bernoulli(self):
+        backend = LLMBackend(
+            model="mock_model",
+            llm_client=LLMKernelGenerator(model="mock_model"),
+        )
+        backend.generate_kernels(self.suite, attempts=3)
+
+        summary_file = f"{backend.kernels_dir}/bernoulli/bernoulli_summary.txt"
+        assert os.path.exists(summary_file)
+
+        with open(summary_file, "r") as f:
+            summary = f.read()
+        assert "Final Status: ✓ Success" in summary
\ No newline at end of file
diff --git a/test/test_tensor_ops.py b/test/test_tensor_ops.py
new file mode 100644
index 0000000..5959967
--- /dev/null
+++ b/test/test_tensor_ops.py
@@ -0,0 +1,28 @@
+import torch
+import os
+from BackendBench.backends import LLMBackend
+from BackendBench.llm_client import LLMKernelGenerator
+from BackendBench.suite import OpInfoTestSuite
+
+class TestTensorCreationOps:
+    suite = OpInfoTestSuite(
+        "tensor_creation_ops_test",
+        "cuda",
+        torch.float32,
+        filter=["cat", "clone", "copy_", "elu_backward", "masked_fill_", "new_empty", "new_empty_strided", "new_full", "new_ones", "new_zeros", "nonzero", "repeat", "split", "split_with_sizes", "unsqueeze_"],
+    )
+
+    def test_tensor_creation_ops(self):
+        backend = LLMBackend(
+            model="mock_model",
+            llm_client=LLMKernelGenerator(model="mock_model"),
+        )
+        backend.generate_kernels(self.suite, attempts=3)
+
+        for op_name in self.suite.get_op_names():
+            summary_file = f"{backend.kernels_dir}/{op_name}/{op_name}_summary.txt"
+            assert os.path.exists(summary_file)
+
+            with open(summary_file, "r") as f:
+                summary = f.read()
+            assert "Final Status: ✓ Success" in summary
\ No newline at end of file

From 08d5a5ce972d8d1e3979bbbf5d4a685af0f4ceec Mon Sep 17 00:00:00 2001
From: paramthakkar123
Date: Fri, 17 Oct 2025 15:59:22 +0530
Subject: [PATCH 2/3] Updates

---
 test/test_tensor_ops.py | 57 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 55 insertions(+), 2 deletions(-)

diff --git a/test/test_tensor_ops.py b/test/test_tensor_ops.py
index 5959967..b08d6a4 100644
--- a/test/test_tensor_ops.py
+++ b/test/test_tensor_ops.py
@@ -4,12 +4,17 @@
 from BackendBench.llm_client import LLMKernelGenerator
 from BackendBench.suite import OpInfoTestSuite
 
+
+class TestCase:
+    def __init__(self, args, kwargs):
+        self.args = args
+        self.kwargs = kwargs
 class TestTensorCreationOps:
     suite = OpInfoTestSuite(
         "tensor_creation_ops_test",
         "cuda",
         torch.float32,
-        filter=["cat", "clone", "copy_", "elu_backward", "masked_fill_", "new_empty", "new_empty_strided", "new_full", "new_ones", "new_zeros", "nonzero", "repeat", "split", "split_with_sizes", "unsqueeze_"],
+        filter=["new_empty", "new_empty_strided", "new_full", "new_ones", "new_zeros"],
     )
 
     def test_tensor_creation_ops(self):
@@ -25,4 +30,52 @@ def test_tensor_creation_ops(self):
 
             with open(summary_file, "r") as f:
                 summary = f.read()
-            assert "Final Status: ✓ Success" in summary
\ No newline at end of file
+            assert "Final Status: ✓ Success" in summary
+
+    def test_new_empty(self):
+        base_tensor = torch.ones((2, 3), device="cuda", dtype=torch.float32)
+        new_tensor = base_tensor.new_empty((4, 5))
+
+        assert new_tensor.shape == (4, 5)
+        assert new_tensor.device == base_tensor.device
+        assert new_tensor.dtype == base_tensor.dtype
+        assert new_tensor.is_contiguous()
+        assert new_tensor.numel() > 0
+
+    def test_new_empty_strided(self):
+        base_tensor = torch.ones((2, 3), device="cuda", dtype=torch.float32)
+        new_tensor = base_tensor.new_empty_strided((4, 5), (10, 2))
+
+        assert new_tensor.shape == (4, 5)
+        assert new_tensor.stride() == (10, 2)
+        assert new_tensor.device == base_tensor.device
+        assert new_tensor.dtype == base_tensor.dtype
+        assert new_tensor.numel() > 0
+
+    def test_new_full(self):
+        base_tensor = torch.ones((2, 3), device="cuda", dtype=torch.float32)
+        fill_value = 7.0
+        new_tensor = base_tensor.new_full((4, 5), fill_value)
+
+        assert new_tensor.shape == (4, 5)
+        assert new_tensor.device == base_tensor.device
+        assert new_tensor.dtype == base_tensor.dtype
+        assert torch.all(new_tensor == fill_value)
+
+    def test_new_ones(self):
+        base_tensor = torch.ones((2, 3), device="cuda", dtype=torch.float32)
+        new_tensor = base_tensor.new_ones((4, 5))
+
+        assert new_tensor.shape == (4, 5)
+        assert new_tensor.device == base_tensor.device
+        assert new_tensor.dtype == base_tensor.dtype
+        assert torch.all(new_tensor == 1.0)
+
+    def test_new_zeros(self):
+        base_tensor = torch.ones((2, 3), device="cuda", dtype=torch.float32)
+        new_tensor = base_tensor.new_zeros((4, 5))
+
+        assert new_tensor.shape == (4, 5)
+        assert new_tensor.device == base_tensor.device
+        assert new_tensor.dtype == base_tensor.dtype
+        assert torch.all(new_tensor == 0.0)
\ No newline at end of file

From fda6d4658fc5939464b602c8019efc142951ce18 Mon Sep 17 00:00:00 2001
From: paramthakkar123
Date: Fri, 17 Oct 2025 16:00:09 +0530
Subject: [PATCH 3/3] Updates

---
 test/test_random_ops.py | 20 ++++++++++++--------
 test/test_tensor_ops.py | 14 ++++++++++++--
 2 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/test/test_random_ops.py b/test/test_random_ops.py
index 4a46941..de53092 100644
--- a/test/test_random_ops.py
+++ b/test/test_random_ops.py
@@ -1,16 +1,20 @@
-import torch
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD 3-Clause license found in the
+# LICENSE file in the root directory of this source tree.
+
 import os
+
+import torch
+
 from BackendBench.backends import LLMBackend
 from BackendBench.llm_client import LLMKernelGenerator
 from BackendBench.suite import OpInfoTestSuite
 
+
 class TestRandomOps:
-    suite = OpInfoTestSuite(
-        "random_ops_test",
-        "cuda",
-        torch.float32,
-        filter=["bernoulli"]
-    )
+    suite = OpInfoTestSuite("random_ops_test", "cuda", torch.float32, filter=["bernoulli"])
 
     def test_bernoulli(self):
         backend = LLMBackend(
@@ -24,4 +28,4 @@ def test_bernoulli(self):
 
         with open(summary_file, "r") as f:
             summary = f.read()
-        assert "Final Status: ✓ Success" in summary
\ No newline at end of file
+        assert "Final Status: ✓ Success" in summary
diff --git a/test/test_tensor_ops.py b/test/test_tensor_ops.py
index b08d6a4..6858b6f 100644
--- a/test/test_tensor_ops.py
+++ b/test/test_tensor_ops.py
@@ -1,5 +1,13 @@
-import torch
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD 3-Clause license found in the
+# LICENSE file in the root directory of this source tree.
+
 import os
+
+import torch
+
 from BackendBench.backends import LLMBackend
 from BackendBench.llm_client import LLMKernelGenerator
 from BackendBench.suite import OpInfoTestSuite
@@ -9,6 +17,8 @@ class TestCase:
     def __init__(self, args, kwargs):
         self.args = args
         self.kwargs = kwargs
+
+
 class TestTensorCreationOps:
     suite = OpInfoTestSuite(
         "tensor_creation_ops_test",
@@ -78,4 +88,4 @@ def test_new_zeros(self):
         assert new_tensor.shape == (4, 5)
         assert new_tensor.device == base_tensor.device
         assert new_tensor.dtype == base_tensor.dtype
-        assert torch.all(new_tensor == 0.0)
\ No newline at end of file
+        assert torch.all(new_tensor == 0.0)