11 changes: 2 additions & 9 deletions backends/arm/test/ops/test_acos.py
@@ -4,7 +4,6 @@
 # LICENSE file in the root directory of this source tree.
 from typing import Tuple
 
-import pytest
 import torch
 
 from executorch.backends.arm.test import common
@@ -105,10 +104,7 @@ def test_acos_vgf_FP(test_data: Tuple):
         tosa_version="TOSA-1.0+FP",
         run_on_vulkan_runtime=True,
     )
-    try:
-        pipeline.run()
-    except FileNotFoundError as e:
-        pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
+    pipeline.run()
 
 
 @common.parametrize("test_data", test_data_suite)
@@ -122,7 +118,4 @@ def test_acos_vgf_INT(test_data: Tuple):
         tosa_version="TOSA-1.0+INT",
         run_on_vulkan_runtime=True,
     )
-    try:
-        pipeline.run()
-    except FileNotFoundError as e:
-        pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
+    pipeline.run()
10 changes: 2 additions & 8 deletions backends/arm/test/ops/test_add.py
@@ -211,10 +211,7 @@ def test_add_tensor_vgf_FP(test_data: input_t1):
         tosa_version="TOSA-1.0+FP",
         run_on_vulkan_runtime=True,
     )
-    try:
-        pipeline.run()
-    except FileNotFoundError as e:
-        pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
+    pipeline.run()
 
 
 @common.parametrize("test_data", Add.test_data)
@@ -228,10 +225,7 @@ def test_add_tensor_vgf_INT(test_data: input_t1):
         tosa_version="TOSA-1.0+INT",
         run_on_vulkan_runtime=True,
     )
-    try:
-        pipeline.run()
-    except FileNotFoundError as e:
-        pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
+    pipeline.run()
 
 
 def get_symmetric_a16w8_add_quantizer(per_channel_quantization=False):
7 changes: 6 additions & 1 deletion backends/arm/test/ops/test_sum.py
@@ -94,7 +94,11 @@ def test_view_u85_INT_1_0(test_data: Tuple):
 @common.SkipIfNoModelConverter
 def test_sum_dim_intlist_vgf_FP(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
-        Sum(), test_data(), aten_op, tosa_version="TOSA-1.0+FP"
+        Sum(),
+        test_data(),
+        aten_op,
+        tosa_version="TOSA-1.0+FP",
+        run_on_vulkan_runtime=True,
     )
     pipeline.run()
 
@@ -107,6 +111,7 @@ def test_sum_dim_intlist_vgf_INT(test_data: input_t1):
         test_data(),
         aten_op,
         tosa_version="TOSA-1.0+INT",
+        run_on_vulkan_runtime=True,
     )
     pipeline.run()
 
15 changes: 14 additions & 1 deletion backends/arm/test/tester/test_pipeline.py
@@ -906,7 +906,7 @@ class VgfPipeline(BasePipelineMaker, Generic[T]):
         exir_ops: Exir dialect ops expected to be found in the graph after to_edge.
             if not using use_edge_to_transform_and_lower.
 
-        run_on_vulkan_runtime: Set to true to test VGF output on VKML runtime.
+        run_on_vulkan_runtime: Whether to test VGF output on VKML runtime.
 
         vgf_compiler_flags: Optional compiler flags.
 

Expand Down Expand Up @@ -1018,3 +1018,16 @@ def __init__(
qtol=qtol,
inputs=self.test_data,
)
self.run_on_vulkan_runtime = run_on_vulkan_runtime

# TODO: Remove once CI fully working
def run(self):
import pytest

if self.run_on_vulkan_runtime:
try:
super().run()
except FileNotFoundError as e:
pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
else:
super().run()
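With the skip handling centralized in VgfPipeline.run(), individual VGF tests only pass run_on_vulkan_runtime=True and call pipeline.run(). A hedged sketch of what such a test could look like follows; the Cos module, aten_op string, and test_data_suite are invented placeholders, and the import paths simply follow the file layout shown in this diff.

    # Illustrative only: operator, aten_op and test data are placeholders;
    # only the VgfPipeline usage mirrors the pattern in the files above.
    from typing import Tuple

    import torch

    from executorch.backends.arm.test import common
    from executorch.backends.arm.test.tester.test_pipeline import VgfPipeline

    input_t1 = Tuple[torch.Tensor]
    aten_op = "torch.ops.aten.cos.default"  # assumed target op string

    test_data_suite = {
        "rand_4d": lambda: (torch.rand(1, 3, 8, 8),),
    }


    class Cos(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return torch.cos(x)


    @common.parametrize("test_data", test_data_suite)
    @common.SkipIfNoModelConverter
    def test_cos_vgf_FP(test_data: input_t1):
        pipeline = VgfPipeline[input_t1](
            Cos(),
            test_data(),
            aten_op,
            tosa_version="TOSA-1.0+FP",
            run_on_vulkan_runtime=True,
        )
        # No per-test try/except: VgfPipeline.run() skips the test itself
        # when the VKML executor_runner binary has not been built.
        pipeline.run()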
22 changes: 17 additions & 5 deletions examples/portable/executor_runner/executor_runner.cpp
@@ -175,21 +175,33 @@ int main(int argc, char** argv) {
   std::vector<std::pair<char*, size_t>> input_buffers;
 
   std::stringstream list_of_input_files(FLAGS_inputs);
-  std::string token;
+  std::string path;
 
+  // First reserve memory for number of vector elements to avoid vector
+  // reallocations when emplacing back.
+  std::vector<std::string> file_paths;
+  while (std::getline(list_of_input_files, path, ',')) {
+    file_paths.push_back(std::move(path));
+  }
+  inputs_storage.reserve(file_paths.size());
+
+  for (const auto& file_path : file_paths) {
+    std::ifstream input_file_handle(
+        file_path, std::ios::binary | std::ios::ate);
+
-  while (std::getline(list_of_input_files, token, ',')) {
-    std::ifstream input_file_handle(token, std::ios::binary | std::ios::ate);
     if (!input_file_handle) {
-      ET_LOG(Error, "Failed to open input file: %s\n", token.c_str());
+      ET_LOG(Error, "Failed to open input file: %s\n", file_path.c_str());
       return 1;
     }
 
     std::streamsize file_size = input_file_handle.tellg();
     input_file_handle.seekg(0, std::ios::beg);
+
+    // Reserve memory for actual file contents.
     inputs_storage.emplace_back(file_size, '\0');
 
     if (!input_file_handle.read(&inputs_storage.back()[0], file_size)) {
-      ET_LOG(Error, "Failed to read input file: %s\n", token.c_str());
+      ET_LOG(Error, "Failed to read input file: %s\n", file_path.c_str());
       return 1;
     }
 
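The runner now splits the --inputs flag on commas, reserves storage for the full list of paths, and only then reads each file verbatim into inputs_storage. A hedged usage sketch of driving it from a script follows; the binary and model paths are assumed build outputs, and only the comma-separated --inputs format itself is taken from the code above.

    # Hedged usage sketch: writes two example raw input files and passes them
    # via --inputs. Little-endian float32 content is only an assumption about
    # what the target model expects; the runner reads each file verbatim.
    import struct
    import subprocess

    with open("input0.bin", "wb") as f:
        f.write(struct.pack("<4f", 1.0, 2.0, 3.0, 4.0))
    with open("input1.bin", "wb") as f:
        f.write(struct.pack("<4f", 0.5, 0.5, 0.5, 0.5))

    subprocess.run(
        [
            "./cmake-out/executor_runner",        # assumed build output path
            "--model_path", "model.pte",          # assumed model file
            "--inputs", "input0.bin,input1.bin",  # comma-separated, as parsed above
        ],
        check=True,
    )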
35 changes: 34 additions & 1 deletion extension/runner_util/inputs.cpp
@@ -78,7 +78,40 @@ Result<BufferCleanup> prepare_input_tensors(
       continue;
     }
     if (tag.get() != Tag::Tensor) {
-      ET_LOG(Debug, "Skipping non-tensor input %zu", i);
+      if (!hard_code_inputs_to_ones) {
+        Error err = Error::Ok;
+        auto [buffer, buffer_size] = input_buffers.at(i);
+
+        ET_LOG(
+            Debug, "Verifying and setting input for non-tensor input %zu", i);
+
+        if (tag.get() == Tag::Int) {
+          int64_t int_input;
+          std::memcpy(&int_input, buffer, buffer_size);
+          err = method.set_input(runtime::EValue(int_input), i);
+        } else if (tag.get() == Tag::Double) {
+          double double_input;
+          std::memcpy(&double_input, buffer, buffer_size);
+          err = method.set_input(runtime::EValue(double_input), i);
+        } else if (tag.get() == Tag::Bool) {
+          bool bool_input;
+          std::memcpy(&bool_input, buffer, buffer_size);
+          err = method.set_input(runtime::EValue(bool_input), i);
+        } else {
+          ET_LOG(
+              Error,
+              "Input %zu of type %zu not supported",
+              i,
+              static_cast<size_t>(tag.get()));
+          err = Error::InvalidArgument;
+        }
+        if (err != Error::Ok) {
+          BufferCleanup cleanup({inputs, num_allocated});
+          return err;
+        }
+      } else {
+        ET_LOG(Debug, "Skipping non-tensor input %zu", i);
+      }
       continue;
     }
     Result<TensorInfo> tensor_meta = method_meta.input_tensor_meta(i);
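Each non-tensor input is memcpy'd straight from its file buffer into an int64_t, double, or bool before being set on the method as an EValue, so the corresponding input file is expected to hold exactly the raw bytes of that type. A minimal packing sketch, assuming a little-endian host and the widths implied by the C++ types above:

    # Hedged sketch: packs scalar inputs as the raw bytes the memcpy branches
    # above read. "<q"/"<d"/"<?" assume a little-endian host; widths follow the
    # C++ types (8-byte int64_t, 8-byte double, 1-byte bool).
    import struct

    with open("scalar_int.bin", "wb") as f:
        f.write(struct.pack("<q", 42))    # int64_t input
    with open("scalar_double.bin", "wb") as f:
        f.write(struct.pack("<d", 0.25))  # double input
    with open("scalar_bool.bin", "wb") as f:
        f.write(struct.pack("<?", True))  # bool input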