diff --git a/.all-contributorsrc b/.all-contributorsrc
index 8d4b73289..acd1673a4 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -287,6 +287,33 @@
       "contributions": [
         "code"
       ]
+    },
+    {
+      "login": "TAdev0",
+      "name": "Tristan",
+      "avatar_url": "https://avatars.githubusercontent.com/u/122918260?v=4",
+      "profile": "https://nodeguardians.io/character/98995858fd55",
+      "contributions": [
+        "code"
+      ]
+    },
+    {
+      "login": "Gakunt",
+      "name": "Kugo",
+      "avatar_url": "https://avatars.githubusercontent.com/u/153402253?v=4",
+      "profile": "https://github.com/Gakunt",
+      "contributions": [
+        "doc"
+      ]
+    },
+    {
+      "login": "FriendlyLifeguard",
+      "name": "Beeyoung",
+      "avatar_url": "https://avatars.githubusercontent.com/u/55970530?v=4",
+      "profile": "http://alankang.xyz",
+      "contributions": [
+        "code"
+      ]
     }
   ],
   "contributorsPerLine": 7,
@@ -296,4 +323,4 @@
   "projectName": "orion",
   "projectOwner": "gizatechxyz",
   "commitType": "docs"
-}
\ No newline at end of file
+}
diff --git a/README.md b/README.md
index cc2cf1ef2..19888ec6f 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
 # Orion: An Open-source Framework for Validity and ZK ML ✨
 
-[![All Contributors](https://img.shields.io/badge/all_contributors-30-orange.svg?style=flat-square)](#contributors-)
+[![All Contributors](https://img.shields.io/badge/all_contributors-32-orange.svg?style=flat-square)](#contributors-)
 
 Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/).
@@ -57,7 +57,7 @@ For a full list of all authors and contributors, see [the contributors page](htt
 
 This project is licensed under the **MIT license**.
 
-See [LICENSE](https://github.com/franalgaba/onnx-cairo/blob/main/LICENSE/README.md) for more information.
+See [LICENSE](https://github.com/franalgaba/onnx-cairo/blob/main/LICENSE) for more information.
 
 ## Contributors ✨
 
@@ -108,6 +108,9 @@ Thanks goes to these wonderful people:
 [all-contributors HTML table; markup not preserved by extraction. Existing rows include Vid Kersic (πŸ’»), Trunks @ Carbonable, and canacechan.]
+[rows added in this PR: Tristan (πŸ’»), Kugo (πŸ“–), Beeyoung (πŸ’»)]
diff --git a/Scarb.toml b/Scarb.toml
index 463e4ac62..f05fa6649 100644
--- a/Scarb.toml
+++ b/Scarb.toml
@@ -1,6 +1,6 @@
 [package]
 name = "orion"
-version = "0.2.3"
+version = "0.2.4"
 cairo-version = "2.5.3"
 edition = "2023_10"
 description = "ONNX Runtime in Cairo for verifiable ML inference using STARK"
diff --git a/docgen/src/main.rs b/docgen/src/main.rs
index fc780ad03..8d1f90f4b 100644
--- a/docgen/src/main.rs
+++ b/docgen/src/main.rs
@@ -90,6 +90,14 @@ fn main() {
     let trait_name: &str = "SVMClassifierTrait";
     doc_trait(trait_path, doc_path, label);
     doc_functions(trait_path, doc_path, trait_name, label);
+
+    // NORMALIZER DOC
+    let trait_path = "src/operators/ml/normalizer/normalizer.cairo";
+    let doc_path = "docs/framework/operators/machine-learning/normalizer";
+    let label = "normalizer";
+    let trait_name: &str = "NormalizerTrait";
+    doc_trait(trait_path, doc_path, label);
+    doc_functions(trait_path, doc_path, trait_name, label);
 }
 
 fn doc_trait(trait_path: &str, doc_path: &str, label: &str) {
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 16a00107b..1608069ae 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -3,6 +3,11 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [Unreleased] - 2024-02-21
+
+## Added
+- Label Encoder.
+
 ## [Unreleased] - 2024-01-17
 
 ## Added
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index d867a96ba..477601b37 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -144,6 +144,7 @@
 * [tensor.not](framework/operators/tensor/tensor.not.md)
 * [tensor.erf](framework/operators/tensor/tensor.erf.md)
 * [tensor.reduce\_log\_sum](framework/operators/tensor/tensor.reduce\_log\_sum.md)
+* [tensor.reduce\_log\_sum\_exp](framework/operators/tensor/tensor.reduce\_log\_sum\_exp.md)
 * [tensor.unique](framework/operators/tensor/tensor.unique.md)
 * [tensor.compress](framework/operators/tensor/tensor.compress.md)
 * [tensor.layer_normalization](framework/operators/tensor/tensor.layer_normalization.md)
@@ -157,6 +158,7 @@
 * [tensor.hamming_window](framework/operators/tensor/tensor.hamming_window.md)
 * [tensor.blackman_window](framework/operators/tensor/tensor.blackman_window.md)
 * [tensor.random_uniform_like](framework/operators/tensor/tensor.random_uniform_like.md)
+* [tensor.label_encoder](framework/operators/tensor/tensor.label_encoder.md)
 * [Neural Network](framework/operators/neural-network/README.md)
 * [nn.relu](framework/operators/neural-network/nn.relu.md)
 * [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md)
@@ -198,6 +200,8 @@
 * [sequence.sequence\_erase](framework/operators/sequence/sequence.sequence\_erase.md)
 * [sequence.sequence\_insert](framework/operators/sequence/sequence.sequence\_insert.md)
 * [sequence.concat\_from\_sequence](framework/operators/sequence/sequence.concat\_from\_sequence.md)
+* [Normalizer](framework/operators/machine-learning/normalizer/README.md)
+* [normalizer.predict](framework/operators/machine-learning/normalizer/normalizer.predict.md)
 
 ## πŸ› Hub
diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md
index a05d3bbce..f3f84ac3f 100644
--- a/docs/framework/compatibility.md
+++ b/docs/framework/compatibility.md
@@ -88,7 +88,7 @@ You can see below the list of current supported ONNX Operators:
 | [Max](operators/tensor/tensor.max.md) | :white\_check\_mark: |
 | [ReduceSumSquare](operators/tensor/tensor.reduce\_sum\_square.md) | :white\_check\_mark: |
 | [Trilu](operators/tensor/tensor.trilu.md) | :white\_check\_mark: |
-| [Scatter](operators/tensor/scatter.max.md) | :white\_check\_mark: |
+| [Scatter](operators/tensor/tensor.scatter.md) | :white\_check\_mark: |
 | [ArrayFeatureExtractor](operators/tensor/tensor.array\_feature\_extractor.md) | :white\_check\_mark: |
 | [Binarizer](operators/tensor/tensor.binarizer.md) | :white\_check\_mark: |
 | [ConstantOfShape](operators/tensor/tensor.constant_of_shape.md) | :white\_check\_mark: |
@@ -111,6 +111,7 @@ You can see below the list of current supported ONNX Operators:
 | [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: |
 | [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: |
 | [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: |
+| [ReduceLogSumExp](operators/tensor/tensor.reduce\_log\_sum\_exp.md) | :white\_check\_mark: |
 | [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: |
 | [ScatterND](operators/tensor/tensor.scatter/_nd.md) | :white\_check\_mark: |
 | [DequantizeLinear](operators/tensor/tensor.dequantize_linear.md) | :white\_check\_mark: |
@@ -124,5 +125,6 @@ You can see below the list of current supported ONNX Operators:
 | [HammingWindow](operators/tensor/tensor.tensor.hamming_window.md) | :white\_check\_mark: |
 | [BlackmanWindow](operators/tensor/tensor.tensor.blackman_window.md) | :white\_check\_mark: |
 | [RandomUniformLike](operators/tensor/tensor.tensor.random_uniform_like.md) | :white\_check\_mark: |
+| [LabelEncoder](operators/tensor/tensor.label_encoder.md) | :white\_check\_mark: |
 
-Current Operators support: **117/156 (75%)**
+Current Operators support: **118/156 (76%)**
diff --git a/docs/framework/numbers/fixed-point/README.md b/docs/framework/numbers/fixed-point/README.md
index f30122676..4ecc2246f 100644
--- a/docs/framework/numbers/fixed-point/README.md
+++ b/docs/framework/numbers/fixed-point/README.md
@@ -69,6 +69,7 @@ use orion::numbers::fixed_point::core::FixedTrait;
 | [`fp.sinh`](fp.sinh.md) | Returns the value of the hyperbolic sine of the fixed point number. |
 | [`fp.tanh`](fp.tanh.md) | Returns the value of the hyperbolic tangent of the fixed point number. |
 | [`fp.sign`](fp.sign.md) | Returns the element-wise indication of the sign of the input fixed point number. |
+| [`fp.erf`](fp.erf.md) | Returns the error function of the input fixed point number computed element-wise. |
 
 ### Arithmetic & Comparison operators
diff --git a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
index aec154f68..7ed30f236 100644
--- a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
+++ b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
@@ -1,7 +1,7 @@
 # LinearClassifierTrait::predict
 
 ```rust
-   fn predict(ref self: LinearClassifier<T>, X: Tensor<T>) -> Tensor<T>;
+   fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> Tensor<T>;
 ```
 
 Linear Classifier. Performs the linear classification.
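For reference, linear classification here follows the usual generalized linear form (a sketch of the standard formulation, not a restatement of Orion's internal field names), with an optional post-transform such as `POST_TRANSFORM::SOFTMAX` applied row-wise to the scores, as in the example hunk below:

$$\text{scores} = XW^{\mathsf{T}} + b, \qquad \text{softmax}(z)_i = \frac{e^{z_i}}{\sum_j e^{z_j}}$$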
@@ -85,7 +85,7 @@ fn linear_classifier_helper(
 fn linear_classifier_multi_softmax() -> (Span<usize>, Tensor<FP16x16>) {
     let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);
 
-    let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
     (labels, scores)
 }
diff --git a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md
index f1bd38831..6c40ac930 100644
--- a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md
+++ b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md
@@ -1,14 +1,14 @@
 # LinearRegressorTrait::predict
 
 ```rust
-   fn predict(ref self: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
+   fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
 ```
 
 Linear Regressor. Performs the generalized linear regression evaluation.
 
 ## Args
 
-* `self`: LinearRegressor<T> - A LinearRegressor object.
+* `regressor`: LinearRegressor<T> - A LinearRegressor object.
 * `X`: Input 2D tensor.
 
 ## Returns
@@ -68,7 +68,7 @@ fn example_linear_regressor() -> Tensor<FP16x16> {
         post_transform
     };
 
-    let scores = LinearRegressorTrait::predict(ref regressor, X);
+    let scores = LinearRegressorTrait::predict(regressor, X);
     scores
 }
@@ -120,7 +120,7 @@ fn example_linear_regressor_2() -> Tensor<FP16x16> {
         post_transform
     };
 
-    let scores = LinearRegressorTrait::predict(ref regressor, X);
+    let scores = LinearRegressorTrait::predict(regressor, X);
     scores
 }
diff --git a/docs/framework/operators/machine-learning/normalizer/README.md b/docs/framework/operators/machine-learning/normalizer/README.md
new file mode 100644
index 000000000..5b31584eb
--- /dev/null
+++ b/docs/framework/operators/machine-learning/normalizer/README.md
@@ -0,0 +1,23 @@
+# Normalizer
+
+`NormalizerTrait` computes the normalization of the input; each row of the input is normalized independently.
+
+```rust
+use orion::operators::ml::NormalizerTrait;
+```
+
+### Data types
+
+Orion currently supports only fixed point data types for `NormalizerTrait`.
+
+| Data type | dtype |
+| -------------------- | ----------------- |
+| Fixed point (signed) | `NormalizerTrait` |
+
+***
+
+| function | description |
+| --- | --- |
+| [`normalizer.predict`](normalizer.predict.md) | Returns the normalization of the input; each row of the input is normalized independently. |
diff --git a/docs/framework/operators/machine-learning/normalizer/normalizer.predict.md b/docs/framework/operators/machine-learning/normalizer/normalizer.predict.md
new file mode 100644
index 000000000..93a603e4b
--- /dev/null
+++ b/docs/framework/operators/machine-learning/normalizer/normalizer.predict.md
@@ -0,0 +1,61 @@
+# Normalizer::predict
+
+```rust
+   fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
+```
+
+Returns the normalized input.
+Three different types of normalization can be performed, defined as follows:
+MAX: $Y = \frac{X}{\max(X)}$
+L1: $Y = \frac{X}{\operatorname{sum}(X)}$
+L2: $Y = \frac{X}{\sqrt{\operatorname{sum}(X^2)}}$
+For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.
+
+## Args
+
+* `X`(`@Tensor<T>`) - Input 2D tensor.
+* `norm`(`NORM`) - NORM::MAX, NORM::L1 or NORM::L2.
+
+## Returns
+
+* Tensor<T> - Output tensor.
+
+## Examples
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+use orion::numbers::FP16x16;
+use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq};
+
+use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM};
+
+fn normalizer_max() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(3);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 65536, sign: true });
+    data.append(FP16x16 { mag: 52428, sign: true });
+    data.append(FP16x16 { mag: 39321, sign: true });
+    data.append(FP16x16 { mag: 26214, sign: true });
+    data.append(FP16x16 { mag: 13107, sign: true });
+    data.append(FP16x16 { mag: 0, sign: false });
+    data.append(FP16x16 { mag: 13107, sign: false });
+    data.append(FP16x16 { mag: 26214, sign: false });
+    data.append(FP16x16 { mag: 39321, sign: false });
+
+    let X = TensorTrait::new(shape.span(), data.span());
+
+    return NormalizerTrait::predict(X, NORM::MAX);
+}
+>>> [[-1.        -0.8       -0.6      ]
+     [-1.        -0.5        0.       ]
+     [ 0.3333333  0.6666666  1.       ]]
+```
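To make the three modes concrete, here is a minimal NumPy sketch of the row-wise normalization (an illustration only, not Orion's Cairo code). Note that, as the expected output above shows, the normalization is done by magnitude, i.e. the MAX mode divides each row by its maximum absolute value:

```python
import numpy as np

def normalize(x: np.ndarray, norm: str) -> np.ndarray:
    # Row-wise normalization mirroring NORM::MAX / NORM::L1 / NORM::L2.
    if norm == "MAX":
        denom = np.max(np.abs(x), axis=1, keepdims=True)
    elif norm == "L1":
        denom = np.sum(np.abs(x), axis=1, keepdims=True)
    else:  # "L2"
        denom = np.sqrt(np.sum(x * x, axis=1, keepdims=True))
    return x / denom

x = np.array([[-1.0, -0.8, -0.6], [-0.4, -0.2, 0.0], [0.2, 0.4, 0.6]])
print(normalize(x, "MAX"))
# [[-1.         -0.8        -0.6       ]
#  [-1.         -0.5         0.        ]
#  [ 0.33333333  0.66666667  1.        ]]
```

This reproduces the `NORM::MAX` expected output of the Cairo example above (the FP16x16 magnitudes 65536, 52428, ... decode to 1.0, 0.8, ...).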
diff --git a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md
index 6d839e873..c38f3e46d 100644
--- a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md
+++ b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md
@@ -1,7 +1,7 @@
 # TreeEnsembleClassifier::predict
 
 ```rust
-   fn predict(ref self: TreeEnsembleClassifier<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
+   fn predict(classifier: TreeEnsembleClassifier<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
 ```
 
 Tree Ensemble classifier. Returns the top class for each of N inputs.
@@ -185,7 +185,7 @@ fn tree_ensemble_classifier_helper(
 fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span<usize>, MutMatrix::<FP16x16>) {
     let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX);
 
-    let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
     (labels, scores)
 }
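To picture what `predict` returns, here is a toy NumPy sketch of the overall flow: aggregate each tree's per-class scores, apply the post-transform, take the top class. This is a simplified illustration of the semantics (with made-up scores), not Orion's tree-traversal logic:

```python
import numpy as np

def softmax(z: np.ndarray) -> np.ndarray:
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

# Per-class scores emitted by 3 trees for one input (2 classes).
tree_scores = np.array([[0.2, 0.8],
                        [0.6, 0.4],
                        [0.1, 0.9]])
scores = tree_scores.sum(axis=0)   # aggregate the ensemble's votes
probs = softmax(scores)            # POST_TRANSFORM::SOFTMAX
label = int(np.argmax(probs))      # top class, as in the returned labels span
print(label, probs)                # 1 [0.23... 0.76...]
```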
diff --git a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md
index 812115971..243bda558 100644
--- a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md
+++ b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md
@@ -1,7 +1,7 @@
 # TreeEnsembleRegressor::predict
 
 ```rust
-   fn predict(ref self: TreeEnsembleRegressor<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
+   fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
 ```
 
 Tree Ensemble regressor. Returns the regressed values for each input in N.
@@ -160,7 +160,7 @@ fn tree_ensemble_regressor_helper(
 fn test_tree_ensemble_regressor_SUM() -> MutMatrix::<FP16x16> {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
     res
 }
 >>>
diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md
index b24ad9e40..fc3bfb612 100644
--- a/docs/framework/operators/neural-network/README.md
+++ b/docs/framework/operators/neural-network/README.md
@@ -37,6 +37,6 @@ Orion supports currently these `NN` types.
 | [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. |
 | [`nn.grid_sample`](nn.grid\_sample.md) | Computes the grid sample of the input tensor and input grid. |
 | [`nn.col2im`](nn.col2im.md) | Rearranges column blocks back into a multidimensional image |
-| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weigth tensor. |
-| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weigth tensor. |
+| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weight tensor. |
+| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weight tensor. |
diff --git a/docs/framework/operators/neural-network/nn.col2im.md b/docs/framework/operators/neural-network/nn.col2im.md
index fd5e82ffa..6c7b1af05 100644
--- a/docs/framework/operators/neural-network/nn.col2im.md
+++ b/docs/framework/operators/neural-network/nn.col2im.md
@@ -1,4 +1,3 @@
-
 # NNTrait::col2im
 
 ```rust
diff --git a/docs/framework/operators/neural-network/nn.conv.md b/docs/framework/operators/neural-network/nn.conv.md
index 086737f0b..fd7d53010 100644
--- a/docs/framework/operators/neural-network/nn.conv.md
+++ b/docs/framework/operators/neural-network/nn.conv.md
@@ -15,7 +15,7 @@
 ) -> Tensor<T>
 ```
 
-The convolution operator consumes an input tensor and a filter (input weigth tensor), and computes the output.
+The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output.
 
 ## Args
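Since `nn.conv` (and, in its transposed form, `nn.conv_transpose` below) is defined by the same sliding-window sum, here is a tiny NumPy sketch of a plain 2D convolution (cross-correlation, as in ONNX `Conv`) for a single channel, no padding, stride 1 β€” an illustration of the arithmetic only, not Orion's Cairo implementation:

```python
import numpy as np

def conv2d_valid(x: np.ndarray, w: np.ndarray) -> np.ndarray:
    """Single-channel 2D cross-correlation, no padding, stride 1."""
    kh, kw = w.shape
    oh, ow = x.shape[0] - kh + 1, x.shape[1] - kw + 1
    y = np.zeros((oh, ow))
    for i in range(oh):
        for j in range(ow):
            # Each output element sums an elementwise product over one window.
            y[i, j] = np.sum(x[i:i + kh, j:j + kw] * w)
    return y

x = np.arange(16, dtype=float).reshape(4, 4)
w = np.ones((2, 2))
print(conv2d_valid(x, w))   # 3x3 output; each entry sums a 2x2 window
```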
diff --git a/docs/framework/operators/neural-network/nn.conv_transpose.md b/docs/framework/operators/neural-network/nn.conv_transpose.md
index 29b2af6d2..83082fd94 100644
--- a/docs/framework/operators/neural-network/nn.conv_transpose.md
+++ b/docs/framework/operators/neural-network/nn.conv_transpose.md
@@ -16,7 +16,7 @@
 ) -> Tensor<T>
 ```
 
-The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output.
+The convolution transpose operator consumes an input tensor and an input weight tensor, and computes the output.
 
 ## Args
diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md
index 46de5f3ad..fe2995096 100644
--- a/docs/framework/operators/tensor/README.md
+++ b/docs/framework/operators/tensor/README.md
@@ -118,6 +118,7 @@ use orion::operators::tensor::TensorTrait;
 | [`tensor.gather_nd`](tensor.gather\_nd.md) | Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b. |
 | [`tensor.reduce_log_sum`](tensor.reduce\_log\_sum.md) | Computes the log sum of the input tensor's elements along the provided axes. |
 | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. |
+| [`tensor.reduce_log_sum_exp`](tensor.reduce\_log\_sum\_exp.md) | Computes the log sum of the exponentials of the input tensor's elements along the provided axes. |
 | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. |
 | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified β€˜axis’. |
 | [`tensor.random_uniform_like`](tensor.random\_uniform\_like.md) | RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor. |
@@ -130,6 +131,7 @@ use orion::operators::tensor::TensorTrait;
 | [`tensor.optional`](tensor.optional.md) | Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, or a non-empty value containing the input element. |
 | [`tensor.dynamic_quantize_linear`](tensor.dynamic\_quantize\_linear.md) | Computes the Scale, Zero Point and FP32->8Bit conversion of FP32 Input data. |
 | [`tensor.scatter_nd`](tensor.scatter\_nd.md) | The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data |
+| [`tensor.label_encoder`](tensor.label\_encoder.md) | Maps each element in the input tensor to another value. |
 
 ## Arithmetic Operations
diff --git a/docs/framework/operators/tensor/scatter.max.md b/docs/framework/operators/tensor/scatter.max.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/framework/operators/tensor/tensor.label_encoder.md b/docs/framework/operators/tensor/tensor.label_encoder.md
new file mode 100644
index 000000000..20bfd212e
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.label_encoder.md
@@ -0,0 +1,110 @@
+# tensor.label_encoder
+
+```rust
+fn label_encoder(self: @Tensor<T>, default_list: Option<Span<T>>, default_tensor: Option<Tensor<T>>, keys: Option<Span<T>>, keys_tensor: Option<Tensor<T>>, values: Option<Span<T>>, values_tensor: Option<Tensor<T>>) -> Tensor<T>;
+```
+
+Maps each element in the input tensor to another value.
+
+The mapping is determined by the two parallel attributes, 'keys_*' and 'values_*'.
+The i-th value in the specified 'keys_*' attribute is mapped to the i-th value in the specified 'values_*' attribute.
+It implies that the input's element type and the element type of the specified 'keys_*' should be identical, while the output type is identical to the specified 'values_*' attribute.
+
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `default_list`(`Option<Span<T>>`) - The default span.
+* `default_tensor`(`Option<Tensor<T>>`) - The default tensor.
+* `keys`(`Option<Span<T>>`) - The keys span.
+* `keys_tensor`(`Option<Tensor<T>>`) - The keys tensor.
+* `values`(`Option<Span<T>>`) - The values span.
+* `values_tensor`(`Option<Tensor<T>>`) - The values tensor.
+
+One and only one of the 'default_*' inputs should be set.
+One and only one of the 'keys*' inputs should be set.
+One and only one of the 'values*' inputs should be set.
+
+## Panics
+
+* Panics if the len/shape of keys and values are not the same.
+
+## Returns
+
+A new `Tensor<T>` which maps each element in the input tensor to another value.
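The mapping semantics are easy to state in Python. This sketch mirrors the ONNX reference behaviour (and the `labelEncoder` helper in Orion's test generator, further down in this diff); it also shows that when a key appears twice, the later key/value pair wins:

```python
import numpy as np

def label_encode(x, keys, values, default):
    # Map each element through dict(zip(keys, values)); unmapped elements get default.
    table = dict(zip(keys, values))
    return np.array([table.get(v, default) for v in x.flatten()]).reshape(x.shape)

x = np.array([1, 2, 3, 1, 4, 5])
print(label_encode(x, keys=[1, 2, 1], values=[8, 9, 7], default=999))
# [  7   9 999   7 999 999]  -- duplicate key 1 maps to 7, the later value
```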
+## Type Constraints
+
+* `T` in (`Tensor<u32>`, `Tensor<i32>`, `Tensor<i8>`, `Tensor<FP8x23>`, `Tensor<FP16x16>`)
+
+## Examples
+
+```rust
+use array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
+
+fn label_encoder_example() -> Tensor<u32> {
+    fn data() -> Tensor<u32> {
+        let mut sizes = ArrayTrait::new();
+        sizes.append(2);
+        sizes.append(3);
+        let mut data = ArrayTrait::new();
+        data.append(1);
+        data.append(2);
+        data.append(3);
+        data.append(1);
+        data.append(4);
+        data.append(5);
+
+        let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+        return tensor;
+    }
+
+    fn keys() -> Tensor<u32> {
+        let mut sizes = ArrayTrait::new();
+        sizes.append(3);
+        sizes.append(1);
+
+        let mut data = ArrayTrait::new();
+        data.append(1);
+        data.append(2);
+        data.append(1);
+
+        let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+        return tensor;
+    }
+
+    fn values() -> Tensor<u32> {
+        let mut sizes = ArrayTrait::new();
+        sizes.append(3);
+        sizes.append(1);
+
+        let mut data = ArrayTrait::new();
+        data.append(8);
+        data.append(9);
+        data.append(7);
+
+        let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+        return tensor;
+    }
+
+    fn default() -> Tensor<u32> {
+        let mut sizes = ArrayTrait::new();
+        sizes.append(1);
+
+        let mut data = ArrayTrait::new();
+        data.append(999);
+
+        let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+        return tensor;
+    }
+
+    let data = data();
+    let keys = keys();
+    let values = values();
+    let default = default();
+    return data.label_encoder(default_list: Option::None, default_tensor: Option::Some(default),
+        keys: Option::None, keys_tensor: Option::Some(keys),
+        values: Option::None, values_tensor: Option::Some(values));
+}
+>>> [7, 9, 999, 7, 999, 999]
+```
diff --git a/docs/framework/operators/tensor/tensor.min.md b/docs/framework/operators/tensor/tensor.min.md
index 92bc2d150..12deae2e3 100644
--- a/docs/framework/operators/tensor/tensor.min.md
+++ b/docs/framework/operators/tensor/tensor.min.md
@@ -4,7 +4,7 @@
 fn min(tensors: Span<Tensor<T>>) -> Tensor<T>;
 ```
 
-Returns the element-wise minumum values from a list of input tensors
+Returns the element-wise minimum values from a list of input tensors
 The input tensors must have either:
 * Exactly the same shape
 * The same number of dimensions and the length of each dimension is either a common length or 1.
diff --git a/docs/framework/operators/tensor/tensor.qlinear_add.md b/docs/framework/operators/tensor/tensor.qlinear_add.md
index b89987c21..bb997d9c3 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_add.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_add.md
@@ -8,7 +8,7 @@
 Performs the sum of quantized Tensors
 
 It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output.
 The quantization formula is y = saturate((x / y_scale) + y_zero_point).
-It perfoms the addition of the two vectors once dequantized, then return the quantization of the result of the addition.
+It performs the addition of the two vectors once dequantized, then returns the quantization of the result of the addition.
 The broadcasting is supported
 Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b').
 Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization.
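To unpack the formula, here is a small NumPy sketch of the dequantize β†’ add β†’ requantize round trip described above (u8 quantization is chosen purely for illustration; Orion's kernels operate on its own integer and fixed-point types):

```python
import numpy as np

def qlinear_add(a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp):
    # Dequantize both operands, add, then requantize:
    # y = saturate((x / y_scale) + y_zero_point)
    x = (a.astype(np.int32) - a_zp) * a_scale + (b.astype(np.int32) - b_zp) * b_scale
    y = np.rint(x / y_scale) + y_zp
    return np.clip(y, 0, 255).astype(np.uint8)   # saturate to the u8 range

a = np.array([10, 20, 30], dtype=np.uint8)
b = np.array([40, 50, 60], dtype=np.uint8)
print(qlinear_add(a, 0.1, 0, b, 0.1, 0, y_scale=0.2, y_zp=0))
# [25 35 45]  -- (1.0+4.0)/0.2, (2.0+5.0)/0.2, (3.0+6.0)/0.2
```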
diff --git a/docs/framework/operators/tensor/tensor.qlinear_matmul.md b/docs/framework/operators/tensor/tensor.qlinear_matmul.md
index b5928a8bb..eb52f4c03 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_matmul.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_matmul.md
@@ -8,7 +8,7 @@
 Multiplies quantized Tensors
 
 It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output.
 The quantization formula is y = saturate((x / y_scale) + y_zero_point).
-It perfoms the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes.
+It performs the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes.
 Then return the quantization of the result of the multiplication.
 Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b').
 Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization.
diff --git a/docs/framework/operators/tensor/tensor.qlinear_mul.md b/docs/framework/operators/tensor/tensor.qlinear_mul.md
index e1877a137..aeedd3365 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_mul.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_mul.md
@@ -8,7 +8,7 @@
 Performs the element-wise multiplication of quantized Tensors
 
 It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output.
 The quantization formula is y = saturate((x / y_scale) + y_zero_point).
-It perfoms the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication.
+It performs the element-wise multiplication of the two vectors once dequantized, then returns the quantization of the result of the multiplication.
 The broadcasting is supported
 Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b').
 Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization.
diff --git a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md
new file mode 100644
index 000000000..8befd8c43
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md
@@ -0,0 +1,60 @@
+## tensor.reduce_log_sum_exp
+
+```rust
+   fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
+```
+
+Computes the log sum of the exponentials of the input tensor's elements along the provided axes.
+
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `axis`(`usize`) - The dimension to reduce.
+* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
+
+## Panics
+
+* Panics if axis is not in the range of the input tensor's dimensions.
+
+## Returns
+
+Returns a new `Tensor<T>` instance with the specified axis reduced to the log of the sum of the exponentials of its elements.
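Concretely, for a reduction along a single axis $k$, each output element is (a restatement of the definition above):

$$Y = \log\Big(\sum_{\text{axis } k} e^{X}\Big)$$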
+## Example
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn reduce_log_sum_exp() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(2);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 4294967296, sign: false });
+    data.append(FP32x32 { mag: 8589934592, sign: false });
+    data.append(FP32x32 { mag: 12884901888, sign: false });
+    data.append(FP32x32 { mag: 17179869184, sign: false });
+    data.append(FP32x32 { mag: 21474836480, sign: false });
+    data.append(FP32x32 { mag: 25769803776, sign: false });
+    data.append(FP32x32 { mag: 30064771072, sign: false });
+    data.append(FP32x32 { mag: 34359738368, sign: false });
+    data.append(FP32x32 { mag: 38654705664, sign: false });
+    data.append(FP32x32 { mag: 42949672960, sign: false });
+    data.append(FP32x32 { mag: 47244640256, sign: false });
+    data.append(FP32x32 { mag: 51539607552, sign: false });
+
+    let tensor = TensorTrait::<FP32x32>::new(shape.span(), data.span());
+
+    return tensor.reduce_log_sum_exp(axis: 2, keepdims: false);
+}
+>>> [[2.3132617, 4.3132617], [6.3132617, 8.3132617], [10.313262, 12.313262]]
+```
diff --git a/nodegen/helpers.py b/nodegen/helpers.py
index 9983f62e7..3508ac305 100644
--- a/nodegen/helpers.py
+++ b/nodegen/helpers.py
@@ -10,7 +10,7 @@ class FixedImpl(Enum):
     FP8x23 = 'FP8x23'
     FP16x16 = 'FP16x16'
-    FP64x64 = 'FP64x64'
+    FP32x32 = 'FP32x32'
@@ -20,14 +20,15 @@ def to_fp(x: np.ndarray, fp_impl: FixedImpl):
         case FixedImpl.FP8x23:
             return (x * 2**23).astype(np.int64)
         case FixedImpl.FP16x16:
             return (x * 2**16).astype(np.int64)
-        case FixedImpl.FP64x64:
-            return (x * 2**64)
+        case FixedImpl.FP32x32:
+            return (x * 2**32).astype(np.int64)
+
 
 class Dtype(Enum):
     FP8x23 = 'FP8x23'
     FP16x16 = 'FP16x16'
-    FP64x64 = 'FP64x64'
+    FP32x32 = 'FP32x32'
     I8 = 'i8'
     I32 = 'i32'
     U32 = 'u32'
@@ -173,8 +174,8 @@ def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
             return ["FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
         case Dtype.FP16x16:
             return ["FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
-        case Dtype.FP64x64:
-            return ["FP64x64 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
+        case Dtype.FP32x32:
+            return ["FP32x32 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
         case Dtype.BOOL:
             return [str(x).lower() for x in data.flatten()]
         case Dtype.COMPLEX64:
@@ -253,6 +254,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
     Dtype.FP16x16: ["orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}",],
     Dtype.BOOL: ["orion::operators::tensor::BoolTensor",],
     Dtype.COMPLEX64: ["orion::operators::tensor::Complex64Tensor",],
+    Dtype.FP32x32: ["orion::operators::tensor::FP32x32Tensor",],
 }
@@ -280,6 +282,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
     Dtype.I8: ["orion::operators::tensor::I8TensorPartialEq",],
     Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq",],
     Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq",],
+    Dtype.FP32x32: ["orion::operators::tensor::FP32x32TensorPartialEq",],
     Dtype.BOOL: ["orion::operators::tensor::BoolTensorPartialEq",],
     Dtype.COMPLEX64: ["orion::operators::tensor::Complex64TensorPartialEq",],
 }
@@ -291,6 +294,7 @@ def
find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]: Dtype.I8: ["orion::numbers::NumberTrait"], Dtype.FP8x23: ["orion::numbers::{FixedTrait, FP8x23}",], Dtype.FP16x16: ["orion::numbers::{FixedTrait, FP16x16}",], + Dtype.FP32x32: ["orion::numbers::{FixedTrait, FP32x32}",], Dtype.BOOL: [], Dtype.COMPLEX64: ["orion::numbers::{NumberTrait, complex64}",], } \ No newline at end of file diff --git a/nodegen/node/label_encoder.py b/nodegen/node/label_encoder.py new file mode 100644 index 000000000..d5f4407f0 --- /dev/null +++ b/nodegen/node/label_encoder.py @@ -0,0 +1,203 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 +# pylint: disable=R0913,R0914,W0221 +def labelEncoder( # type: ignore + x, + default_float=None, + default_int64=None, + default_string=None, + keys_floats=None, + keys_int64s=None, + keys_strings=None, + values_floats=None, + values_int64s=None, + values_strings=None, +): + keys = keys_floats if keys_floats is not None else (keys_int64s if np.any(keys_int64s) else keys_strings) + values = values_floats if values_floats is not None else (values_int64s if np.any(values_int64s) else values_strings) + + classes = dict(zip(keys, values)) + if id(keys) == id(keys_floats): + cast = float + elif id(keys) == id(keys_int64s): + cast = int # type: ignore + else: + cast = str # type: ignore + if id(values) == id(values_floats): + defval = default_float + dtype = np.float32 + elif id(values) == id(values_int64s): + defval = default_int64 + dtype = np.int64 # type: ignore + else: + defval = default_string + if not isinstance(defval, str): + defval = "" + dtype = np.str_ # type: ignore + shape = x.shape + if len(x.shape) > 1: + x = x.flatten() + res = [] + for i in range(0, x.shape[0]): + v = classes.get(cast(x[i]), defval) + res.append(v) + return np.array(res, dtype=dtype).reshape(shape) + +class Label_encoder(RunAll): + + @staticmethod + def label_encoder_fp16x16(): + + def labelencoder(): + def default(): + x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3]).astype(np.int64) + keys = np.array([1, 2, 5, 6, ]).astype(np.int64) + values = np.array([11, 22, 55, 66]).astype(np.int64) + default = np.array(99).astype(np.int64) + + y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + default = Tensor(Dtype.FP16x16, default.shape, to_fp(default.flatten(), FixedImpl.FP16x16)) + keys = Tensor(Dtype.FP16x16, keys.shape, to_fp(keys.flatten(), FixedImpl.FP16x16)) + values = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "label_encoder_fp16x16_3d_default" + make_test( + inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), + keys:Option::None, keys_tensor: Option::Some(input_2), + values: Option::None, values_tensor: Option::Some(input_3))""", + name= name) + + default() + labelencoder() + + @staticmethod + def label_encoder_fp8x23(): + + def label_encoder(): + def default(): + + x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int64) + keys = np.array([1, 2, 5, 6, 7]).astype(np.int64) + values = np.array([11, 22, 55, 66, 77]).astype(np.int64) + default = np.array(99).astype(np.int64) + + y = labelEncoder(x=x, 
keys_int64s=keys, values_int64s=values, default_int64=default) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23)) + default = Tensor(Dtype.FP8x23, default.shape, to_fp(default.flatten(), FixedImpl.FP8x23)) + keys = Tensor(Dtype.FP8x23, keys.shape, to_fp(keys.flatten(), FixedImpl.FP8x23)) + values = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) + + name = "label_encoder_fp8x23_default" + + make_test( + inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), + keys:Option::None, keys_tensor: Option::Some(input_2), + values: Option::None, values_tensor: Option::Some(input_3))""", + name= name) + + + + default() + label_encoder() + + @staticmethod + def label_encoder_i8(): + + def label_encoder_3D(): + def default(): + + x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int8) + keys = np.array([1, 2, 5, 6, 7]).astype(np.int8) + values = np.array([11, 22, 55, 66, 77]).astype(np.int8) + default = np.array(99).astype(np.int8) + + y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default) + + x = Tensor(Dtype.I8, x.shape, x.flatten()) + default = Tensor(Dtype.I8, default.shape, default.flatten()) + keys = Tensor(Dtype.I8, keys.shape, keys.flatten()) + values = Tensor(Dtype.I8, values.shape, values.flatten()) + y = Tensor(Dtype.I8, y.shape, y.flatten()) + + name = "label_encoder_i8_default" + make_test( + inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), + keys:Option::None, keys_tensor: Option::Some(input_2), + values: Option::None, values_tensor: Option::Some(input_3))""", + name= name) + + + default() + label_encoder_3D() + + + @staticmethod + def label_encoder_i32(): + def label_encoder_3D(): + def default(): + x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int32) + keys = np.array([1, 2, 5, 6, 7]).astype(np.int32) + values = np.array([11, 22, 55, 66, 77]).astype(np.int32) + default = np.array(99).astype(np.int32) + + y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default) + + x = Tensor(Dtype.I32, x.shape, x.flatten()) + default = Tensor(Dtype.I32, default.shape, default.flatten()) + keys = Tensor(Dtype.I32, keys.shape, keys.flatten()) + values = Tensor(Dtype.I32, values.shape, values.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "label_encoder_i32_default" + make_test( + inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), + keys:Option::None, keys_tensor: Option::Some(input_2), + values: Option::None, values_tensor: Option::Some(input_3))""", + name= name) + + + + + default() + label_encoder_3D() + + + @staticmethod + def label_encoder_u32(): + + def label_encoder_3D(): + def default(): + + x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.uint32) + keys = np.array([1, 2, 5, 6, 7]).astype(np.uint32) + values = np.array([11, 22, 55, 66, 77]).astype(np.uint32) + default = np.array(99).astype(np.uint32) + + y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + default = Tensor(Dtype.U32, default.shape, default.flatten()) + keys = Tensor(Dtype.U32, keys.shape, keys.flatten()) + values = 
Tensor(Dtype.U32, values.shape, values.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "label_encoder_u32_default" + + make_test( + inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), + keys:Option::None, keys_tensor: Option::Some(input_2), + values: Option::None, values_tensor: Option::Some(input_3))""", + name= name) + + + default() + label_encoder_3D() diff --git a/nodegen/node/reduce_log_sum.py b/nodegen/node/reduce_log_sum.py index 259081f5a..9dc8ad4df 100644 --- a/nodegen/node/reduce_log_sum.py +++ b/nodegen/node/reduce_log_sum.py @@ -1,7 +1,7 @@ import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl -import numpy as np + class Reduce_log_sum(RunAll): diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py new file mode 100644 index 000000000..e4da8b1e0 --- /dev/null +++ b/nodegen/node/reduce_log_sum_exp.py @@ -0,0 +1,62 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, Tensor, Dtype, FixedImpl, to_fp + +class Reduce_log_sum_exp(RunAll): + @staticmethod + def reduce_log_sum_exp_fp32x32(): + def reduce_log_sum_exp_export_do_not_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = False + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.float64) + + x = Tensor(Dtype.FP32x32, x.shape, to_fp( + x.flatten(), FixedImpl.FP32x32)) + y = Tensor(Dtype.FP32x32, y.shape, to_fp( + y.flatten(), FixedImpl.FP32x32)) + + name = "reduce_log_sum_exp_fp32x32_export_do_not_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, false)", name) + + def reduce_log_sum_exp_export_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64) + + x = Tensor(Dtype.FP32x32, x.shape, to_fp( + x.flatten(), FixedImpl.FP32x32)) + y = Tensor(Dtype.FP32x32, y.shape, to_fp( + y.flatten(), FixedImpl.FP32x32)) + + name = "reduce_log_sum_exp_fp32x32_export_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, true)", name) + + def reduce_log_sum_exp_axis_0(): + shape = [3, 2, 2] + axes = np.array([0], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64) + + x = Tensor(Dtype.FP32x32, x.shape, to_fp( + x.flatten(), FixedImpl.FP32x32)) + y = Tensor(Dtype.FP32x32, y.shape, to_fp( + y.flatten(), FixedImpl.FP32x32)) + + name = "reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(0, true)", name) + + + reduce_log_sum_exp_export_do_not_keepdims() + reduce_log_sum_exp_export_keepdims() + reduce_log_sum_exp_axis_0() + + + diff --git a/nodegen/node/reduce_prod.py b/nodegen/node/reduce_prod.py deleted file mode 100644 index 7d145bae1..000000000 --- a/nodegen/node/reduce_prod.py +++ /dev/null @@ -1,287 +0,0 @@ -import numpy as np -from nodegen.node import RunAll -from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl - - -class Reduce_prod(RunAll): - @staticmethod - def reduce_prod_u32(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.uint32) - y = np.array([0]).astype(np.uint32) - - x = 
Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([0, 3]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([0, 3]).astype(np.uint32).reshape(1, 2) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([0, 6]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_i32(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int32) - y = np.array([0]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([0, 3]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([0, 3]).astype(np.int32).reshape(1, 2) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([0, 6]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_i8(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int8) - y = np.array([0]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_prod_i8_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([0, 3]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_prod_i8_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([0, 3]).astype(np.int8).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - 
name = "reduce_prod_i8_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([0, 6]).astype(np.int8) - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_prod_i8_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_fp8x23(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([0]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 6]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_fp16x16(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([0]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 6]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), 
FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_prod_1D() - reduce_prod_2D() diff --git a/nodegen/node/squeeze.py b/nodegen/node/squeeze.py index 2f598e1ea..44d1d4a22 100644 --- a/nodegen/node/squeeze.py +++ b/nodegen/node/squeeze.py @@ -15,11 +15,11 @@ def squeeze(): name = "squeeze_i8" make_test( - [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name) + [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name) squeeze() @staticmethod - def squeeze_i32(): + def squeeze(): def squeeze(): x = np.ones((1, 2, 1, 2, 1), dtype=np.int32) y = np.ones((2, 2, 1), dtype=np.int32) @@ -27,9 +27,9 @@ def squeeze(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "squeeze_i32" + name = "squeeze" make_test( - [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name) + [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name) squeeze() @staticmethod @@ -43,7 +43,7 @@ def squeeze(): name = "squeeze_u32" make_test( - [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name) + [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name) squeeze() @staticmethod @@ -59,7 +59,7 @@ def squeeze(): name = "squeeze_fP16x16" make_test( - [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name) + [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name) squeeze() @staticmethod @@ -75,5 +75,5 @@ def squeeze(): name = "squeeze_fP8x23" make_test( - [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name) + [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name) squeeze() diff --git a/src/numbers.cairo b/src/numbers.cairo index 156fc5bf5..ddd95fa10 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -1432,9 +1432,10 @@ impl FP32x32Number of NumberTrait { impl I8Number of NumberTrait { fn new(mag: i8, sign: bool) -> i8 { - if sign{ + if sign { return -mag; } + mag } @@ -1559,7 +1560,7 @@ impl I8Number of NumberTrait { fn abs(self: i8) -> i8 { if self >= 0 { - return self; + self } else { self * -1_i8 } @@ -1579,7 +1580,7 @@ impl I8Number of NumberTrait { fn min(self: i8, other: i8) -> i8 { if self < other { - return self; + self } else { other } @@ -1587,7 +1588,7 @@ impl I8Number of NumberTrait { fn max(self: i8, other: i8) -> i8 { if self > other { - return self; + self } else { other } @@ -1603,43 +1604,43 @@ impl I8Number of NumberTrait { fn xor(lhs: i8, rhs: i8) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i8, rhs: i8) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i8) -> i8 { if self == 0 { - return 0_i8; + 0_i8 } else if self > 0 { - return 1_i8; + 1_i8 } else { -1_i8 } } fn and(lhs: i8, rhs: i8) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i8, x: i8, y: i8) -> i8 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -1656,7 +1657,7 @@ impl I8Number of NumberTrait { } fn is_inf(self: i8) -> bool { - (self == 127 || self == -127) + self == 127 || self == -127 } fn is_pos_inf(self: i8) -> bool { @@ -1691,8 +1692,10 @@ impl I8Number of NumberTrait 
{ impl I8Div of Div { fn div(lhs: i8, rhs: i8) -> i8 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -1700,6 +1703,7 @@ impl I8Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -1708,6 +1712,7 @@ impl I8Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -1727,11 +1732,14 @@ impl I8IntoFP8x23 of Into { fn into(self: i8) -> FP8x23 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); + FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } @@ -1740,11 +1748,14 @@ impl I8IntoFP16x16 of Into { fn into(self: i8) -> FP16x16 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); + FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } @@ -1753,11 +1764,14 @@ impl I8IntoFP64x64 of Into { fn into(self: i8) -> FP64x64 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); + FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } @@ -1766,20 +1780,24 @@ impl I8IntoFP32x32 of Into { fn into(self: i8) -> FP32x32 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); + FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } impl I16Number of NumberTrait { fn new(mag: i16, sign: bool) -> i16 { - if sign{ + if sign { return -mag; } + mag } @@ -1904,7 +1922,7 @@ impl I16Number of NumberTrait { fn abs(self: i16) -> i16 { if self >= 0 { - return self; + self } else { self * -1_i16 } @@ -1924,7 +1942,7 @@ impl I16Number of NumberTrait { fn min(self: i16, other: i16) -> i16 { if self < other { - return self; + self } else { other } @@ -1932,7 +1950,7 @@ impl I16Number of NumberTrait { fn max(self: i16, other: i16) -> i16 { if self > other { - return self; + self } else { other } @@ -1948,43 +1966,43 @@ impl I16Number of NumberTrait { fn xor(lhs: i16, rhs: i16) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i16, rhs: i16) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i16) -> i16 { if self == 0 { - return 0_i16; + 0_i16 } else if self > 0 { - return 1_i16; + 1_i16 } else { -1_i16 } } fn and(lhs: i16, rhs: i16) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i16, x: i16, y: i16) -> i16 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2001,7 +2019,7 @@ impl 
I16Number of NumberTrait { } fn is_inf(self: i16) -> bool { - (self == 32767 || self == -32767) + self == 32767 || self == -32767 } fn is_pos_inf(self: i16) -> bool { @@ -2036,8 +2054,10 @@ impl I16Number of NumberTrait { impl I16Div of Div { fn div(lhs: i16, rhs: i16) -> i16 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2045,6 +2065,7 @@ impl I16Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2053,6 +2074,7 @@ impl I16Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -2070,9 +2092,10 @@ impl I16DivEq of DivEq { impl I32Number of NumberTrait { fn new(mag: i32, sign: bool) -> i32 { - if sign{ + if sign { return -mag; } + mag } @@ -2197,7 +2220,7 @@ impl I32Number of NumberTrait { fn abs(self: i32) -> i32 { if self >= 0 { - return self; + self } else { self * -1_i32 } @@ -2217,7 +2240,7 @@ impl I32Number of NumberTrait { fn min(self: i32, other: i32) -> i32 { if self < other { - return self; + self } else { other } @@ -2225,7 +2248,7 @@ impl I32Number of NumberTrait { fn max(self: i32, other: i32) -> i32 { if self > other { - return self; + self } else { other } @@ -2241,43 +2264,43 @@ impl I32Number of NumberTrait { fn xor(lhs: i32, rhs: i32) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i32, rhs: i32) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i32) -> i32 { if self == 0 { - return 0_i32; + 0_i32 } else if self > 0 { - return 1_i32; + 1_i32 } else { -1_i32 } } fn and(lhs: i32, rhs: i32) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i32, x: i32, y: i32) -> i32 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2294,7 +2317,7 @@ impl I32Number of NumberTrait { } fn is_inf(self: i32) -> bool { - (self == 2147483647 || self == -2147483647) + self == 2147483647 || self == -2147483647 } fn is_pos_inf(self: i32) -> bool { @@ -2329,8 +2352,10 @@ impl I32Number of NumberTrait { impl I32Div of Div { fn div(lhs: i32, rhs: i32) -> i32 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2338,6 +2363,7 @@ impl I32Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2346,6 +2372,7 @@ impl I32Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -2365,20 +2392,24 @@ impl I32IntoU32 of Into { fn into(self: i32) -> u32 { let number_sign: bool = self < 0; let mut self_positive: i32 = self; + if number_sign { self_positive = self_positive * -1_i32 } + let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); + 
number_u32 } } impl I64Number of NumberTrait { fn new(mag: i64, sign: bool) -> i64 { - if sign{ + if sign { return -mag; } + mag } @@ -2503,7 +2534,7 @@ impl I64Number of NumberTrait { fn abs(self: i64) -> i64 { if self >= 0 { - return self; + self } else { self * -1_i64 } @@ -2523,7 +2554,7 @@ impl I64Number of NumberTrait { fn min(self: i64, other: i64) -> i64 { if self < other { - return self; + self } else { other } @@ -2531,7 +2562,7 @@ impl I64Number of NumberTrait { fn max(self: i64, other: i64) -> i64 { if self > other { - return self; + self } else { other } @@ -2547,43 +2578,43 @@ impl I64Number of NumberTrait { fn xor(lhs: i64, rhs: i64) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i64, rhs: i64) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i64) -> i64 { if self == 0 { - return 0_i64; + 0_i64 } else if self > 0 { - return 1_i64; + 1_i64 } else { -1_i64 } } fn and(lhs: i64, rhs: i64) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i64, x: i64, y: i64) -> i64 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2600,7 +2631,7 @@ impl I64Number of NumberTrait { } fn is_inf(self: i64) -> bool { - (self == 9223372036854775807 || self == -9223372036854775807) + self == 9223372036854775807 || self == -9223372036854775807 } fn is_pos_inf(self: i64) -> bool { @@ -2635,8 +2666,10 @@ impl I64Number of NumberTrait { impl I64Div of Div { fn div(lhs: i64, rhs: i64) -> i64 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2644,6 +2677,7 @@ impl I64Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2652,6 +2686,7 @@ impl I64Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -2669,9 +2704,10 @@ impl I64DivEq of DivEq { impl I128Number of NumberTrait { fn new(mag: i128, sign: bool) -> i128 { - if sign{ + if sign { return -mag; } + mag } @@ -2796,7 +2832,7 @@ impl I128Number of NumberTrait { fn abs(self: i128) -> i128 { if self >= 0 { - return self; + self } else { self * -1_i128 } @@ -2816,7 +2852,7 @@ impl I128Number of NumberTrait { fn min(self: i128, other: i128) -> i128 { if self < other { - return self; + self } else { other } @@ -2824,7 +2860,7 @@ impl I128Number of NumberTrait { fn max(self: i128, other: i128) -> i128 { if self > other { - return self; + self } else { other } @@ -2840,43 +2876,43 @@ impl I128Number of NumberTrait { fn xor(lhs: i128, rhs: i128) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i128, rhs: i128) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i128) -> i128 { if self == 0 { - return 0_i128; + 0_i128 } else if self > 0 { - return 1_i128; + 1_i128 } else { -1_i128 } } fn and(lhs: i128, rhs: i128) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else 
{ - return true; + true } } fn where(self: i128, x: i128, y: i128) -> i128 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2893,8 +2929,8 @@ impl I128Number of NumberTrait { } fn is_inf(self: i128) -> bool { - (self == 170141183460469231731687303715884105727 - || self == -170141183460469231731687303715884105727) + self == 170141183460469231731687303715884105727 + || self == -170141183460469231731687303715884105727 } fn is_pos_inf(self: i128) -> bool { @@ -2929,8 +2965,10 @@ impl I128Number of NumberTrait { impl I128Div of Div { fn div(lhs: i128, rhs: i128) -> i128 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2938,6 +2976,7 @@ impl I128Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2946,6 +2985,7 @@ impl I128Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); + // assigning the sign and returning if lhs * rhs < 0 { signed_int_result * -1 @@ -3105,7 +3145,7 @@ impl u32Number of NumberTrait { fn min(self: u32, other: u32) -> u32 { if self < other { - return self; + self } else { other } @@ -3113,7 +3153,7 @@ impl u32Number of NumberTrait { fn max(self: u32, other: u32) -> u32 { if self > other { - return self; + self } else { other } @@ -3129,17 +3169,17 @@ impl u32Number of NumberTrait { fn xor(lhs: u32, rhs: u32) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: u32, rhs: u32) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } @@ -3148,18 +3188,18 @@ impl u32Number of NumberTrait { } fn and(lhs: u32, rhs: u32) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: u32, x: u32, y: u32) -> u32 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -3324,6 +3364,7 @@ impl Complex64Number of NumberTrait { if self == Complex64Impl::zero() { return true; } + false } @@ -3343,6 +3384,7 @@ impl Complex64Number of NumberTrait { if self == Complex64Impl::one() { return true; } + false } @@ -3447,6 +3489,7 @@ impl U32IntoI32 of Into { fn into(self: u32) -> i32 { let number_felt: felt252 = self.into(); let number_i32: i32 = number_felt.try_into().unwrap(); + number_i32 } } diff --git a/src/numbers/complex_number/complex64.cairo b/src/numbers/complex_number/complex64.cairo index 20fb57f88..9edcb8d1a 100644 --- a/src/numbers/complex_number/complex64.cairo +++ b/src/numbers/complex_number/complex64.cairo @@ -15,7 +15,6 @@ struct complex64 { } // CONSTANTS for FP64x64 - const PI: u128 = 57952155664616982739; const HALF_PI: u128 = 28976077832308491370; const TWO: u128 = 36893488147419103232; @@ -40,15 +39,16 @@ impl Complex64Impl of ComplexTrait { } fn zero() -> complex64 { - return complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() }; + complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() } } fn one() -> complex64 { - return complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() }; + complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() } } fn mag(self: complex64) -> FP64x64 { let two = FP64x64Impl::new(TWO, false); + 
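The `complex64` constants introduced above are raw FP64x64 magnitudes, i.e. the real value scaled by 2^64. A quick arithmetic check against the declared values:

```latex
2 \cdot 2^{64} = 36893488147419103232, \qquad
\pi \cdot 2^{64} \approx 57952155664616982739, \qquad
\tfrac{\pi}{2} \cdot 2^{64} \approx 28976077832308491370
```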
(self.real.pow(two) + self.img.pow(two)).sqrt() } @@ -59,15 +59,16 @@ impl Complex64Impl of ComplexTrait { fn exp(self: complex64) -> complex64 { let real = self.real.exp() * self.img.cos(); let img = self.real.exp() * self.img.sin(); + complex64 { real, img } } fn exp2(self: complex64) -> complex64 { let two = complex64 { real: FP64x64Impl::new(TWO, false), img: FP64x64Impl::ZERO() }; + two.pow(self) } - fn sqrt(self: complex64) -> complex64 { let x = self.real; let y = self.img; @@ -78,26 +79,29 @@ impl Complex64Impl of ComplexTrait { } else { (((x.pow(two) + y.pow(two)).sqrt() - x) / two).sqrt() }; - let img = FP64x64Impl::new(img.mag, y.sign); + complex64 { real, img } } fn ln(self: complex64) -> complex64 { let real = self.mag().ln(); let img = self.arg(); + complex64 { real, img } } fn log2(self: complex64) -> complex64 { let ln_2 = FP64x64Impl::new(12786309186476892720, false); let ln = self.ln(); + complex64 { real: (ln.real / ln_2), img: (ln.img / ln_2) } } fn log10(self: complex64) -> complex64 { let ln_10 = FP64x64Impl::new(42475197399893398429, false); let ln = self.ln(); + complex64 { real: (ln.real / ln_10), img: (ln.img / ln_10) } } @@ -129,6 +133,7 @@ impl Complex64Impl of ComplexTrait { let B = b.real * self.arg() + b.img * self.mag().ln(); let real = A * B.cos(); let img = A * B.sin(); + complex64 { real, img } } @@ -136,17 +141,18 @@ impl Complex64Impl of ComplexTrait { fn cos(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::cos(a) * FP64x64Impl::cosh(b), img: -FP64x64Impl::sin(a) * FP64x64Impl::sinh(b) } } - //sin(z) = sin(a+bi) = sin(a)cosh(b)+icos(a)sinh(b) fn sin(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::sin(a) * FP64x64Impl::cosh(b), img: FP64x64Impl::cos(a) * FP64x64Impl::sinh(b) @@ -159,6 +165,7 @@ impl Complex64Impl of ComplexTrait { let a = self.real; let b = self.img; let den = FP64x64Impl::cosh(two * b) + FP64x64Impl::cos(two * a); + complex64 { real: FP64x64Impl::sin(two * a) / den, img: FP64x64Impl::sinh(two * b) / den } } @@ -184,7 +191,6 @@ impl Complex64Impl of ComplexTrait { asin } - //atan(z) = 1/2 * i[ln (1 - iz) - ln(1 + iz)] fn atan(self: complex64) -> complex64 { let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO()); @@ -198,7 +204,6 @@ impl Complex64Impl of ComplexTrait { atan } - //acosh(z) = ln (z + sqrt(z + 1) * sqrt(z - 1)) fn acosh(self: complex64) -> complex64 { let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO()); @@ -218,7 +223,6 @@ impl Complex64Impl of ComplexTrait { asinh } - //atanh(z) = 1/2 * [ln (1 + z) - ln(1 - z)] fn atanh(self: complex64) -> complex64 { let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO()); @@ -232,6 +236,7 @@ impl Complex64Impl of ComplexTrait { fn cosh(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::cosh(a) * FP64x64Impl::cos(b), img: FP64x64Impl::sinh(a) * FP64x64Impl::sin(b) @@ -242,6 +247,7 @@ impl Complex64Impl of ComplexTrait { fn sinh(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::sinh(a) * FP64x64Impl::cos(b), img: FP64x64Impl::cosh(a) * FP64x64Impl::sin(b) @@ -254,6 +260,7 @@ impl Complex64Impl of ComplexTrait { let a = self.real; let b = self.img; let den = FP64x64Impl::cosh(two * a) + FP64x64Impl::cos(two * b); + complex64 { real: FP64x64Impl::sinh(two * a) / den, img: FP64x64Impl::sin(two * b) / den } } @@ -261,12 
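For reference, with z = a + bi these are the rectangular-form identities the `Complex64Impl` methods above implement:

```latex
e^{z} = e^{a}(\cos b + i\sin b), \qquad
\ln z = \ln\lvert z\rvert + i\arg z, \qquad
\sqrt{z} = \sqrt{\tfrac{\lvert z\rvert + a}{2}} + i\,\operatorname{sgn}(b)\sqrt{\tfrac{\lvert z\rvert - a}{2}}
```

```latex
\cos z = \cos a\cosh b - i\sin a\sinh b, \qquad
\sin z = \sin a\cosh b + i\cos a\sinh b, \qquad
\tan z = \frac{\sin 2a + i\sinh 2b}{\cos 2a + \cosh 2b}
```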
+268,14 @@ impl Complex64Impl of ComplexTrait { fn to_polar(self: complex64) -> (FP64x64, FP64x64) { let mag = self.mag(); let arg = self.arg(); - return (mag, arg); + + (mag, arg) } fn from_polar(mag: FP64x64, arg: FP64x64) -> complex64 { let real = mag * arg.cos(); let img = mag * arg.sin(); + complex64 { real, img } } @@ -277,6 +286,7 @@ impl Complex64Impl of ComplexTrait { let real = x / (x.pow(two) + y.pow(two)); let img = -y / (x.pow(two) + y.pow(two)); + complex64 { real, img } } } @@ -361,7 +371,6 @@ impl Complex64DivEq of DivEq { } } - // Implements the PartialEq trait for complex64. impl Complex64PartialEq of PartialEq { fn eq(lhs: @complex64, rhs: @complex64) -> bool { @@ -394,7 +403,8 @@ impl Complex64Neg of Neg { fn complex64_add(a: complex64, b: complex64) -> complex64 { let real = a.real + b.real; let img = a.img + b.img; - return ComplexTrait::new(real, img); + + ComplexTrait::new(real, img) } // Subtracts complex64 complex numbers. @@ -409,7 +419,8 @@ fn complex64_add(a: complex64, b: complex64) -> complex64 { fn complex64_sub(a: complex64, b: complex64) -> complex64 { let real = a.real - b.real; let img = a.img - b.img; - return ComplexTrait::new(real, img); + + ComplexTrait::new(real, img) } // Multiplies two complex64 integers. @@ -427,7 +438,8 @@ fn complex64_sub(a: complex64, b: complex64) -> complex64 { fn complex64_mul(a: complex64, b: complex64) -> complex64 { let real = a.real * b.real - a.img * b.img; let img = a.real * b.img + a.img * b.real; - return ComplexTrait::new(real, img); + + ComplexTrait::new(real, img) } // Divides the first complex64 by the second complex64. @@ -452,7 +464,7 @@ fn complex64_eq(a: complex64, b: complex64) -> bool { return true; } - return false; + false } // Compares two complex64 complex numbers for inequality. @@ -463,7 +475,7 @@ fn complex64_eq(a: complex64, b: complex64) -> bool { // * `bool` - `true` if the two complex numbers are not equal, `false` otherwise. fn complex64_ne(a: complex64, b: complex64) -> bool { // The result is the inverse of the equal function. - return !complex64_eq(a, b); + !complex64_eq(a, b) } // Negates the given complex64 complex number. @@ -473,5 +485,5 @@ fn complex64_ne(a: complex64, b: complex64) -> bool { // * `complex64` - The negation of `x`. fn complex64_neg(x: complex64) -> complex64 { // The negation of an complex number is obtained by negating its real part and its imaginary part. - return ComplexTrait::new(-x.real, -x.img); + ComplexTrait::new(-x.real, -x.img) } diff --git a/src/numbers/fixed_point/core.cairo b/src/numbers/fixed_point/core.cairo index 0ef1f8c6f..e35d8abdb 100644 --- a/src/numbers/fixed_point/core.cairo +++ b/src/numbers/fixed_point/core.cairo @@ -33,6 +33,7 @@ /// sinh - Returns the value of the hyperbolic sine of the fixed point number. /// tanh - Returns the value of the hyperbolic tangent of the fixed point number. /// sign - Returns the element-wise indication of the sign of the input fixed point number. +/// erf - Returns the error function of the input fixed point number computed element-wise. 
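Similarly, `to_polar`/`from_polar`, `inv`, and the arithmetic helpers restyled above follow the textbook forms (z = x + iy):

```latex
z = r(\cos\theta + i\sin\theta),\quad r = \sqrt{x^2 + y^2},\quad \theta = \arg z; \qquad
z^{-1} = \frac{x - iy}{x^2 + y^2}; \qquad
(a+bi)(c+di) = (ac - bd) + (ad + bc)i
```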
/// trait FixedTrait { /// # FixedTrait::new diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index d39820ce8..8f77324aa 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::fixed_point::implementations::fp16x16::math::{ core as core_math, trig, hyp, erf @@ -18,178 +14,177 @@ struct FP16x16 { } // CONSTANTS - const TWO: u32 = 131072; // 2 ** 17 const ONE: u32 = 65536; // 2 ** 16 const HALF: u32 = 32768; // 2 ** 15 const MAX: u32 = 2147483648; // 2 ** 31 - impl FP16x16Impl of FixedTrait { fn ZERO() -> FP16x16 { - return FP16x16 { mag: 0, sign: false }; + FP16x16 { mag: 0, sign: false } } fn HALF() -> FP16x16 { - return FP16x16 { mag: HALF, sign: false }; + FP16x16 { mag: HALF, sign: false } } fn ONE() -> FP16x16 { - return FP16x16 { mag: ONE, sign: false }; + FP16x16 { mag: ONE, sign: false } } fn MAX() -> FP16x16 { - return FP16x16 { mag: MAX, sign: false }; + FP16x16 { mag: MAX, sign: false } } fn new(mag: u32, sign: bool) -> FP16x16 { - return FP16x16 { mag: mag, sign: sign }; + FP16x16 { mag: mag, sign: sign } } fn new_unscaled(mag: u32, sign: bool) -> FP16x16 { - return FP16x16 { mag: mag * ONE, sign: sign }; + FP16x16 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP16x16 { let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP16x16) -> FP16x16 { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP16x16) -> FP16x16 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP16x16) -> FP16x16 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP16x16) -> FP16x16 { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP16x16) -> FP16x16 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP16x16) -> FP16x16 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP16x16) -> FP16x16 { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP16x16) -> FP16x16 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP16x16) -> FP16x16 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP16x16) -> FP16x16 { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP16x16) -> FP16x16 { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP16x16) -> FP16x16 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP16x16) -> FP16x16 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP16x16) -> FP16x16 { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP16x16) -> FP16x16 { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP16x16) -> FP16x16 { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP16x16) -> FP16x16 { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: 
FP16x16) -> FP16x16 { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP16x16) -> FP16x16 { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP16x16) -> FP16x16 { - return core_math::log10(self); + core_math::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP16x16, b: FP16x16) -> FP16x16 { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP16x16) -> FP16x16 { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP16x16) -> FP16x16 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP16x16) -> FP16x16 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP16x16) -> FP16x16 { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP16x16) -> FP16x16 { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP16x16) -> FP16x16 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP16x16) -> FP16x16 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP16x16) -> FP16x16 { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP16x16) -> FP16x16 { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP16x16 { - return FP16x16 { mag: 0, sign: true }; + FP16x16 { mag: 0, sign: true } } fn is_nan(self: FP16x16) -> bool { @@ -197,15 +192,15 @@ impl FP16x16Impl of FixedTrait { } fn INF() -> FP16x16 { - return FP16x16 { mag: 4294967295, sign: false }; + FP16x16 { mag: 4294967295, sign: false } } fn POS_INF() -> FP16x16 { - return FP16x16 { mag: 4294967295, sign: false }; + FP16x16 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP16x16 { - return FP16x16 { mag: 4294967295, sign: true }; + FP16x16 { mag: 4294967295, sign: true } } fn is_inf(self: FP16x16) -> bool { @@ -221,7 +216,7 @@ impl FP16x16Impl of FixedTrait { } fn erf(self: FP16x16) -> FP16x16 { - return erf::erf(self); + erf::erf(self) } } @@ -239,9 +234,9 @@ impl FP16x16IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -262,10 +257,10 @@ impl FP16x16TryIntoI8 of TryInto { impl FP16x16TryIntoU128 of TryInto { fn try_into(self: FP16x16) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -273,10 +268,10 @@ impl FP16x16TryIntoU128 of TryInto { impl FP16x16TryIntoU64 of TryInto { fn try_into(self: FP16x16) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -284,10 +279,10 @@ impl FP16x16TryIntoU64 of TryInto { impl FP16x16TryIntoU32 of TryInto { fn try_into(self: FP16x16) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some(self.mag / ONE); + Option::Some(self.mag / ONE) } } } @@ -298,7 +293,7 @@ impl FP16x16TryIntoU16 of TryInto { Option::None(()) } 
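A usage sketch for the FP16x16 surface being cleaned up here, under the assumptions visible in the hunks (paths and impl names as imported below); not project test code. `mag` stores the value scaled by 2^16, `new_unscaled` applies that scaling for you, and the `TryInto` conversions unscale with truncation, yielding `Option::None` for negative inputs:

```cairo
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    ONE, FP16x16, FP16x16Impl, FP16x16TryIntoU32
};

fn fp16x16_repr_demo() {
    // 2.5 * 2^16 = 163840
    let a: FP16x16 = FixedTrait::new(163840, false);

    // unscale-and-round-down: 2.5 -> 2
    let a_u32: u32 = a.try_into().unwrap();
    assert(a_u32 == 2, 'rounds down');

    // negative fixed points have no unsigned image
    let neg: FP16x16 = FixedTrait::new(ONE, true); // -1.0
    let as_u32: Option<u32> = neg.try_into();
    assert(as_u32.is_none(), 'negative -> None');
}
```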
else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -309,7 +304,7 @@ impl FP16x16TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -317,18 +312,18 @@ impl FP16x16TryIntoU8 of TryInto { impl FP16x16PartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP16x16, rhs: @FP16x16) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP16x16, rhs: @FP16x16) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP16x16Add of Add { fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -341,7 +336,7 @@ impl FP16x16AddEq of AddEq { impl FP16x16Sub of Sub { fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -354,7 +349,7 @@ impl FP16x16SubEq of SubEq { impl FP16x16Mul of Mul { fn mul(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -367,7 +362,7 @@ impl FP16x16MulEq of MulEq { impl FP16x16Div of Div { fn div(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -381,48 +376,47 @@ impl FP16x16DivEq of DivEq { impl FP16x16PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP16x16Neg of Neg { #[inline(always)] fn neg(a: FP16x16) -> FP16x16 { - return core_math::neg(a); + core_math::neg(a) } } impl FP16x16Rem of Rem { #[inline(always)] fn rem(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } - /// INTERNAL - fn _i32_into_fp(x: FP16x16) -> i32 { let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); if x.sign { return number_i32 * -1_i32; } + number_i32 } diff --git a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo index 0cd5a8f0f..d18fc4108 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16::core::{ HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Sub, FP16x16Div, FixedTrait, FP16x16Print diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo index ddf153f18..b53adc614 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo @@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{ }; fn max(a: FP16x16, b: FP16x16) -> FP16x16 { - if (a >= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP16x16, b: FP16x16) -> FP16x16 { 
- if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP16x16, b: FP16x16) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP16x16, b: FP16x16) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP16x16, b: FP16x16) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP16x16, b: FP16x16, c: FP16x16) -> FP16x16 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP16x16, b: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP16x16, b: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -70,7 +70,6 @@ fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 { mod tests { use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or}; - #[test] fn test_max() { let a = FixedTrait::new_unscaled(1, false); @@ -127,6 +126,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(225280, false); // 3.4375 let b = FixedTrait::new(4160843776, true); // -2046.5625 diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo index d477f051d..0085d2639 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul}; use orion::numbers::fixed_point::implementations::fp16x16::core::{ HALF, ONE, MAX, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{ use orion::numbers::fixed_point::implementations::fp16x16::math::lut; // PUBLIC - fn abs(a: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } fn add(a: FP16x16, b: FP16x16) -> FP16x16 { @@ -28,23 +22,23 @@ fn add(a: FP16x16, b: FP16x16) -> FP16x16 { } if (a.mag > b.mag) { - return FixedTrait::new(a.mag - b.mag, a.sign); + FixedTrait::new(a.mag - b.mag, a.sign) } else { - return FixedTrait::new(b.mag - a.mag, b.sign); + FixedTrait::new(b.mag - a.mag, b.sign) } } fn ceil(a: FP16x16) -> FP16x16 { - let (div, rem) = u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div + 1, false); + FixedTrait::new_unscaled(div + 1, false) } else if div == 0 { - return FixedTrait::new_unscaled(0, false); + FixedTrait::new_unscaled(0, false) } else { - return 
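The comp.cairo helpers above encode booleans as fixed points, with zero acting as false and anything else as true. A hypothetical check, assuming these module functions are visible to the caller:

```cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FP16x16Impl, FP16x16PartialEq, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16::math::comp::{where, and, xor};

fn comp_demo() {
    let zero: FP16x16 = FixedTrait::new(0, false);
    let one: FP16x16 = FixedTrait::new_unscaled(1, false);
    let two: FP16x16 = FixedTrait::new_unscaled(2, false);

    // where(a, b, c) reads like `a ? b : c`
    assert(where(zero, one, two) == two, 'zero selects c');
    assert(where(one, one, two) == one, 'non-zero selects b');

    // and/xor collapse to plain bools under the same convention
    assert(!and(zero, one), 'and needs both non-zero');
    assert(xor(zero, one), 'xor: exactly one zero');
}
```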
FixedTrait::new_unscaled(div, true); + FixedTrait::new_unscaled(div, true) } } @@ -53,16 +47,16 @@ fn div(a: FP16x16, b: FP16x16) -> FP16x16 { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP16x16, b: @FP16x16) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP16x16) -> FP16x16 { - return exp2(FixedTrait::new(94548, false) * a); // log2(e) * 2^23 β‰ˆ 12102203 + exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^16 β‰ˆ 94548 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP16x16) -> FP16x16 { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -87,57 +81,57 @@ fn exp2(a: FP16x16) -> FP16x16 { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { - return FixedTrait::ONE() / res_u; + if a.sign { + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } fn exp2_int(exp: u32) -> FP16x16 { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP16x16) -> FP16x16 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div, false); + FixedTrait::new_unscaled(div, false) } else { - return FixedTrait::new_unscaled(div + 1, true); + FixedTrait::new_unscaled(div + 1, true) } } fn ge(a: FP16x16, b: FP16x16) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP16x16, b: FP16x16) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign) } } fn le(a: FP16x16, b: FP16x16) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP16x16) -> FP16x16 { - return FixedTrait::new(45426, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693...
} // Calculates the binary logarithm of x: log2(x) @@ -157,7 +151,7 @@ fn log2(a: FP16x16) -> FP16x16 { let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(596, true) * norm; @@ -168,21 +162,22 @@ fn log2(a: FP16x16) -> FP16x16 { let r3 = (r4 + FixedTrait::new(608566, false)) * norm; let r2 = (r3 + FixedTrait::new(655828, true)) * norm; let r1 = (r2 + FixedTrait::new(534433, false)) * norm; - return r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false) } } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP16x16) -> FP16x16 { - return FixedTrait::new(19728, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301... } fn lt(a: FP16x16, b: FP16x16) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -190,20 +185,20 @@ fn mul(a: FP16x16, b: FP16x16) -> FP16x16 { let prod_u128 = integer::u32_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP16x16, b: @FP16x16) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP16x16) -> FP16x16 { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -211,7 +206,7 @@ fn neg(a: FP16x16) -> FP16x16 { // self is a FP16x16 point value // b is a FP16x16 point value fn pow(a: FP16x16, b: FP16x16) -> FP16x16 { - let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); + let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -219,7 +214,7 @@ fn pow(a: FP16x16, b: FP16x16) -> FP16x16 { } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calclates the value of a^b and checks for overflow before returning @@ -227,7 +222,7 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -238,11 +233,7 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 { let mut y = FixedTrait::ONE(); let two = integer::u32_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u32_safe_divmod(n, two); if rem == 1 { @@ -253,20 +244,20 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 { n = div; }; - return x * y; + x * y } fn rem(a: FP16x16, b: FP16x16) -> FP16x16 { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP16x16) -> FP16x16 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + 
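Two things worth pinning down in the math/core.cairo hunks above. First, the magic constants are the standard logarithm identities pre-scaled by 2^16:

```latex
e^{x} = 2^{x\log_2 e},\ \log_2 e \cdot 2^{16} \approx 94548; \qquad
\ln x = \ln 2 \cdot \log_2 x,\ \ln 2 \cdot 2^{16} \approx 45426; \qquad
\log_{10} x = \log_{10} 2 \cdot \log_2 x,\ \log_{10} 2 \cdot 2^{16} \approx 19728
```

Second, the `loop`-to-`while` rewrite in `pow_int` leaves the underlying square-and-multiply (binary exponentiation) intact: each pass halves the exponent, squaring `x` and folding `x` into `y` whenever the low bit is set. Tracing a^13:

```latex
(x,\,y,\,n) = (a,\,1,\,13) \to (a^{2},\,a,\,6) \to (a^{4},\,a,\,3) \to (a^{8},\,a^{5},\,1)
\;\Rightarrow\; x \cdot y = a^{13}
```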
FixedTrait::new_unscaled(div, a.sign) } } @@ -276,11 +267,12 @@ fn sqrt(a: FP16x16) -> FP16x16 { assert(a.sign == false, 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP16x16, b: FP16x16) -> FP16x16 { - return add(a, -b); + add(a, -b) } fn sign(a: FP16x16) -> FP16x16 { @@ -467,7 +459,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -475,7 +467,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -549,12 +541,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -567,17 +559,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -590,12 +582,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -605,17 +597,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo index 86f87f5ca..4561e5b78 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo @@ -1,4 +1,3 @@ -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE, FP16x16, FixedTrait}; use orion::numbers::fixed_point::implementations::fp16x16::math::lut::erf_lut; @@ -20,5 +19,6 @@ fn erf(x: FP16x16) -> FP16x16 { } else { erf_value = ONE; } + FP16x16 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo 
b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo index 78d0cdac2..b77271087 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo @@ -1,4 +1,3 @@ -use core::debug::PrintTrait; use orion::numbers::fixed_point::implementations::fp16x16::core::{ HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul, FP16x16MulEq, FP16x16TryIntoU128, FP16x16PartialEq, FP16x16PartialOrd, FP16x16SubEq, FP16x16Neg, @@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP16x16) -> FP16x16 { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP16x16) -> FP16x16 { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP16x16) -> FP16x16 { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP16x16) -> FP16x16 { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP16x16) -> FP16x16 { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP16x16) -> FP16x16 { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; - #[test] #[available_gas(10000000)] fn test_cosh() { diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo index 65c9746c1..723ac975f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo @@ -54,7 +54,7 @@ fn msb(whole: u32) -> (u32, u32) { } } - return (16, 65536); + (16, 65536) } fn exp2(exp: u32) -> u32 { @@ -112,7 +112,7 @@ fn exp2(exp: u32) -> u32 { } } - return 65536; + 65536 } fn sin(a: u32) -> (u32, u32, u32) { @@ -929,7 +929,7 @@ fn sin(a: u32) -> (u32, u32, u32) { } } - return (102542, 65535, 65536); + (102542, 65535, 65536) } fn atan(a: u32) -> (u32, u32, u32) { @@ -1233,7 +1233,7 @@ fn atan(a: u32) -> (u32, u32, u32) { return (44958, 39405, 39716); } - return (45416, 39716, 40025); + (45416, 39716, 40025) } fn erf_lut(x: u32) -> u32 { @@ -1925,5 +1925,6 @@ fn erf_lut(x: u32) -> u32 { return 65535; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo index 8b0d9b47f..7c4ad199c 100644 --- 
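The hyp.cairo rewrites above keep the exp/ln based formulas, which for reference are:

```latex
\cosh a = \frac{e^{a} + e^{-a}}{2}, \qquad
\sinh a = \frac{e^{a} - e^{-a}}{2}, \qquad
\tanh a = \frac{e^{a} - e^{-a}}{e^{a} + e^{-a}}
```

```latex
\operatorname{acosh} a = \ln\!\left(a + \sqrt{a^2 - 1}\right), \qquad
\operatorname{asinh} a = \ln\!\left(a + \sqrt{a^2 + 1}\right), \qquad
\operatorname{atanh} a = \tfrac{1}{2}\ln\frac{1 + a}{1 - a}
```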
a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u32_safe_divmod, u32_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp16x16::math::lut; use orion::numbers::fixed_point::implementations::fp16x16::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{ }; // CONSTANTS - const TWO_PI: u32 = 411775; const PI: u32 = 205887; const HALF_PI: u32 = 102944; @@ -22,10 +19,10 @@ fn acos(a: FP16x16) -> FP16x16 { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -33,10 +30,10 @@ fn acos_fast(a: FP16x16) -> FP16x16 { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin_fast(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP16x16) -> FP16x16 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP16x16) -> FP16x16 { @@ -57,7 +55,8 @@ fn asin_fast(a: FP16x16) -> FP16x16 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,10 +99,9 @@ fn atan(a: FP16x16) -> FP16x16 { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } - fn atan_fast(a: FP16x16) -> FP16x16 { let mut at = a.abs(); let mut shift = false; @@ -135,31 +133,32 @@ fn atan_fast(a: FP16x16) -> FP16x16 { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP16x16) -> FP16x16 { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP16x16) -> FP16x16 { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP16x16) -> FP16x16 { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP16x16) -> FP16x16 { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -171,7 +170,7 @@ fn sin_fast(a: FP16x16) -> FP16x16 { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + 
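The trig.cairo functions above lean on a small set of identities: `acos` defers to `asin`, `asin` defers to `atan`, `cos` shifts into `sin`, and `sin` first reduces its argument modulo 2Ο€ and then modulo Ο€, flipping the sign on the second half-period:

```latex
\sin(r + k\pi) = (-1)^{k}\sin r \ \ (0 \le r < \pi), \qquad
\cos x = \sin\!\left(\tfrac{\pi}{2} - x\right), \qquad
\arcsin x = \arctan\frac{x}{\sqrt{1 - x^2}}, \qquad
\arccos x = \begin{cases}
\arcsin\sqrt{1 - x^2}, & x \ge 0 \\
\pi - \arcsin\sqrt{1 - x^2}, & x < 0
\end{cases}
```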
FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -179,14 +178,16 @@ fn tan(a: FP16x16) -> FP16x16 { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP16x16) -> FP16x16 { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -199,15 +200,13 @@ fn _sin_loop(a: FP16x16, i: u32, acc: FP16x16) -> FP16x16 { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index 9c97cce46..0a6c4795e 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::{fixed_point::core::FixedTrait, FP16x16}; use orion::numbers::fixed_point::implementations::fp16x16wide::math::{ core as core_math, trig, hyp, erf @@ -18,178 +14,177 @@ struct FP16x16W { } // CONSTANTS - const TWO: u64 = 131072; // 2 ** 17 const ONE: u64 = 65536; // 2 ** 16 const HALF: u64 = 32768; // 2 ** 15 const MAX: u64 = 2147483648; // 2 ** 31 - impl FP16x16WImpl of FixedTrait { fn ZERO() -> FP16x16W { - return FP16x16W { mag: 0, sign: false }; + FP16x16W { mag: 0, sign: false } } fn HALF() -> FP16x16W { - return FP16x16W { mag: HALF, sign: false }; + FP16x16W { mag: HALF, sign: false } } fn ONE() -> FP16x16W { - return FP16x16W { mag: ONE, sign: false }; + FP16x16W { mag: ONE, sign: false } } fn MAX() -> FP16x16W { - return FP16x16W { mag: MAX, sign: false }; + FP16x16W { mag: MAX, sign: false } } fn new(mag: u64, sign: bool) -> FP16x16W { - return FP16x16W { mag: mag, sign: sign }; + FP16x16W { mag: mag, sign: sign } } fn new_unscaled(mag: u64, sign: bool) -> FP16x16W { - return FP16x16W { mag: mag * ONE, sign: sign }; + FP16x16W { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP16x16W { let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP16x16W) -> FP16x16W { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP16x16W) -> FP16x16W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP16x16W) -> FP16x16W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP16x16W) -> FP16x16W { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP16x16W) -> FP16x16W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP16x16W) -> FP16x16W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP16x16W) -> FP16x16W { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP16x16W) -> FP16x16W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP16x16W) 
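`_sin_loop` evaluates the sine Taylor series in nested (Horner) form, recursing from the innermost factor outward, while the `_fast` variants above instead interpolate linearly between neighbouring lookup-table entries:

```latex
\sin x = x\left(1 - \frac{x^2}{2\cdot 3}\left(1 - \frac{x^2}{4\cdot 5}\left(1 - \frac{x^2}{6\cdot 7}\left(\cdots\right)\right)\right)\right)
```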
-> FP16x16W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP16x16W) -> FP16x16W { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP16x16W) -> FP16x16W { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP16x16W) -> FP16x16W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP16x16W) -> FP16x16W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP16x16W) -> FP16x16W { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP16x16W) -> FP16x16W { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP16x16W) -> FP16x16W { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP16x16W) -> FP16x16W { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP16x16W) -> FP16x16W { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP16x16W) -> FP16x16W { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP16x16W) -> FP16x16W { - return core_math::log10(self); + core_math::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP16x16W) -> FP16x16W { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP16x16W) -> FP16x16W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP16x16W) -> FP16x16W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP16x16W) -> FP16x16W { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP16x16W) -> FP16x16W { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP16x16W) -> FP16x16W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP16x16W) -> FP16x16W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP16x16W) -> FP16x16W { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP16x16W) -> FP16x16W { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP16x16W { - return FP16x16W { mag: 0, sign: true }; + FP16x16W { mag: 0, sign: true } } fn is_nan(self: FP16x16W) -> bool { @@ -197,15 +192,15 @@ impl FP16x16WImpl of FixedTrait { } fn INF() -> FP16x16W { - return FP16x16W { mag: 4294967295, sign: false }; + FP16x16W { mag: 4294967295, sign: false } } fn POS_INF() -> FP16x16W { - return FP16x16W { mag: 4294967295, sign: false }; + FP16x16W { mag: 4294967295, sign: false } } fn NEG_INF() -> FP16x16W { - return FP16x16W { mag: 4294967295, sign: true }; + FP16x16W { mag: 4294967295, sign: true } } fn is_inf(self: FP16x16W) -> bool { @@ -221,7 +216,7 @@ impl FP16x16WImpl of FixedTrait { } fn erf(self: FP16x16W) -> FP16x16W { - return erf::erf(self); + erf::erf(self) } } @@ -239,9 +234,9 @@ impl FP16x16WIntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -277,10 +272,10 @@ 
impl FP16x16WTryIntoI8 of TryInto { impl FP16x16WTryIntoU128 of TryInto { fn try_into(self: FP16x16W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -288,10 +283,10 @@ impl FP16x16WTryIntoU128 of TryInto { impl FP16x16WTryIntoU64 of TryInto { fn try_into(self: FP16x16W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -299,10 +294,10 @@ impl FP16x16WTryIntoU64 of TryInto { impl FP16x16WTryIntoU32 of TryInto { fn try_into(self: FP16x16W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -313,7 +308,7 @@ impl FP16x16WTryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -324,7 +319,7 @@ impl FP16x16WTryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -332,18 +327,18 @@ impl FP16x16WTryIntoU8 of TryInto { impl FP16x16WPartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP16x16WAdd of Add { fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -356,7 +351,7 @@ impl FP16x16WAddEq of AddEq { impl FP16x16WSub of Sub { fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -369,7 +364,7 @@ impl FP16x16WSubEq of SubEq { impl FP16x16WMul of Mul { fn mul(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -382,7 +377,7 @@ impl FP16x16WMulEq of MulEq { impl FP16x16WDiv of Div { fn div(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -396,48 +391,47 @@ impl FP16x16WDivEq of DivEq { impl FP16x16WPartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP16x16WNeg of Neg { #[inline(always)] fn neg(a: FP16x16W) -> FP16x16W { - return core_math::neg(a); + core_math::neg(a) } } impl FP16x16WRem of Rem { #[inline(always)] fn rem(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } - /// INTERNAL - fn _i32_into_fp(x: FP16x16W) -> i32 { let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); if x.sign { return number_i32 * -1_i32; } + number_i32 } 
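fp16x16wide/core.cairo mirrors the FP16x16 surface one-for-one; the representational difference is the u64 magnitude, which keeps the same 2^16 scale but gives values and intermediate results extra headroom. A hypothetical illustration, assuming the impls named in the hunks above are in scope:

```cairo
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FP16x16WImpl, FP16x16WMul, FP16x16WTryIntoU64
};

fn wide_headroom_demo() {
    // 40000 * 2^16 = 2621440000 already exceeds the FP16x16 MAX magnitude
    // (2^31 = 2147483648), but fits comfortably in FP16x16W's u64 mag.
    let a: FP16x16W = FixedTrait::new_unscaled(40000, false);
    let b: FP16x16W = FixedTrait::new_unscaled(2, false);

    let c = a * b; // 80000.0
    let c_u64: u64 = c.try_into().unwrap();
    assert(c_u64 == 80000, 'wide headroom');
}
```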
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo index c9627852a..ea5f7cf65 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WSub, FP16x16WDiv, FixedTrait, FP16x16WPrint diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo index 50f93edea..5573f7650 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo @@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ }; fn max(a: FP16x16W, b: FP16x16W) -> FP16x16W { - if (a >= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP16x16W, b: FP16x16W) -> FP16x16W { - if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP16x16W, b: FP16x16W) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP16x16W, b: FP16x16W) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP16x16W, b: FP16x16W) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP16x16W, b: FP16x16W, c: FP16x16W) -> FP16x16W { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -70,7 +70,6 @@ fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W { mod tests { use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or}; - #[test] fn test_max() { let a = FixedTrait::new_unscaled(1, false); @@ -127,6 +126,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(225280, false); // 3.4375 let b = FixedTrait::new(4160843776, true); // -2046.5625 diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo index 902a54b48..cafc20e4d 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul}; use 
orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, MAX, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut; // PUBLIC - fn abs(a: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W { @@ -28,23 +22,23 @@ fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W { } if (a.mag > b.mag) { - return FixedTrait::new(a.mag - b.mag, a.sign); + FixedTrait::new(a.mag - b.mag, a.sign) } else { - return FixedTrait::new(b.mag - a.mag, b.sign); + FixedTrait::new(b.mag - a.mag, b.sign) } } fn ceil(a: FP16x16W) -> FP16x16W { - let (div, rem) = u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div + 1, false); + FixedTrait::new_unscaled(div + 1, false) } else if div == 0 { - return FixedTrait::new_unscaled(0, false); + FixedTrait::new_unscaled(0, false) } else { - return FixedTrait::new_unscaled(div, true); + FixedTrait::new_unscaled(div, true) } } @@ -53,16 +47,16 @@ fn div(a: FP16x16W, b: FP16x16W) -> FP16x16W { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP16x16W, b: @FP16x16W) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP16x16W) -> FP16x16W { - return exp2(FixedTrait::new(94548, false) * a); // log2(e) * 2^23 β‰ˆ 12102203 + exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^16 β‰ˆ 94548 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP16x16W) -> FP16x16W { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -87,57 +81,57 @@ fn exp2(a: FP16x16W) -> FP16x16W { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { - return FixedTrait::ONE() / res_u; + if a.sign { + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } fn exp2_int(exp: u64) -> FP16x16W { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP16x16W) -> FP16x16W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div, false); + FixedTrait::new_unscaled(div, false) } else { - return FixedTrait::new_unscaled(div + 1, true); + FixedTrait::new_unscaled(div + 1, true) } } fn ge(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag >
b.mag) ^ a.sign) } } fn le(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP16x16W) -> FP16x16W { - return FixedTrait::new(45426, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693... } // Calculates the binary logarithm of x: log2(x) @@ -157,7 +151,7 @@ fn log2(a: FP16x16W) -> FP16x16W { let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(596, true) * norm; @@ -168,21 +162,22 @@ fn log2(a: FP16x16W) -> FP16x16W { let r3 = (r4 + FixedTrait::new(608566, false)) * norm; let r2 = (r3 + FixedTrait::new(655828, true)) * norm; let r1 = (r2 + FixedTrait::new(534433, false)) * norm; - return r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false) } } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP16x16W) -> FP16x16W { - return FixedTrait::new(19728, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301... } fn lt(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -190,20 +185,20 @@ fn mul(a: FP16x16W, b: FP16x16W) -> FP16x16W { let prod_u128 = integer::u64_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP16x16W, b: @FP16x16W) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP16x16W) -> FP16x16W { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -211,7 +206,7 @@ // self is a FP16x16W point value // b is a FP16x16W point value fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W { - let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); + let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -219,7 +214,7 @@ fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W { } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calculates the value of a^b and checks for overflow before returning @@ -227,7 +222,7 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -238,11 +233,7 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W { let mut y = FixedTrait::ONE(); let two = integer::u64_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u64_safe_divmod(n, two); if rem == 1 { @@ -253,20 +244,20 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W { n = div; }; - return x
* y; + x * y } fn rem(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP16x16W) -> FP16x16W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + FixedTrait::new_unscaled(div, a.sign) } } @@ -276,11 +267,12 @@ fn sqrt(a: FP16x16W) -> FP16x16W { assert(a.sign == false, 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return add(a, -b); + add(a, -b) } fn sign(a: FP16x16W) -> FP16x16W { @@ -467,7 +459,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -475,7 +467,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -549,12 +541,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -567,17 +559,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -590,12 +582,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -605,17 +597,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo index 49d19bf20..143b7dfe6 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo 
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo @@ -1,8 +1,6 @@ -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ONE, FP16x16W, FixedTrait}; use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut::erf_lut; - const ERF_COMPUTATIONAL_ACCURACY: u64 = 100; const ROUND_CHECK_NUMBER: u64 = 10; // Values > MAX_ERF_NUMBER return 1 @@ -21,5 +19,6 @@ fn erf(x: FP16x16W) -> FP16x16W { } else { erf_value = ONE; } + FP16x16W { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo index 527b6046d..e2ab580fb 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo @@ -1,4 +1,3 @@ -use core::debug::PrintTrait; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul, FP16x16WMulEq, FP16x16WTryIntoU128, FP16x16WPartialEq, FP16x16WPartialOrd, FP16x16WSubEq, @@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP16x16W) -> FP16x16W { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP16x16W) -> FP16x16W { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP16x16W) -> FP16x16W { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; - #[test] #[available_gas(10000000)] fn test_cosh() { diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo index 62c58537e..f40f4d15a 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo @@ -54,7 +54,7 @@ fn msb(whole: u64) -> (u64, u64) { } } - return (16, 65536); + (16, 65536) } fn exp2(exp: u64) -> u64 { @@ -112,7 +112,7 @@ fn exp2(exp: u64) -> u64 { } } - return 65536; + 65536 } fn 
sin(a: u64) -> (u64, u64, u64) { @@ -929,7 +929,7 @@ fn sin(a: u64) -> (u64, u64, u64) { } } - return (102542, 65535, 65536); + (102542, 65535, 65536) } fn atan(a: u64) -> (u64, u64, u64) { @@ -1233,7 +1233,7 @@ fn atan(a: u64) -> (u64, u64, u64) { return (44958, 39405, 39716); } - return (45416, 39716, 40025); + (45416, 39716, 40025) } fn erf_lut(x: u64) -> u64 { @@ -1925,5 +1925,6 @@ fn erf_lut(x: u64) -> u64 { return 65535; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo index 3c22fd97f..441248cf8 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u64_safe_divmod, u64_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ }; // CONSTANTS - const TWO_PI: u64 = 411775; const PI: u64 = 205887; const HALF_PI: u64 = 102944; @@ -22,10 +19,10 @@ fn acos(a: FP16x16W) -> FP16x16W { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -33,10 +30,10 @@ fn acos_fast(a: FP16x16W) -> FP16x16W { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin_fast(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP16x16W) -> FP16x16W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP16x16W) -> FP16x16W { @@ -57,7 +55,8 @@ fn asin_fast(a: FP16x16W) -> FP16x16W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,10 +99,9 @@ fn atan(a: FP16x16W) -> FP16x16W { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } - fn atan_fast(a: FP16x16W) -> FP16x16W { let mut at = a.abs(); let mut shift = false; @@ -135,31 +133,32 @@ fn atan_fast(a: FP16x16W) -> FP16x16W { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP16x16W) -> FP16x16W { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP16x16W) -> FP16x16W { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP16x16W) -> FP16x16W { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ 
partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP16x16W) -> FP16x16W { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -171,7 +170,7 @@ fn sin_fast(a: FP16x16W) -> FP16x16W { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -179,14 +178,16 @@ fn tan(a: FP16x16W) -> FP16x16W { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP16x16W) -> FP16x16W { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -199,15 +200,13 @@ fn _sin_loop(a: FP16x16W, i: u64, acc: FP16x16W) -> FP16x16W { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/implementations/fp32x32/comp.cairo b/src/numbers/fixed_point/implementations/fp32x32/comp.cairo index 14bcf69c8..ec1043f89 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/comp.cairo @@ -1,48 +1,47 @@ -use orion::numbers::{FP32x32, FixedTrait}; -use orion::numbers::FP32x32Impl; +use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; fn xor(a: FP32x32, b: FP32x32) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP32x32, b: FP32x32) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP32x32, b: FP32x32) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP32x32, b: FP32x32, c: FP32x32) -> FP32x32 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP32x32, b: FP32x32) -> FP32x32 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP32x32, b: FP32x32) -> FP32x32 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP32x32, b: FP32x32) -> FP32x32 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index e7fd8e24d..ee38799da 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -1,177 +1,174 @@ use 
core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use cubit::f64 as fp32x32; use cubit::f64::Fixed as FP32x32; use cubit::f64::{ONE, HALF}; use cubit::f64::types::fixed; -use orion::numbers::fixed_point::implementations::fp32x32::erf; use orion::numbers::fixed_point::core::{FixedTrait}; +use orion::numbers::fixed_point::implementations::fp32x32::erf; use orion::numbers::fixed_point::utils; const MAX: u64 = 9223372036854775808; impl FP32x32Impl of FixedTrait { fn ZERO() -> FP32x32 { - return FP32x32 { mag: 0, sign: false }; + FP32x32 { mag: 0, sign: false } } fn HALF() -> FP32x32 { - return FP32x32 { mag: HALF, sign: false }; + FP32x32 { mag: HALF, sign: false } } fn ONE() -> FP32x32 { - return FP32x32 { mag: ONE, sign: false }; + FP32x32 { mag: ONE, sign: false } } fn MAX() -> FP32x32 { - return FP32x32 { mag: MAX, sign: false }; + FP32x32 { mag: MAX, sign: false } } fn new(mag: u64, sign: bool) -> FP32x32 { - return FP32x32 { mag: mag, sign: sign }; + FP32x32 { mag: mag, sign: sign } } fn new_unscaled(mag: u64, sign: bool) -> FP32x32 { - return FP32x32 { mag: mag * ONE, sign: sign }; + FP32x32 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP32x32 { let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP32x32) -> FP32x32 { - return fp32x32::ops::abs(self); + fp32x32::ops::abs(self) } fn acos(self: FP32x32) -> FP32x32 { - return fp32x32::trig::acos_fast(self); + fp32x32::trig::acos_fast(self) } fn acos_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::acos_fast(self); + fp32x32::trig::acos_fast(self) } fn acosh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::acosh(self); + fp32x32::hyp::acosh(self) } fn asin(self: FP32x32) -> FP32x32 { - return fp32x32::trig::asin_fast(self); + fp32x32::trig::asin_fast(self) } fn asin_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::asin_fast(self); + fp32x32::trig::asin_fast(self) } fn asinh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::asinh(self); + fp32x32::hyp::asinh(self) } fn atan(self: FP32x32) -> FP32x32 { - return fp32x32::trig::atan_fast(self); + fp32x32::trig::atan_fast(self) } fn atan_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::atan_fast(self); + fp32x32::trig::atan_fast(self) } fn atanh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::atanh(self); + fp32x32::hyp::atanh(self) } fn ceil(self: FP32x32) -> FP32x32 { - return fp32x32::ops::ceil(self); + fp32x32::ops::ceil(self) } fn cos(self: FP32x32) -> FP32x32 { - return fp32x32::trig::cos_fast(self); + fp32x32::trig::cos_fast(self) } fn cos_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::cos_fast(self); + fp32x32::trig::cos_fast(self) } fn cosh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::cosh(self); + fp32x32::hyp::cosh(self) } fn floor(self: FP32x32) -> FP32x32 { - return fp32x32::ops::floor(self); + fp32x32::ops::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP32x32) -> FP32x32 { - return fp32x32::ops::exp(self); + fp32x32::ops::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP32x32) -> FP32x32 { - return fp32x32::ops::exp2(self); + fp32x32::ops::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP32x32) -> FP32x32 { - return fp32x32::ops::ln(self); + fp32x32::ops::ln(self) } // 
Calculates the binary logarithm of x: log2(x) // self must be greater than zero fn log2(self: FP32x32) -> FP32x32 { - return fp32x32::ops::log2(self); + fp32x32::ops::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP32x32) -> FP32x32 { - return fp32x32::ops::log10(self); + fp32x32::ops::log10(self) } // Calculates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP32x32, b: FP32x32) -> FP32x32 { - return fp32x32::ops::pow(self, b); + fp32x32::ops::pow(self, b) } fn round(self: FP32x32) -> FP32x32 { - return fp32x32::ops::round(self); + fp32x32::ops::round(self) } fn sin(self: FP32x32) -> FP32x32 { - return fp32x32::trig::sin_fast(self); + fp32x32::trig::sin_fast(self) } fn sin_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::sin_fast(self); + fp32x32::trig::sin_fast(self) } fn sinh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::sinh(self); + fp32x32::hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP32x32) -> FP32x32 { - return fp32x32::ops::sqrt(self); + fp32x32::ops::sqrt(self) } fn tan(self: FP32x32) -> FP32x32 { - return fp32x32::trig::tan_fast(self); + fp32x32::trig::tan_fast(self) } fn tan_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::tan_fast(self); + fp32x32::trig::tan_fast(self) } fn tanh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::tanh(self); + fp32x32::hyp::tanh(self) } fn sign(self: FP32x32) -> FP32x32 { @@ -179,7 +176,7 @@ impl FP32x32Impl of FixedTrait { } fn NaN() -> FP32x32 { - return FP32x32 { mag: 0, sign: true }; + FP32x32 { mag: 0, sign: true } } fn is_nan(self: FP32x32) -> bool { @@ -187,15 +184,15 @@ impl FP32x32Impl of FixedTrait { } fn INF() -> FP32x32 { - return FP32x32 { mag: 4294967295, sign: false }; + FP32x32 { mag: 4294967295, sign: false } } fn POS_INF() -> FP32x32 { - return FP32x32 { mag: 4294967295, sign: false }; + FP32x32 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP32x32 { - return FP32x32 { mag: 4294967295, sign: true }; + FP32x32 { mag: 4294967295, sign: true } } fn is_inf(self: FP32x32) -> bool { @@ -211,11 +208,10 @@ impl FP32x32Impl of FixedTrait { } fn erf(self: FP32x32) -> FP32x32 { - return erf::erf(self); + erf::erf(self) } } - impl FP32x32Print of PrintTrait { fn print(self: FP32x32) { self.sign.print(); @@ -229,9 +225,9 @@ impl FP32x32IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -239,10 +235,10 @@ impl FP32x32IntoFelt252 of Into { impl FP32x32TryIntoU64 of TryInto { fn try_into(self: FP32x32) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -253,7 +249,7 @@ impl FP32x32TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -264,7 +260,7 @@ impl FP32x32TryIntoU32 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -275,7 +271,7 @@ impl FP32x32TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -300,7
+296,7 @@ impl FP32x32TryIntoI8 of TryInto { impl FP32x32Add of Add { fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::add(lhs, rhs); + fp32x32::ops::add(lhs, rhs) } } @@ -313,7 +309,7 @@ impl FP32x32AddEq of AddEq { impl FP32x32Sub of Sub { fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::sub(lhs, rhs); + fp32x32::ops::sub(lhs, rhs) } } @@ -326,7 +322,7 @@ impl FP32x32SubEq of SubEq { impl FP32x32Mul of Mul { fn mul(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::mul(lhs, rhs); + fp32x32::ops::mul(lhs, rhs) } } @@ -339,7 +335,7 @@ impl FP32x32MulEq of MulEq { impl FP32x32Div of Div { fn div(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::div(lhs, rhs); + fp32x32::ops::div(lhs, rhs) } } @@ -353,45 +349,44 @@ impl FP32x32DivEq of DivEq { impl FP32x32PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::ge(lhs, rhs); + fp32x32::ops::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::gt(lhs, rhs); + fp32x32::ops::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::le(lhs, rhs); + fp32x32::ops::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::lt(lhs, rhs); + fp32x32::ops::lt(lhs, rhs) } } impl FP32x32Neg of Neg { #[inline(always)] fn neg(a: FP32x32) -> FP32x32 { - return fp32x32::ops::neg(a); + fp32x32::ops::neg(a) } } impl FP32x32Rem of Rem { #[inline(always)] fn rem(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::rem(lhs, rhs); + fp32x32::ops::rem(lhs, rhs) } } fn eq(a: @FP32x32, b: @FP32x32) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } /// INTERNAL - fn _i8_try_from_fp(x: FP32x32) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); diff --git a/src/numbers/fixed_point/implementations/fp32x32/erf.cairo b/src/numbers/fixed_point/implementations/fp32x32/erf.cairo index 63ee48f85..6ebff1eec 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/erf.cairo @@ -1,7 +1,6 @@ -use core::traits::Into; -use orion::numbers::{FP32x32, FixedTrait}; use cubit::f64::ONE; +use orion::numbers::{FP32x32, FixedTrait}; use orion::numbers::fixed_point::implementations::fp32x32::lut::erf_lut; const ERF_COMPUTATIONAL_ACCURACY: u64 = 100; @@ -22,5 +21,6 @@ fn erf(x: FP32x32) -> FP32x32 { } else { erf_value = ONE; } + FP32x32 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp32x32/lut.cairo b/src/numbers/fixed_point/implementations/fp32x32/lut.cairo index 03a576452..59173612b 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/lut.cairo @@ -689,5 +689,6 @@ fn erf_lut(x: u64) -> u64 { return 4294960759; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp64x64/comp.cairo b/src/numbers/fixed_point/implementations/fp64x64/comp.cairo index 121336680..e1dc177f0 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/comp.cairo @@ -3,46 +3,46 @@ use orion::numbers::FP64x64Impl; fn xor(a: FP64x64, b: FP64x64) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP64x64, 
b: FP64x64) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP64x64, b: FP64x64) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP64x64, b: FP64x64, c: FP64x64) -> FP64x64 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP64x64, b: FP64x64) -> FP64x64 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP64x64, b: FP64x64) -> FP64x64 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP64x64, b: FP64x64) -> FP64x64 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index 23af67564..e11fcd9a4 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use cubit::f128 as fp64x64; use cubit::f128::types::Fixed as FP64x64; use cubit::f128::ONE_u128 as ONE; @@ -17,161 +13,162 @@ const HALF: u128 = 9223372036854775808_u128; // 2 ** 63 impl FP64x64Impl of FixedTrait { fn ZERO() -> FP64x64 { - return FP64x64 { mag: 0, sign: false }; + FP64x64 { mag: 0, sign: false } } fn HALF() -> FP64x64 { - return FP64x64 { mag: HALF, sign: false }; + FP64x64 { mag: HALF, sign: false } } fn ONE() -> FP64x64 { - return FP64x64 { mag: ONE, sign: false }; + FP64x64 { mag: ONE, sign: false } } fn MAX() -> FP64x64 { - return FP64x64 { mag: MAX, sign: false }; + FP64x64 { mag: MAX, sign: false } } fn new(mag: u128, sign: bool) -> FP64x64 { - return FP64x64 { mag: mag, sign: sign }; + FP64x64 { mag: mag, sign: sign } } fn new_unscaled(mag: u128, sign: bool) -> FP64x64 { - return FP64x64 { mag: mag * ONE, sign: sign }; + FP64x64 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP64x64 { let mag = core::integer::u128_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP64x64) -> FP64x64 { - return fp64x64::ops::abs(self); + fp64x64::ops::abs(self) } fn acos(self: FP64x64) -> FP64x64 { - return fp64x64::trig::acos_fast(self); + fp64x64::trig::acos_fast(self) } fn acos_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::acos_fast(self); + fp64x64::trig::acos_fast(self) } fn acosh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::acosh(self); + fp64x64::hyp::acosh(self) } fn asin(self: FP64x64) -> FP64x64 { - return fp64x64::trig::asin_fast(self); + fp64x64::trig::asin_fast(self) } fn asin_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::asin_fast(self); + fp64x64::trig::asin_fast(self) } fn asinh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::asinh(self); + fp64x64::hyp::asinh(self) } fn atan(self: FP64x64) -> FP64x64 { - return fp64x64::trig::atan_fast(self); + fp64x64::trig::atan_fast(self) } fn atan_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::atan_fast(self); + fp64x64::trig::atan_fast(self) } fn atanh(self: FP64x64) -> FP64x64 { - 
return fp64x64::hyp::atanh(self); + fp64x64::hyp::atanh(self) } fn ceil(self: FP64x64) -> FP64x64 { - return fp64x64::ops::ceil(self); + fp64x64::ops::ceil(self) } fn cos(self: FP64x64) -> FP64x64 { - return fp64x64::trig::cos_fast(self); + fp64x64::trig::cos_fast(self) } fn cos_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::cos_fast(self); + fp64x64::trig::cos_fast(self) } fn cosh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::cosh(self); + fp64x64::hyp::cosh(self) } fn floor(self: FP64x64) -> FP64x64 { - return fp64x64::ops::floor(self); + fp64x64::ops::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP64x64) -> FP64x64 { - return fp64x64::ops::exp(self); + fp64x64::ops::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP64x64) -> FP64x64 { - return fp64x64::ops::exp2(self); + fp64x64::ops::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP64x64) -> FP64x64 { - return fp64x64::ops::ln(self); + fp64x64::ops::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greater than zero fn log2(self: FP64x64) -> FP64x64 { - return fp64x64::ops::log2(self); + fp64x64::ops::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP64x64) -> FP64x64 { - return fp64x64::ops::log10(self); + fp64x64::ops::log10(self) } // Calculates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP64x64, b: FP64x64) -> FP64x64 { - return fp64x64::ops::pow(self, b); + fp64x64::ops::pow(self, b) } fn round(self: FP64x64) -> FP64x64 { - return fp64x64::ops::round(self); + fp64x64::ops::round(self) } fn sin(self: FP64x64) -> FP64x64 { - return fp64x64::trig::sin_fast(self); + fp64x64::trig::sin_fast(self) } fn sin_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::sin_fast(self); + fp64x64::trig::sin_fast(self) } fn sinh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::sinh(self); + fp64x64::hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP64x64) -> FP64x64 { - return fp64x64::ops::sqrt(self); + fp64x64::ops::sqrt(self) } fn tan(self: FP64x64) -> FP64x64 { - return fp64x64::trig::tan_fast(self); + fp64x64::trig::tan_fast(self) } fn tan_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::tan_fast(self); + fp64x64::trig::tan_fast(self) } fn tanh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::tanh(self); + fp64x64::hyp::tanh(self) } fn sign(self: FP64x64) -> FP64x64 { @@ -179,7 +176,7 @@ impl FP64x64Impl of FixedTrait { } fn NaN() -> FP64x64 { - return FP64x64 { mag: 0, sign: true }; + FP64x64 { mag: 0, sign: true } } fn is_nan(self: FP64x64) -> bool { @@ -187,15 +184,15 @@ impl FP64x64Impl of FixedTrait { } fn INF() -> FP64x64 { - return FP64x64 { mag: 4294967295, sign: false }; + FP64x64 { mag: 4294967295, sign: false } } fn POS_INF() -> FP64x64 { - return FP64x64 { mag: 4294967295, sign: false }; + FP64x64 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP64x64 { - return FP64x64 { mag: 4294967295, sign: true }; + FP64x64 { mag: 4294967295, sign: true } } fn is_inf(self: FP64x64) -> bool { @@ -211,11 +208,10 @@ impl FP64x64Impl of FixedTrait { } fn erf(self: FP64x64) -> FP64x64 { - return erf::erf(self); + erf::erf(self) } } - impl FP64x64Print of PrintTrait { fn print(self: FP64x64) { self.sign.print(); @@ -229,9 +225,9 @@ impl FP64x64IntoFelt252 of Into { let mag_felt =
self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -239,10 +235,10 @@ impl FP64x64IntoFelt252 of Into { impl FP64x64TryIntoU128 of TryInto { fn try_into(self: FP64x64) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -253,7 +249,7 @@ impl FP64x64TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -264,7 +260,7 @@ impl FP64x64TryIntoU32 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -275,7 +271,7 @@ impl FP64x64TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -300,7 +296,7 @@ impl FP64x64TryIntoI8 of TryInto { impl FP64x64Add of Add { fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::add(lhs, rhs); + fp64x64::ops::add(lhs, rhs) } } @@ -313,7 +309,7 @@ impl FP64x64AddEq of AddEq { impl FP64x64Sub of Sub { fn sub(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::sub(lhs, rhs); + fp64x64::ops::sub(lhs, rhs) } } @@ -326,7 +322,7 @@ impl FP64x64SubEq of SubEq { impl FP64x64Mul of Mul { fn mul(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::mul(lhs, rhs); + fp64x64::ops::mul(lhs, rhs) } } @@ -339,7 +335,7 @@ impl FP64x64MulEq of MulEq { impl FP64x64Div of Div { fn div(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::div(lhs, rhs); + fp64x64::ops::div(lhs, rhs) } } @@ -353,45 +349,44 @@ impl FP64x64DivEq of DivEq { impl FP64x64PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::ge(lhs, rhs); + fp64x64::ops::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::gt(lhs, rhs); + fp64x64::ops::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::le(lhs, rhs); + fp64x64::ops::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::lt(lhs, rhs); + fp64x64::ops::lt(lhs, rhs) } } impl FP64x64Neg of Neg { #[inline(always)] fn neg(a: FP64x64) -> FP64x64 { - return fp64x64::ops::neg(a); + fp64x64::ops::neg(a) } } impl FP64x64Rem of Rem { #[inline(always)] fn rem(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::rem(lhs, rhs); + fp64x64::ops::rem(lhs, rhs) } } fn eq(a: @FP64x64, b: @FP64x64) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } /// INTERNAL - fn _i8_try_from_fp(x: FP64x64) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); diff --git a/src/numbers/fixed_point/implementations/fp64x64/erf.cairo b/src/numbers/fixed_point/implementations/fp64x64/erf.cairo index 3f5101b65..1558f1e24 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/erf.cairo @@ -1,7 +1,6 @@ -use core::traits::Into; -use orion::numbers::{FP64x64, FixedTrait}; use cubit::f128::ONE_u128 as ONE; +use orion::numbers::{FP64x64, FixedTrait}; use orion::numbers::fixed_point::implementations::fp64x64::lut::erf_lut; const ERF_COMPUTATIONAL_ACCURACY: 
u128 = 100_u128; @@ -22,5 +21,6 @@ fn erf(x: FP64x64) -> FP64x64 { } else { erf_value = ONE; } + FP64x64 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp64x64/lut.cairo b/src/numbers/fixed_point/implementations/fp64x64/lut.cairo index 34042bf26..23487a032 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/lut.cairo @@ -689,5 +689,6 @@ fn erf_lut(x: u128) -> u128 { return 18446715997887504384; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index 20b0788f3..7e44554dd 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::{FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23::math::{core as core_math, trig, hyp, erf}; use orion::numbers::fixed_point::utils; @@ -16,7 +12,6 @@ struct FP8x23 { } // CONSTANTS - const TWO: u32 = 16777216; // 2 ** 24 const ONE: u32 = 8388608; // 2 ** 23 const HALF: u32 = 4194304; // 2 ** 22 @@ -25,169 +20,170 @@ const MAX: u32 = 2147483648; // 2 ** 31 impl FP8x23Impl of FixedTrait { fn ZERO() -> FP8x23 { - return FP8x23 { mag: 0, sign: false }; + FP8x23 { mag: 0, sign: false } } fn HALF() -> FP8x23 { - return FP8x23 { mag: HALF, sign: false }; + FP8x23 { mag: HALF, sign: false } } fn ONE() -> FP8x23 { - return FP8x23 { mag: ONE, sign: false }; + FP8x23 { mag: ONE, sign: false } } fn MAX() -> FP8x23 { - return FP8x23 { mag: MAX, sign: false }; + FP8x23 { mag: MAX, sign: false } } fn new(mag: u32, sign: bool) -> FP8x23 { - return FP8x23 { mag: mag, sign: sign }; + FP8x23 { mag: mag, sign: sign } } fn new_unscaled(mag: u32, sign: bool) -> FP8x23 { - return FP8x23 { mag: mag * ONE, sign: sign }; + FP8x23 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP8x23 { let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP8x23) -> FP8x23 { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP8x23) -> FP8x23 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP8x23) -> FP8x23 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP8x23) -> FP8x23 { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP8x23) -> FP8x23 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP8x23) -> FP8x23 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP8x23) -> FP8x23 { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP8x23) -> FP8x23 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP8x23) -> FP8x23 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP8x23) -> FP8x23 { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP8x23) -> FP8x23 { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP8x23) -> FP8x23 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP8x23) -> FP8x23 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP8x23) -> FP8x23 { - 
return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP8x23) -> FP8x23 { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP8x23) -> FP8x23 { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP8x23) -> FP8x23 { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP8x23) -> FP8x23 { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greater than zero fn log2(self: FP8x23) -> FP8x23 { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP8x23) -> FP8x23 { - return core_math::log10(self); + core_math::log10(self) } // Calculates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP8x23, b: FP8x23) -> FP8x23 { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP8x23) -> FP8x23 { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP8x23) -> FP8x23 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP8x23) -> FP8x23 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP8x23) -> FP8x23 { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP8x23) -> FP8x23 { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP8x23) -> FP8x23 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP8x23) -> FP8x23 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP8x23) -> FP8x23 { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP8x23) -> FP8x23 { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP8x23 { - return FP8x23 { mag: 0, sign: true }; + FP8x23 { mag: 0, sign: true } } fn is_nan(self: FP8x23) -> bool { @@ -195,15 +191,15 @@ impl FP8x23Impl of FixedTrait { } fn INF() -> FP8x23 { - return FP8x23 { mag: 4294967295, sign: false }; + FP8x23 { mag: 4294967295, sign: false } } fn POS_INF() -> FP8x23 { - return FP8x23 { mag: 4294967295, sign: false }; + FP8x23 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP8x23 { - return FP8x23 { mag: 4294967295, sign: true }; + FP8x23 { mag: 4294967295, sign: true } } fn is_inf(self: FP8x23) -> bool { @@ -219,11 +215,10 @@ impl FP8x23Impl of FixedTrait { } fn erf(self: FP8x23) -> FP8x23 { - return erf::erf(self); + erf::erf(self) } } - impl FP8x23Print of PrintTrait { fn print(self: FP8x23) { self.sign.print(); @@ -237,9 +232,9 @@ impl FP8x23IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -247,10 +242,10 @@ impl FP8x23IntoFelt252 of Into { impl FP8x23TryIntoU128 of TryInto { fn try_into(self: FP8x23) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -258,10 +253,10 @@ impl FP8x23TryIntoU128 of TryInto { impl FP8x23TryIntoU64 of TryInto { fn try_into(self: FP8x23) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else {
// Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -269,10 +264,10 @@ impl FP8x23TryIntoU64 of TryInto { impl FP8x23TryIntoU32 of TryInto { fn try_into(self: FP8x23) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some(self.mag / ONE); + Option::Some(self.mag / ONE) } } } @@ -283,7 +278,7 @@ impl FP8x23TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -294,7 +289,7 @@ impl FP8x23TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -314,18 +309,18 @@ impl FP8x23TryIntoI8 of TryInto { impl FP8x23PartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP8x23, rhs: @FP8x23) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP8x23Add of Add { fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -338,7 +333,7 @@ impl FP8x23AddEq of AddEq { impl FP8x23Sub of Sub { fn sub(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -351,7 +346,7 @@ impl FP8x23SubEq of SubEq { impl FP8x23Mul of Mul { fn mul(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -364,7 +359,7 @@ impl FP8x23MulEq of MulEq { impl FP8x23Div of Div { fn div(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -378,48 +373,49 @@ impl FP8x23DivEq of DivEq { impl FP8x23PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP8x23Neg of Neg { #[inline(always)] fn neg(a: FP8x23) -> FP8x23 { - return core_math::neg(a); + core_math::neg(a) } } impl FP8x23Rem of Rem { #[inline(always)] fn rem(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } /// INTERNAL - fn _i32_into_fp(x: FP8x23) -> i32 { // i32 { mag: x.mag / ONE, sign: x.sign } let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); + if x.sign { return number_i32 * -1_i32; } + number_i32 } @@ -430,9 +426,11 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); + if x.sign { return Option::Some(number_i8 * -1_i8); } + Option::Some(number_i8) }, Option::None => Option::None(()) diff --git a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo index 58e0ca344..9b6136501 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo +++ 
b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23::core::{ HALF, ONE, TWO, FP8x23, FP8x23Sub, FP8x23Div, FixedTrait, FP8x23Print diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo index 366f385e8..88bc12d9e 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo @@ -3,65 +3,67 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ }; fn max(a: FP8x23, b: FP8x23) -> FP8x23 { - if (a >= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP8x23, b: FP8x23) -> FP8x23 { - if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP8x23, b: FP8x23) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP8x23, b: FP8x23) -> bool { let zero = FixedTrait::new(0, false); + if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP8x23, b: FP8x23) -> bool { let zero = FixedTrait::new(0, false); + if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP8x23, b: FP8x23, c: FP8x23) -> FP8x23 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP8x23, b: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP8x23, b: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP8x23, b: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -107,6 +109,7 @@ mod tests { assert(min(c, b) == c, 'min(c, b)'); assert(min(c, c) == c, 'min(c, c)'); } + #[test] fn test_bitwise_and() { let a = FixedTrait::new(28835840, false); // 3.4375 @@ -124,6 +127,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(28835840, false); // 3.4375 let b = FixedTrait::new(1639448576, true); // -60.5625 diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo index 0e0b3fa48..c347d9817 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul}; use orion::numbers::fixed_point::implementations::fp8x23::core::{ HALF, ONE, MAX, FP8x23, FP8x23Add, FP8x23Impl, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ use orion::numbers::fixed_point::implementations::fp8x23::math::lut; // PUBLIC - fn abs(a: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, 
false) } fn add(a: FP8x23, b: FP8x23) -> FP8x23 { @@ -28,23 +22,23 @@ fn add(a: FP8x23, b: FP8x23) -> FP8x23 { } if (a.mag > b.mag) { - return FixedTrait::new(a.mag - b.mag, a.sign); + FixedTrait::new(a.mag - b.mag, a.sign) } else { - return FixedTrait::new(b.mag - a.mag, b.sign); + FixedTrait::new(b.mag - a.mag, b.sign) } } fn ceil(a: FP8x23) -> FP8x23 { - let (div, rem) = u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div + 1, false); + FixedTrait::new_unscaled(div + 1, false) } else if div == 0 { - return FixedTrait::new_unscaled(0, false); + FixedTrait::new_unscaled(0, false) } else { - return FixedTrait::new_unscaled(div, true); + FixedTrait::new_unscaled(div, true) } } @@ -53,16 +47,16 @@ fn div(a: FP8x23, b: FP8x23) -> FP8x23 { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP8x23, b: @FP8x23) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP8x23) -> FP8x23 { - return exp2(FixedTrait::new(12102203, false) * a); // log2(e) * 2^23 β‰ˆ 12102203 + exp2(FixedTrait::new(12102203, false) * a) // log2(e) * 2^23 β‰ˆ 12102203 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP8x23) -> FP8x23 { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -88,63 +82,63 @@ fn exp2(a: FP8x23) -> FP8x23 { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { - return FixedTrait::ONE() / res_u; + if a.sign { + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } fn exp2_int(exp: u32) -> FP8x23 { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP8x23) -> FP8x23 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div, false); + FixedTrait::new_unscaled(div, false) } else { - return FixedTrait::new_unscaled(div + 1, true); + FixedTrait::new_unscaled(div + 1, true) } } fn ge(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign) } } fn le(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP8x23) -> FP8x23 { - return FixedTrait::new(5814540, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(5814540, false) * log2(a) // ln(2) = 0.693... 
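// Sanity check for the constant above (added note): ln(2) * 2^23 = 0.693147... * 8388608 β‰ˆ 5814540,
// the magnitude used here; log10 below plays the same change-of-base trick with
// log10(2) * 2^23 β‰ˆ 2525223, so both reduce to a single fixed-point mul after log2(x).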
} // Calculates the binary logarithm of x: log2(x) // self must be greater than zero fn log2(a: FP8x23) -> FP8x23 { - assert(a.sign == false, 'must be positive'); + assert(!a.sign, 'must be positive'); if (a.mag == ONE) { return FixedTrait::ZERO(); @@ -158,7 +152,7 @@ let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(76243, true) * norm; @@ -169,21 +163,22 @@ let r3 = (r4 + FixedTrait::new(77896489, false)) * norm; let r2 = (r3 + FixedTrait::new(83945943, true)) * norm; let r1 = (r2 + FixedTrait::new(68407458, false)) * norm; - return r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false) } } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP8x23) -> FP8x23 { - return FixedTrait::new(2525223, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(2525223, false) * log2(a) // log10(2) = 0.301... } fn lt(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -191,20 +186,20 @@ fn mul(a: FP8x23, b: FP8x23) -> FP8x23 { let prod_u128 = integer::u32_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP8x23, b: @FP8x23) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP8x23) -> FP8x23 { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -212,7 +207,7 @@ // self is a FP8x23 point value // b is a FP8x23 point value fn pow(a: FP8x23, b: FP8x23) -> FP8x23 { - let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); + let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -220,7 +215,7 @@ } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calculates the value of a^b and checks for overflow before returning @@ -228,7 +223,7 @@ fn pow_int(a: FP8x23, b: u32, sign: bool) -> FP8x23 { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -239,11 +234,7 @@ let mut y = FixedTrait::ONE(); let two = integer::u32_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u32_safe_divmod(n, two); if rem == 1 { @@ -254,34 +245,35 @@ n = div; }; - return x * y; + x * y } fn rem(a: FP8x23, b: FP8x23) -> FP8x23 { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP8x23) -> FP8x23 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if (HALF <= rem) { -
return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + FixedTrait::new_unscaled(div, a.sign) } } // Calculates the square root of a FP8x23 point value // x must be positive fn sqrt(a: FP8x23) -> FP8x23 { - assert(a.sign == false, 'must be positive'); + assert(!(a.sign), 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP8x23, b: FP8x23) -> FP8x23 { - return add(a, -b); + add(a, -b) } fn sign(a: FP8x23) -> FP8x23 { @@ -388,7 +380,6 @@ mod tests { assert(sqrt(a).mag == 5 * ONE, 'invalid pos root'); } - #[test] #[available_gas(100000)] fn test_msb() { @@ -472,7 +463,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -480,7 +471,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -554,12 +545,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -572,17 +563,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -595,12 +586,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -610,17 +601,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo index 8121e170b..5e05783a2 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo @@ -1,4 +1,3 @@ -use core::traits::Into; use 
orion::numbers::fixed_point::implementations::fp8x23::core::{ONE, FP8x23, FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23::math::lut::erf_lut; @@ -21,5 +20,6 @@ fn erf(x: FP8x23) -> FP8x23 { } else { erf_value = ONE; } + FP8x23 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo index e2059d848..16d427366 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo @@ -1,4 +1,5 @@ use core::debug::PrintTrait; + use orion::numbers::fixed_point::implementations::fp8x23::core::{ HALF, ONE, TWO, FP8x23, FP8x23Impl, FP8x23Add, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq, FP8x23TryIntoU128, FP8x23PartialEq, FP8x23PartialOrd, FP8x23SubEq, FP8x23Neg, FP8x23Div, @@ -8,48 +9,51 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP8x23) -> FP8x23 { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP8x23) -> FP8x23 { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP8x23) -> FP8x23 { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP8x23) -> FP8x23 { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP8x23) -> FP8x23 { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP8x23) -> FP8x23 { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo index fdb9dfea3..27136c99a 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo @@ -29,7 +29,7 @@ fn msb(whole: u32) -> (u32, u32) { } } - return (8, 256); + (8, 256) } fn exp2(exp: u32) -> u32 { @@ -106,7 +106,7 @@ fn exp2(exp: u32) -> u32 { } } - return 8388608; + 8388608 } fn sin(a: u32) -> (u32, u32, u32) { @@ -923,7 +923,7 @@ fn sin(a: u32) -> (u32, u32, u32) { } } - return (13125323, 8388450, 8388608); + (13125323, 8388450, 8388608) } fn atan(a: u32) -> (u32, u32, u32) { @@ -1227,7 +1227,7 @@ fn atan(a: u32) -> (u32, u32, u32) { return (5754585, 5043802, 5083601); } - return (5813305, 5083601, 5123141); + (5813305, 5083601, 5123141) } fn erf_lut(x: u32) -> 
u32 { @@ -1919,5 +1919,6 @@ fn erf_lut(x: u32) -> u32 { return 8388595; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo index 11aec54ad..2a0db31e2 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u32_safe_divmod, u32_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp8x23::math::lut; use orion::numbers::fixed_point::implementations::fp8x23::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ }; // CONSTANTS - const TWO_PI: u32 = 52707178; const PI: u32 = 26353589; const HALF_PI: u32 = 13176795; @@ -23,9 +20,9 @@ fn acos(a: FP8x23) -> FP8x23 { let asin_res = asin(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -34,9 +31,9 @@ fn acos_fast(a: FP8x23) -> FP8x23 { let asin_res = asin_fast(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP8x23) -> FP8x23 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP8x23) -> FP8x23 { @@ -57,7 +55,8 @@ fn asin_fast(a: FP8x23) -> FP8x23 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,7 +99,7 @@ fn atan(a: FP8x23) -> FP8x23 { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } fn atan_fast(a: FP8x23) -> FP8x23 { @@ -134,31 +133,32 @@ fn atan_fast(a: FP8x23) -> FP8x23 { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP8x23) -> FP8x23 { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP8x23) -> FP8x23 { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP8x23) -> FP8x23 { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP8x23) -> FP8x23 { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -170,7 +170,7 @@ fn sin_fast(a: FP8x23) -> FP8x23 { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 
0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -178,14 +178,16 @@ fn tan(a: FP8x23) -> FP8x23 { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP8x23) -> FP8x23 { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -198,15 +200,13 @@ fn _sin_loop(a: FP8x23, i: u32, acc: FP8x23) -> FP8x23 { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index 1fac5453d..4804e6fda 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::{fixed_point::core::{FixedTrait}, FP8x23}; use orion::numbers::fixed_point::implementations::fp8x23wide::math::{ core as core_math, trig, hyp, erf @@ -18,178 +14,176 @@ struct FP8x23W { } // CONSTANTS - const TWO: u64 = 16777216; // 2 ** 24 const ONE: u64 = 8388608; // 2 ** 23 const HALF: u64 = 4194304; // 2 ** 22 const MAX: u64 = 2147483648; // 2 ** 31 - impl FP8x23WImpl of FixedTrait { fn ZERO() -> FP8x23W { - return FP8x23W { mag: 0, sign: false }; + FP8x23W { mag: 0, sign: false } } fn HALF() -> FP8x23W { - return FP8x23W { mag: HALF, sign: false }; + FP8x23W { mag: HALF, sign: false } } fn ONE() -> FP8x23W { - return FP8x23W { mag: ONE, sign: false }; + FP8x23W { mag: ONE, sign: false } } fn MAX() -> FP8x23W { - return FP8x23W { mag: MAX, sign: false }; + FP8x23W { mag: MAX, sign: false } } fn new(mag: u64, sign: bool) -> FP8x23W { - return FP8x23W { mag: mag, sign: sign }; + FP8x23W { mag: mag, sign: sign } } fn new_unscaled(mag: u64, sign: bool) -> FP8x23W { - return FP8x23W { mag: mag * ONE, sign: sign }; + FP8x23W { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP8x23W { let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP8x23W) -> FP8x23W { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP8x23W) -> FP8x23W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP8x23W) -> FP8x23W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP8x23W) -> FP8x23W { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP8x23W) -> FP8x23W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP8x23W) -> FP8x23W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP8x23W) -> FP8x23W { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP8x23W) -> FP8x23W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP8x23W) -> FP8x23W { - return 
trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP8x23W) -> FP8x23W { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP8x23W) -> FP8x23W { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP8x23W) -> FP8x23W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP8x23W) -> FP8x23W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP8x23W) -> FP8x23W { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP8x23W) -> FP8x23W { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP8x23W) -> FP8x23W { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP8x23W) -> FP8x23W { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP8x23W) -> FP8x23W { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greater than zero fn log2(self: FP8x23W) -> FP8x23W { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP8x23W) -> FP8x23W { - return core_math::log10(self); + core_math::log10(self) } // Calculates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP8x23W, b: FP8x23W) -> FP8x23W { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP8x23W) -> FP8x23W { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP8x23W) -> FP8x23W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP8x23W) -> FP8x23W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP8x23W) -> FP8x23W { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP8x23W) -> FP8x23W { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP8x23W) -> FP8x23W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP8x23W) -> FP8x23W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP8x23W) -> FP8x23W { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP8x23W) -> FP8x23W { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP8x23W { - return FP8x23W { mag: 0, sign: true }; + FP8x23W { mag: 0, sign: true } } fn is_nan(self: FP8x23W) -> bool { @@ -197,15 +191,15 @@ impl FP8x23WImpl of FixedTrait<FP8x23W, u64> { } fn INF() -> FP8x23W { - return FP8x23W { mag: 4294967295, sign: false }; + FP8x23W { mag: 4294967295, sign: false } } fn POS_INF() -> FP8x23W { - return FP8x23W { mag: 4294967295, sign: false }; + FP8x23W { mag: 4294967295, sign: false } } fn NEG_INF() -> FP8x23W { - return FP8x23W { mag: 4294967295, sign: true }; + FP8x23W { mag: 4294967295, sign: true } } fn is_inf(self: FP8x23W) -> bool { @@ -221,7 +215,7 @@ impl FP8x23WImpl of FixedTrait<FP8x23W, u64> { } fn erf(self: FP8x23W) -> FP8x23W { - return erf::erf(self); + erf::erf(self) } } @@ -239,9 +233,9 @@ impl FP8x23WIntoFelt252 of Into<FP8x23W, felt252> { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -264,10 +258,10 @@ impl FP8x23WTryIntoFP8x23 of TryInto<FP8x23W, FP8x23> { impl FP8x23WTryIntoU128 of TryInto<FP8x23W, u128> { fn
try_into(self: FP8x23W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -275,10 +269,10 @@ impl FP8x23WTryIntoU128 of TryInto { impl FP8x23WTryIntoU64 of TryInto { fn try_into(self: FP8x23W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -289,7 +283,7 @@ impl FP8x23WTryIntoU32 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -300,7 +294,7 @@ impl FP8x23WTryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -311,7 +305,7 @@ impl FP8x23WTryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -331,18 +325,18 @@ impl FP8x23WTryIntoI8 of TryInto { impl FP8x23WPartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP8x23WAdd of Add { fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -355,7 +349,7 @@ impl FP8x23WAddEq of AddEq { impl FP8x23WSub of Sub { fn sub(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -368,7 +362,7 @@ impl FP8x23WSubEq of SubEq { impl FP8x23WMul of Mul { fn mul(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -381,7 +375,7 @@ impl FP8x23WMulEq of MulEq { impl FP8x23WDiv of Div { fn div(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -395,36 +389,36 @@ impl FP8x23WDivEq of DivEq { impl FP8x23WPartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP8x23WNeg of Neg { #[inline(always)] fn neg(a: FP8x23W) -> FP8x23W { - return core_math::neg(a); + core_math::neg(a) } } impl FP8x23WRem of Rem { #[inline(always)] fn rem(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } @@ -433,9 +427,11 @@ impl FP8x23WRem of Rem { fn _i32_into_fp(x: FP8x23W) -> i32 { let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); + if x.sign { return number_i32 * -1_i32; } + number_i32 } @@ -446,9 +442,11 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); + if x.sign { return 
Option::Some(number_i8 * -1_i8); } + Option::Some(number_i8) }, Option::None => Option::None(()) diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo index b34475914..2ea2c1aff 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, TWO, FP8x23W, FP8x23WSub, FP8x23WDiv, FixedTrait, FP8x23WPrint diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo index b2dad2e6d..6b9ea2ef8 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo @@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ }; fn max(a: FP8x23W, b: FP8x23W) -> FP8x23W { - if (a >= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP8x23W, b: FP8x23W) -> FP8x23W { - if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP8x23W, b: FP8x23W) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP8x23W, b: FP8x23W) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP8x23W, b: FP8x23W) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP8x23W, b: FP8x23W, c: FP8x23W) -> FP8x23W { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -70,7 +70,6 @@ fn bitwise_or(a: FP8x23W, b: FP8x23W) -> FP8x23W { mod tests { use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or}; - #[test] fn test_max() { let a = FixedTrait::new_unscaled(1, false); @@ -126,6 +125,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(28835840, false); // 3.4375 let b = FixedTrait::new(1639448576, true); // -60.5625 diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo index 3d89ccce0..54d23b9e2 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u64_safe_divmod, 
u64_as_non_zero, u64_wide_mul}; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, MAX, FP8x23W, FP8x23WAdd, FP8x23WImpl, FP8x23WAddEq, FP8x23WSub, FP8x23WMul, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut; // PUBLIC - fn abs(a: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } fn add(a: FP8x23W, b: FP8x23W) -> FP8x23W { @@ -35,7 +29,7 @@ fn add(a: FP8x23W, b: FP8x23W) -> FP8x23W { } fn ceil(a: FP8x23W) -> FP8x23W { - let (div, rem) = u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { return a; @@ -53,16 +47,16 @@ fn div(a: FP8x23W, b: FP8x23W) -> FP8x23W { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP8x23W, b: @FP8x23W) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP8x23W) -> FP8x23W { - return exp2(FixedTrait::new(12102203, false) * a); // log2(e) * 2^23 β‰ˆ 12102203 + exp2(FixedTrait::new(12102203, false) * a) // log2(e) * 2^23 β‰ˆ 12102203 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP8x23W) -> FP8x23W { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -88,19 +82,19 @@ fn exp2(a: FP8x23W) -> FP8x23W { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { - return FixedTrait::ONE() / res_u; + if a.sign { + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } fn exp2_int(exp: u64) -> FP8x23W { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP8x23W) -> FP8x23W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { return a; @@ -113,38 +107,38 @@ fn floor(a: FP8x23W) -> FP8x23W { fn ge(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign) } } fn le(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP8x23W) -> FP8x23W { - return FixedTrait::new(5814540, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(5814540, false) * log2(a) // ln(2) = 0.693... 
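// Added note: same 2^23 scale and ln(2) constant as the FP8x23 version; the wide variant
// only swaps the magnitude type to u64 (MAX = 2^31 above), so intermediates such as the
// u128 product of integer::u64_wide_mul in mul() keep headroom before try_into().unwrap().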
} // Calculates the binary logarithm of x: log2(x) // self must be greater than zero fn log2(a: FP8x23W) -> FP8x23W { - assert(a.sign == false, 'must be positive'); + assert(!a.sign, 'must be positive'); if (a.mag == ONE) { return FixedTrait::ZERO(); @@ -158,7 +152,7 @@ let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(76243, true) * norm; @@ -169,21 +163,22 @@ let r3 = (r4 + FixedTrait::new(77896489, false)) * norm; let r2 = (r3 + FixedTrait::new(83945943, true)) * norm; let r1 = (r2 + FixedTrait::new(68407458, false)) * norm; - return r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false) } } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP8x23W) -> FP8x23W { - return FixedTrait::new(2525223, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(2525223, false) * log2(a) // log10(2) = 0.301... } fn lt(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -191,20 +186,20 @@ fn mul(a: FP8x23W, b: FP8x23W) -> FP8x23W { let prod_u128 = integer::u64_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP8x23W, b: @FP8x23W) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP8x23W) -> FP8x23W { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -212,7 +207,7 @@ // self is a FP8x23W point value // b is a FP8x23W point value fn pow(a: FP8x23W, b: FP8x23W) -> FP8x23W { - let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); + let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -220,7 +215,7 @@ } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calculates the value of a^b and checks for overflow before returning @@ -228,7 +223,7 @@ fn pow_int(a: FP8x23W, b: u64, sign: bool) -> FP8x23W { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -239,11 +234,7 @@ let mut y = FixedTrait::ONE(); let two = integer::u64_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u64_safe_divmod(n, two); if rem == 1 { @@ -254,34 +245,35 @@ n = div; }; - return x * y; + x * y } fn rem(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP8x23W) -> FP8x23W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag,
integer::u64_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + FixedTrait::new_unscaled(div, a.sign) } } // Calculates the square root of a FP8x23W point value // x must be positive fn sqrt(a: FP8x23W) -> FP8x23W { - assert(a.sign == false, 'must be positive'); + assert(!a.sign, 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return add(a, -b); + add(a, -b) } fn sign(a: FP8x23W) -> FP8x23W { @@ -472,7 +464,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -480,7 +472,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -554,12 +546,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -572,17 +564,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -595,12 +587,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -610,17 +602,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo index 83f33f9ad..ec741bff9 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo @@ -1,4 +1,3 @@ -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ONE, 
FP8x23W, FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut::erf_lut; @@ -21,5 +20,6 @@ fn erf(x: FP8x23W) -> FP8x23W { } else { erf_value = ONE; } + FP8x23W { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo index 848f711a2..928de48c8 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo @@ -1,4 +1,3 @@ -use core::debug::PrintTrait; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, TWO, FP8x23W, FP8x23WImpl, FP8x23WAdd, FP8x23WAddEq, FP8x23WSub, FP8x23WMul, FP8x23WMulEq, FP8x23WTryIntoU128, FP8x23WPartialEq, FP8x23WPartialOrd, FP8x23WSubEq, FP8x23WNeg, @@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP8x23W) -> FP8x23W { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP8x23W) -> FP8x23W { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP8x23W) -> FP8x23W { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP8x23W) -> FP8x23W { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP8x23W) -> FP8x23W { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP8x23W) -> FP8x23W { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; - #[test] #[available_gas(10000000)] fn test_cosh() { diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo index eea11e46a..20b9a2f3b 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo @@ -29,7 +29,7 @@ fn msb(whole: u64) -> (u64, u64) { } } - return (8, 256); + (8, 256) } fn exp2(exp: u64) -> u64 { @@ -106,7 +106,7 @@ fn exp2(exp: u64) -> u64 { } } - return 8388608; + 8388608 } fn sin(a: u64) -> (u64, u64, u64) { @@ -923,7 +923,7 @@ fn sin(a: u64) -> (u64, u64, u64) { } } - return (13125323, 8388450, 8388608); + (13125323, 8388450, 8388608) } fn atan(a: u64) -> (u64, u64, u64) { @@ -1227,7 +1227,7 @@ fn atan(a: u64) -> (u64, u64, u64) { return (5754585, 5043802, 5083601); } - return (5813305, 5083601, 5123141); + 
(5813305, 5083601, 5123141) } fn erf_lut(x: u64) -> u64 { @@ -1919,5 +1919,6 @@ fn erf_lut(x: u64) -> u64 { return 8388595; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo index f2074215c..5d3055640 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u64_safe_divmod, u64_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ }; // CONSTANTS - const TWO_PI: u64 = 52707178; const PI: u64 = 26353589; const HALF_PI: u64 = 13176795; @@ -23,9 +20,9 @@ fn acos(a: FP8x23W) -> FP8x23W { let asin_res = asin(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -34,9 +31,9 @@ fn acos_fast(a: FP8x23W) -> FP8x23W { let asin_res = asin_fast(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP8x23W) -> FP8x23W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP8x23W) -> FP8x23W { @@ -57,7 +55,8 @@ fn asin_fast(a: FP8x23W) -> FP8x23W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,7 +99,7 @@ fn atan(a: FP8x23W) -> FP8x23W { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } fn atan_fast(a: FP8x23W) -> FP8x23W { @@ -134,31 +133,32 @@ fn atan_fast(a: FP8x23W) -> FP8x23W { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP8x23W) -> FP8x23W { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP8x23W) -> FP8x23W { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP8x23W) -> FP8x23W { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP8x23W) -> FP8x23W { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -170,7 +170,7 @@ fn sin_fast(a: FP8x23W) -> FP8x23W { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) 
+ FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -178,14 +178,16 @@ fn tan(a: FP8x23W) -> FP8x23W { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP8x23W) -> FP8x23W { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -198,15 +200,13 @@ fn _sin_loop(a: FP8x23W, i: u64, acc: FP8x23W) -> FP8x23W { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/utils.cairo b/src/numbers/fixed_point/utils.cairo index c15b28690..bed384bb1 100644 --- a/src/numbers/fixed_point/utils.cairo +++ b/src/numbers/fixed_point/utils.cairo @@ -7,14 +7,14 @@ const HALF_PRIME: felt252 = // true = negative // false = positive fn felt_sign(a: felt252) -> bool { - return integer::u256_from_felt252(a) > integer::u256_from_felt252(HALF_PRIME); + integer::u256_from_felt252(a) > integer::u256_from_felt252(HALF_PRIME) } // Returns the absolute value of a signed `felt252` fn felt_abs(a: felt252) -> felt252 { let a_sign = felt_sign(a); - if (a_sign == true) { + if a_sign { return a * -1; } else { return a * 1; @@ -30,11 +30,11 @@ mod tests { fn test_sign() { let min = -1809251394333065606848661391547535052811553607665798349986546028067936010240; let max = 1809251394333065606848661391547535052811553607665798349986546028067936010240; - assert(felt_sign(min) == true, 'invalid result'); - assert(felt_sign(-1) == true, 'invalid result'); - assert(felt_sign(0) == false, 'invalid result'); - assert(felt_sign(1) == false, 'invalid result'); - assert(felt_sign(max) == false, 'invalid result'); + assert(felt_sign(min), 'invalid result'); + assert(felt_sign(-1), 'invalid result'); + assert(!felt_sign(0), 'invalid result'); + assert(!felt_sign(1), 'invalid result'); + assert(!felt_sign(max), 'invalid result'); } #[test] diff --git a/src/operators/matrix.cairo b/src/operators/matrix.cairo index 755e13ce4..5e7564d11 100644 --- a/src/operators/matrix.cairo +++ b/src/operators/matrix.cairo @@ -1,6 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; @@ -36,10 +33,10 @@ impl MutMatrixImpl< /// Get the value at (row, col) fn at(ref self: MutMatrix, row: usize, col: usize) -> T { - return match self.get(row, col) { + match self.get(row, col) { Option::Some(val) => val, Option::None => NumberTrait::zero(), - }; + } } /// Performs the product between a m x n `MutMatrix` and a n x 1 `NullableVec`. 
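/// A minimal usage sketch (illustrative, not from the upstream source; assumes
/// `FP16x16` and the usual `FixedTrait`/`VecTrait`/`MutMatrixImpl` imports):
///
/// ```rust
/// // [[1, 2], [3, 4]] x [5, 6] = [1*5 + 2*6, 3*5 + 4*6] = [17, 39]
/// let mut mat: MutMatrix<FP16x16> = MutMatrixImpl::new(2, 2);
/// mat.set(0, 0, FixedTrait::new_unscaled(1, false));
/// mat.set(0, 1, FixedTrait::new_unscaled(2, false));
/// mat.set(1, 0, FixedTrait::new_unscaled(3, false));
/// mat.set(1, 1, FixedTrait::new_unscaled(4, false));
///
/// let mut vec: NullableVec<FP16x16> = VecTrait::new();
/// vec.set(0, FixedTrait::new_unscaled(5, false));
/// vec.set(1, FixedTrait::new_unscaled(6, false));
///
/// let mut res = mat.matrix_vector_product(ref vec);
/// // res.at(0) == 17 and res.at(1) == 39, both as FP16x16 values
/// ```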
@@ -48,36 +45,34 @@ impl MutMatrixImpl< ref self: MutMatrix, ref vec: NullableVec ) -> NullableVec { assert(self.cols == vec.len, 'wrong matrix shape for dot'); + let m = self.rows; let n = self.cols; let mut result_vec = VecTrait::new(); let mut i = 0_usize; - loop { - if i == m { - break (); - } + while i != m { let mut sum: T = NumberTrait::zero(); let mut k = 0_usize; - loop { - if k == n { - break (); - } + while k != n { sum += MutMatrixImpl::at(ref self, i, k) * VecTrait::at(ref vec, k); + k += 1; }; - VecTrait::set(ref result_vec, i, sum); + VecTrait::set(ref result_vec, i, sum); i += 1; }; - return result_vec; + + result_vec } /// Set the value at (row, col) fn set(ref self: MutMatrix, row: usize, col: usize, value: T) { if row < self.rows && col < self.cols { let index = row * self.cols + col; + self.data.set(index, value) } } @@ -92,13 +87,10 @@ impl MutMatrixImpl< assert(axis < 2, 'Invalid axis'); let mut result: Array = ArrayTrait::new(); + if axis == 0 { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut max_value = self.get(0, col); let mut max_value = match max_value { Option::Some => { max_value.unwrap() }, @@ -107,16 +99,13 @@ impl MutMatrixImpl< let mut max_index = 0; let mut row: usize = 1; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut value = self.get(row, col); let mut value = match value { Option::Some => { value.unwrap() }, Option::None => { NumberTrait::min_value() } }; + if value > max_value { max_value = value; max_index = row; @@ -126,7 +115,6 @@ impl MutMatrixImpl< }; result.append(max_index); - col += 1; }; @@ -134,11 +122,7 @@ impl MutMatrixImpl< } let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut max_value = self.get(row, 0); let mut max_value = match max_value { Option::Some => { max_value.unwrap() }, @@ -147,16 +131,13 @@ impl MutMatrixImpl< let mut max_index = 0; let mut col: usize = 1; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut value = self.get(row, col); let mut value = match value { Option::Some => { value.unwrap() }, Option::None => { NumberTrait::min_value() } }; + if value > max_value { max_value = value; max_index = col; @@ -166,11 +147,10 @@ impl MutMatrixImpl< }; result.append(max_index); - row += 1; }; - return result.span(); + result.span() } /// Apply softmax to the matrix along the specified axis @@ -181,18 +161,10 @@ impl MutMatrixImpl< if axis == 0 { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut sum_exp = NumberTrait::zero(); let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); sum_exp += value.exp(); @@ -200,11 +172,7 @@ impl MutMatrixImpl< }; row = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); let softmax_value = (value.exp() / sum_exp).into(); result.set(row, col, softmax_value); @@ -216,18 +184,10 @@ impl MutMatrixImpl< }; } else { let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut sum_exp = NumberTrait::zero(); let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); sum_exp += value.exp(); @@ -235,11 +195,7 @@ impl MutMatrixImpl< }; col = 0; - loop { - if col == self.cols { 
- break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); let softmax_value = (value.exp() / sum_exp).into(); result.set(row, col, softmax_value); @@ -264,32 +220,23 @@ impl MutMatrixImpl< if axis == 0 { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut sum_exp = NumberTrait::zero(); let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); + if value != NumberTrait::zero() { sum_exp += value.exp(); } + row += 1; }; row = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); + if value != NumberTrait::zero() { let softmax_value = (value.exp() / sum_exp).into(); result.set(row, col, softmax_value); @@ -304,31 +251,20 @@ impl MutMatrixImpl< }; } else { let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut sum_exp = NumberTrait::zero(); let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); if value != NumberTrait::zero() { sum_exp += value.exp(); } + col += 1; }; col = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); if value != NumberTrait::zero() { @@ -353,18 +289,11 @@ impl MutMatrixImpl< let mut result = MutMatrixImpl::new(self.rows, self.cols); let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col); + if value.is_some() { let value = NumberTrait::one() / (NumberTrait::one() + (value.unwrap() * NumberTrait::neg_one()).exp()); diff --git a/src/operators/ml.cairo b/src/operators/ml.cairo index 724664216..08e9e40fb 100644 --- a/src/operators/ml.cairo +++ b/src/operators/ml.cairo @@ -1,12 +1,13 @@ mod tree_ensemble; mod linear; mod svm; +mod normalizer; use orion::operators::ml::tree_ensemble::core::{ TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES }; use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{ - TreeEnsembleClassifier, TreeEnsembleClassifierImpl, TreeEnsembleClassifierTrait, POST_TRANSFORM + TreeEnsembleClassifier, TreeEnsembleClassifierImpl, TreeEnsembleClassifierTrait }; use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{ @@ -20,3 +21,14 @@ use orion::operators::ml::linear::linear_regressor::{ use orion::operators::ml::linear::linear_classifier::{ LinearClassifierTrait, LinearClassifierImpl, LinearClassifier }; + +use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM}; + +#[derive(Copy, Drop)] +enum POST_TRANSFORM { + NONE, + SOFTMAX, + LOGISTIC, + SOFTMAXZERO, + PROBIT, +} diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index b9bed234a..000179e36 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -7,7 +7,7 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::nn::{NNTrait, FP16x16NN}; - +use orion::operators::ml::POST_TRANSFORM; #[derive(Destruct)] struct LinearClassifier { @@ -18,16 +18,6 @@ 
struct LinearClassifier { post_transform: POST_TRANSFORM, } - -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - /// Trait /// /// predict - Performs the linear classification. @@ -35,7 +25,7 @@ trait LinearClassifierTrait { /// # LinearClassifierTrait::predict /// /// ```rust - /// fn predict(ref self: LinearClassifier, X: Tensor) -> Tensor; + /// fn predict(classifier: LinearClassifier, X: Tensor) -> Tensor; /// ``` /// /// Linear Classifier. Performs the linear classification. @@ -119,7 +109,7 @@ trait LinearClassifierTrait { /// fn linear_classifier_multi_softmax() -> (Span, Tensor) { /// let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX); /// - /// let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + /// let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); /// /// (labels, scores) /// } @@ -132,7 +122,7 @@ trait LinearClassifierTrait { /// [0.036323, 0.090237, 0.87344] /// ]) /// ``` - fn predict(ref self: LinearClassifier, X: Tensor) -> (Span, Tensor); + fn predict(classifier: LinearClassifier, X: Tensor) -> (Span, Tensor); } impl LinearClassifierImpl< @@ -152,17 +142,17 @@ impl LinearClassifierImpl< +Add>, +NNTrait > of LinearClassifierTrait { - fn predict(ref self: LinearClassifier, X: Tensor) -> (Span, Tensor) { - let n: usize = self.coefficients.len() / *(X.shape).at(1); + fn predict(classifier: LinearClassifier, X: Tensor) -> (Span, Tensor) { + let n: usize = classifier.coefficients.len() / *(X.shape).at(1); let mut shape = ArrayTrait::::new(); shape.append(n); shape.append(*(X.shape).at(1)); - let mut coefficients = TensorTrait::new(shape.span(), self.coefficients); + let mut coefficients = TensorTrait::new(shape.span(), classifier.coefficients); let coefficients = coefficients.transpose(array![1, 0].span()); let mut scores = X.matmul(@coefficients); - match self.intercepts { + match classifier.intercepts { Option::Some(intercepts) => { let mut shape = ArrayTrait::::new(); shape.append(1); @@ -173,12 +163,12 @@ impl LinearClassifierImpl< Option::None => {}, }; - let (n_classes, classlabels) = match self.classlabels { + let (n_classes, classlabels) = match classifier.classlabels { Option::Some(classlabels) => { (classlabels.len(), classlabels) }, Option::None => { (0, ArrayTrait::::new().span()) }, }; if *coefficients.shape.at(1) == 1 && n_classes == 2 { - let mut new_scores = ArrayTrait::new(); + let mut new_scores = array![]; loop { match scores.data.pop_front() { @@ -189,10 +179,11 @@ impl LinearClassifierImpl< Option::None => { break; }, } }; + scores = TensorTrait::new(array![*scores.shape.at(0), 2].span(), new_scores.span()); } // Post Transform - scores = match self.post_transform { + scores = match classifier.post_transform { POST_TRANSFORM::NONE => { scores }, POST_TRANSFORM::SOFTMAX => { NNTrait::softmax(@scores, 1) }, POST_TRANSFORM::LOGISTIC => { NNTrait::sigmoid(@scores) }, @@ -201,7 +192,7 @@ impl LinearClassifierImpl< }; // Labels - let mut labels_list = ArrayTrait::new(); + let mut labels_list = array![]; if *scores.shape.at(1) > 1 { let mut labels = scores.argmax(1, Option::None, Option::None); loop { @@ -212,56 +203,48 @@ impl LinearClassifierImpl< }; } else { let mut i = 0; - match self.post_transform { + match classifier.post_transform { POST_TRANSFORM::NONE => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::zero() { labels_list.append(*classlabels[0]); } 
else { labels_list.append(0); } + i += 1; }; }, POST_TRANSFORM::SOFTMAX => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::half() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, POST_TRANSFORM::LOGISTIC => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::half() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, POST_TRANSFORM::SOFTMAXZERO => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::half() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, @@ -273,12 +256,10 @@ impl LinearClassifierImpl< } } - fn max(a: usize, b: usize) -> usize { if a > b { - return a; + a } else { - return b; + b } } - diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index 75e461729..d15f55f89 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ b/src/operators/ml/linear/linear_regressor.cairo @@ -13,6 +13,7 @@ use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; use core::debug::PrintTrait; use orion::operators::nn::{NNTrait, FP16x16NN}; +use orion::operators::ml::POST_TRANSFORM; #[derive(Destruct)] struct LinearRegressor { @@ -22,14 +23,6 @@ struct LinearRegressor { post_transform: POST_TRANSFORM, } -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} /// Trait /// @@ -38,14 +31,14 @@ trait LinearRegressorTrait { /// # LinearRegressorTrait::predict /// /// ```rust - /// fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor; + /// fn predict(regressor: LinearRegressor, X: Tensor) -> Tensor; /// ``` /// /// Linear Regressor. Performs the generalized linear regression evaluation. /// /// ## Args /// - /// * `self`: LinearRegressor - A LinearRegressor object. + /// * `regressor`: LinearRegressor - A LinearRegressor object. /// * `X`: Input 2D tensor. 
     ///
     /// ## Returns
@@ -105,7 +98,7 @@ trait LinearRegressorTrait {
     ///    post_transform
     /// };
     ///
-    ///    let scores = LinearRegressorTrait::predict(ref regressor, X);
+    ///    let scores = LinearRegressorTrait::predict(regressor, X);
     ///
     ///    scores
     /// }
@@ -157,7 +150,7 @@ trait LinearRegressorTrait {
     ///    post_transform
     /// };
     ///
-    ///    let scores = LinearRegressorTrait::predict(ref regressor, X);
+    ///    let scores = LinearRegressorTrait::predict(regressor, X);
     ///
     ///    scores
     /// }
@@ -168,7 +161,7 @@ trait LinearRegressorTrait {
     ///
     ///
-    fn predict(ref self: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
+    fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
 }
 
 impl LinearRegressorImpl<
@@ -189,19 +182,19 @@ impl LinearRegressorImpl<
     +Add<Tensor<T>>,
     +NNTrait<T>,
 > of LinearRegressorTrait<T> {
-    fn predict(ref self: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T> {
-        let n: usize = self.coefficients.len() / self.target;
+    fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T> {
+        let n: usize = regressor.coefficients.len() / regressor.target;
         let mut shape = ArrayTrait::<usize>::new();
-        shape.append(self.target);
+        shape.append(regressor.target);
         shape.append(n);
-        let mut coefficients = TensorTrait::new(shape.span(), self.coefficients);
+        let mut coefficients = TensorTrait::new(shape.span(), regressor.coefficients);
         let coefficients = coefficients.transpose(array![1, 0].span());
 
         let mut score = X.matmul(@coefficients);
-        match self.intercepts {
+        match regressor.intercepts {
             Option::Some(intercepts) => {
-                let mut shape = ArrayTrait::<usize>::new();
+                let mut shape: Array<usize> = array![];
                 shape.append(1);
                 shape.append(intercepts.len());
                 let intercepts = TensorTrait::new(shape.span(), intercepts);
@@ -211,7 +204,7 @@ impl LinearRegressorImpl<
         };
 
         // Post Transform
-        let score = match self.post_transform {
+        let score = match regressor.post_transform {
             POST_TRANSFORM::NONE => score, // No action required
             POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, 1),
             POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score),
diff --git a/src/operators/ml/normalizer.cairo b/src/operators/ml/normalizer.cairo
new file mode 100644
index 000000000..f6e029e90
--- /dev/null
+++ b/src/operators/ml/normalizer.cairo
@@ -0,0 +1 @@
+mod normalizer;
diff --git a/src/operators/ml/normalizer/normalizer.cairo b/src/operators/ml/normalizer/normalizer.cairo
new file mode 100644
index 000000000..91d4d9e6f
--- /dev/null
+++ b/src/operators/ml/normalizer/normalizer.cairo
@@ -0,0 +1,268 @@
+use core::array::ArrayTrait;
+use orion::numbers::NumberTrait;
+use orion::operators::tensor::{TensorTrait, Tensor};
+
+
+#[derive(Copy, Drop)]
+enum NORM {
+    MAX,
+    L1,
+    L2,
+}
+
+
+/// predict - Returns the normalization of the input; each row of the input is normalized independently.
+trait NormalizerTrait<T> {
+    /// # Normalizer::predict
+    ///
+    /// ```rust
+    ///    fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
+    /// ```
+    ///
+    /// Returns the normalized input.
+    /// Three different types of normalization can be performed; they are defined as follows:
+    /// MAX: $Y = \frac{X}{max(|X|)}$
+    /// L1: $Y = \frac{X}{sum(|X|)}$
+    /// L2: $Y = \frac{X}{\sqrt{sum(X^2)}}$
+    /// For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.
+    ///
+    /// ## Args
+    ///
+    /// * `X`(`@Tensor<T>`) - Input 2D tensor.
+    /// * `norm`(`NORM`) - NORM::MAX, NORM::L1 or NORM::L2
+    ///
+    ///
+    /// ## Returns
+    ///
+    /// * Tensor<T> - The normalized output tensor.
+    ///
+    /// ## Examples
+    ///
+    /// ```rust
+    /// use orion::numbers::FP16x16;
+    /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq};
+    ///
+    /// use orion::operators::ml::normalizer::normalizer::{
+    ///     NormalizerTrait, NORM
+    /// };
+    ///
+    ///
+    ///
+    /// fn normalizer_max() -> Tensor<FP16x16> {
+    ///     let mut shape = ArrayTrait::<usize>::new();
+    ///     shape.append(3);
+    ///     shape.append(3);
+    ///
+    ///     let mut data = ArrayTrait::new();
+    ///     data.append(FP16x16 { mag: 65536, sign: true });
+    ///     data.append(FP16x16 { mag: 52428, sign: true });
+    ///     data.append(FP16x16 { mag: 39321, sign: true });
+    ///     data.append(FP16x16 { mag: 26214, sign: true });
+    ///     data.append(FP16x16 { mag: 13107, sign: true });
+    ///     data.append(FP16x16 { mag: 0, sign: false });
+    ///     data.append(FP16x16 { mag: 13107, sign: false });
+    ///     data.append(FP16x16 { mag: 26214, sign: false });
+    ///     data.append(FP16x16 { mag: 39321, sign: false });
+    ///
+    ///     let X = TensorTrait::new(shape.span(), data.span());
+    ///
+    ///     return NormalizerTrait::predict(X, NORM::MAX);
+    /// }
+    /// >>> [[-1.        -0.8       -0.6      ]
+    ///      [-1.        -0.5        0.       ]
+    ///      [ 0.3333333  0.6666666  1.       ]]
+    ///
+    /// ```
+    ///
+    ///
+    fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
+}
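For reference, here is a minimal usage sketch of the new operator with `NORM::L1`. It is not part of this diff; the `normalizer_l1_example` name and the input values are illustrative assumptions, following the conventions of the `normalizer_max` doc example above:

```rust
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv};
use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM};

fn normalizer_l1_example() -> Tensor<FP16x16> {
    // 2x2 input, row-major: [[1.0, 3.0], [2.0, 2.0]] (FP16x16: 1.0 has mag 65536)
    let X = TensorTrait::new(
        array![2, 2].span(),
        array![
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 131072, sign: false },
        ]
            .span()
    );

    // Each row is divided by its L1 norm (sum of absolute values):
    // row 0 -> [0.25, 0.75], row 1 -> [0.5, 0.5]
    NormalizerTrait::predict(X, NORM::L1)
}
```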
+
+
+impl NormalizerImpl<
+    T,
+    MAG,
+    +Drop<T>,
+    +Copy<T>,
+    +NumberTrait<T, MAG>,
+    +PartialOrd<T>,
+    +TensorTrait<T>,
+    +AddEq<T>,
+    +Div<Tensor<T>>,
+    +Mul<T>
+> of NormalizerTrait<T> {
+    fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T> {
+        assert(X.shape.len() == 2, 'input should be 2D: NxC');
+
+        let normalized_tensor = match norm {
+            NORM::MAX => { norm_max(X) },
+            NORM::L1 => { norm_l1(X) },
+            NORM::L2 => { norm_l2(X) },
+        };
+
+        return normalized_tensor;
+    }
+}
+
+
+fn norm_max<
+    T,
+    MAG,
+    +Drop<T>,
+    +Copy<T>,
+    +NumberTrait<T, MAG>,
+    +TensorTrait<T>,
+    +PartialOrd<T>,
+    +Div<Tensor<T>>,
+>(
+    X: Tensor<T>
+) -> Tensor<T> {
+    let div_data = reduce_max_2D_axis_1(X.abs());
+
+    let div = TensorTrait::new(
+        array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
+    );
+
+    let epsilon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
+    let safe_div = TensorTrait::max(tensors: array![div, epsilon].span());
+
+    return X / safe_div;
+}
+
+fn norm_l1<
+    T,
+    MAG,
+    +Drop<T>,
+    +Copy<T>,
+    +AddEq<T>,
+    +NumberTrait<T, MAG>,
+    +TensorTrait<T>,
+    +PartialOrd<T>,
+    +Div<Tensor<T>>,
+>(
+    X: Tensor<T>
+) -> Tensor<T> {
+    let div_data = reduce_sum_2D_axis_1(X.abs());
+
+    let div = TensorTrait::new(
+        array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
+    );
+
+    let epsilon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
+    let safe_div = TensorTrait::max(tensors: array![div, epsilon].span());
+
+    return X / safe_div;
+}
+
+fn norm_l2<
+    T,
+    MAG,
+    +Drop<T>,
+    +Copy<T>,
+    +AddEq<T>,
+    +NumberTrait<T, MAG>,
+    +TensorTrait<T>,
+    +PartialOrd<T>,
+    +Div<Tensor<T>>,
+    +Mul<T>
+>(
+    X: Tensor<T>
+) -> Tensor<T> {
+    let div_data = reduce_sum_2D_axis_1(square(X));
+    let div = TensorTrait::new(
+        array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
+    );
+
+    let epsilon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
+    let safe_div = TensorTrait::max(tensors: array![div.sqrt(), epsilon].span());
+
+    return X / safe_div;
+}
+
+
+fn reduce_max_2D_axis_1<
+    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>,
+>(
+    X: Tensor<T>
+) -> Span<T> {
+    let mut new_data = ArrayTrait::new();
+    let N = *X.shape.at(0);
+    let C
= *X.shape.at(1); + + let mut i = 0; + while i != N { + let max = max(SpanTrait::slice(X.data, i * C, C)); + new_data.append(max); + i += 1; + }; + return new_data.span(); +} + + +fn max, +Copy, +NumberTrait, +TensorTrait, +PartialOrd,>( + mut a: Span +) -> T { + assert(a.len() > 0, 'span cannot be empty'); + + let mut max = *a.at(0); + loop { + match a.pop_front() { + Option::Some(v) => { if *v > max { + max = *v; + }; }, + Option::None => { break max; } + }; + } +} + +fn sum, +Copy, +AddEq, +NumberTrait,>(mut a: Span) -> T { + assert(a.len() > 0, 'span cannot be empty'); + + let mut sum = NumberTrait::zero(); + loop { + match a.pop_front() { + Option::Some(v) => { sum += *v; }, + Option::None => { break sum; } + }; + } +} + +fn square< + T, + MAG, + +Drop, + +Copy, + +AddEq, + +NumberTrait, + +TensorTrait, + +PartialOrd, + +Mul +>( + mut a: Tensor +) -> Tensor { + let mut arr = ArrayTrait::new(); + loop { + match a.data.pop_front() { + Option::Some(v) => { arr.append(*v * *v); }, + Option::None => { break TensorTrait::new(a.shape, arr.span()); } + }; + } +} + +fn reduce_sum_2D_axis_1< + T, MAG, +Drop, +Copy, +AddEq, +NumberTrait, +TensorTrait, +>( + X: Tensor +) -> Span { + let mut new_data = ArrayTrait::new(); + let N = *X.shape.at(0); + let C = *X.shape.at(1); + + let mut i = 0; + while i != N { + let sum = sum(SpanTrait::slice(X.data, i * C, C)); + new_data.append(sum); + i += 1; + }; + return new_data.span(); +} diff --git a/src/operators/ml/svm/core.cairo b/src/operators/ml/svm/core.cairo index 156fea8ee..365cb0c1b 100644 --- a/src/operators/ml/svm/core.cairo +++ b/src/operators/ml/svm/core.cairo @@ -1,13 +1,8 @@ -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::traits::Into; use orion::numbers::NumberTrait; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use core::debug::PrintTrait; use orion::utils::get_row; #[derive(Copy, Drop)] @@ -18,7 +13,6 @@ enum KERNEL_TYPE { SIGMOID, } - fn kernel_dot< T, MAG, @@ -51,7 +45,8 @@ fn kernel_dot< NumberTrait::tanh(s) }, }; - return s; + + s } @@ -62,15 +57,12 @@ fn sv_dot< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + *pA.at(i) * *pB.at(i); i += 1; }; - return sum; + sum } fn squared_diff< @@ -89,12 +81,10 @@ fn squared_diff< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one()); i += 1; }; - return sum; + + sum } diff --git a/src/operators/ml/svm/svm_classifier.cairo b/src/operators/ml/svm/svm_classifier.cairo index fcaee16e3..8dacddfc0 100644 --- a/src/operators/ml/svm/svm_classifier.cairo +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -14,6 +14,7 @@ use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; use orion::utils::get_row; use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; +use orion::operators::ml::POST_TRANSFORM; #[derive(Copy, Drop, Destruct)] @@ -30,17 +31,6 @@ struct SVMClassifier { vectors_per_class: Option>, } - -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - - #[derive(Copy, Drop)] enum MODE { SVM_LINEAR, @@ -250,7 +240,6 @@ trait 
SVMClassifierTrait { fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor); } - impl SVMClassifierImpl< T, MAG, @@ -272,19 +261,17 @@ impl SVMClassifierImpl< fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor) { let mut vector_count_ = 0; let class_count_ = max(self.classlabels.len(), 1); - let mut starting_vector_ = ArrayTrait::new(); + let mut starting_vector_: Array = array![]; let (vectors_per_class_, starting_vector_) = match self.vectors_per_class { Option::Some(vectors_per_class) => { let mut i = 0; - loop { - if i == vectors_per_class.len() { - break; - } + while i != vectors_per_class.len() { starting_vector_.append(vector_count_); vector_count_ += *vectors_per_class.at(i); i += 1; }; + (vectors_per_class, starting_vector_.span()) }, Option::None => { (array![].span(), array![].span()) }, @@ -320,22 +307,17 @@ impl SVMClassifierImpl< // SVM let (res, votes) = match mode { MODE::SVM_LINEAR => { - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let mut n = 0; - loop { - if n == *X.shape.at(0) { - break; - } + while n != *X.shape.at(0) { let mut x_n = get_row(@X, n); let scores = run_linear(ref self, x_n, coefs, class_count_, kernel_type_); let mut i = 0; - loop { - if i == scores.len() { - break; - } + while i != scores.len() { res.append(*scores.at(i)); i += 1; }; + n += 1; }; @@ -345,13 +327,10 @@ impl SVMClassifierImpl< ) }, MODE::SVM_SVC => { - let mut res = ArrayTrait::new(); - let mut votes = ArrayTrait::new(); + let mut res: Array = array![]; + let mut votes: Array = array![]; let mut n = 0; - loop { - if n == *X.shape.at(0) { - break; - } + while n != *X.shape.at(0) { let mut x_n = get_row(@X, n); let (scores, mut vote) = run_svm( ref self, @@ -365,21 +344,17 @@ impl SVMClassifierImpl< vectors_per_class_ ); let mut i = 0; - loop { - if i == scores.len() { - break; - } + while i != scores.len() { res.append(*scores.at(i)); i += 1; }; + let mut i = 0; - loop { - if i == vote.len() { - break; - } + while i != vote.len() { votes.append(vote.at(i)); i += 1; }; + n += 1; }; @@ -400,20 +375,14 @@ impl SVMClassifierImpl< MODE::SVM_LINEAR => { (res, false) }, MODE::SVM_SVC => { let (scores, has_proba) = if self.prob_a.len() > 0 { - let mut scores = ArrayTrait::new(); + let mut scores: Array = array![]; let mut n = 0; - loop { - if n == *res.shape.at(0) { - break; - } + while n != *res.shape.at(0) { let res_n = get_row(@res, n); let mut s = probablities(ref self, res_n, class_count_); let mut i = 0; - loop { - if i == s.len() { - break; - } + while i != s.len() { scores.append(s.at(i)); i += 1; }; @@ -430,19 +399,17 @@ impl SVMClassifierImpl< } else { (res, false) }; + (scores, has_proba) }, }; // Finalization - let mut labels = ArrayTrait::new(); - let mut final_scores = ArrayTrait::new(); + let mut labels: Array = array![]; + let mut final_scores: Array = array![]; let mut n = 0; - loop { - if n == *scores.shape.at(0) { - break; - } + while n != *scores.shape.at(0) { let mut scores_n = get_row(@scores, n); match votes { Option::Some(votes) => { @@ -455,14 +422,13 @@ impl SVMClassifierImpl< has_proba, self.classlabels ); + let mut i = 0; - loop { - if i == new_scores.data.len() { - break; - } + while i != new_scores.data.len() { final_scores.append(*new_scores.data.at(i)); i += 1; }; + labels.append(label); }, Option::None => { @@ -474,32 +440,31 @@ impl SVMClassifierImpl< has_proba, self.classlabels ); + let mut i = 0; - loop { - if i == new_scores.data.len() { - break; - } + while i != new_scores.data.len() { 
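+                            // Flatten this sample's per-class scores into the single output buffer.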
final_scores.append(*new_scores.data.at(i)); i += 1; }; + labels.append(label); }, } + n += 1; }; + let labels = labels.span(); // Labels if self.classlabels.len() > 0 { - let mut class_labels = ArrayTrait::new(); + let mut class_labels: Array = array![]; let mut i = 0; - loop { - if i == labels.len() { - break; - } + while i != labels.len() { class_labels.append(*self.classlabels.at(*labels.at(i))); i += 1; }; + return ( class_labels.span(), TensorTrait::new( @@ -508,17 +473,17 @@ impl SVMClassifierImpl< ) ); } - return ( + + ( labels, TensorTrait::new( array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(), final_scores.span() ) - ); + ) } } - fn run_svm< T, MAG, @@ -544,13 +509,10 @@ fn run_svm< vectors_per_class_: Span ) -> (Array, NullableVec) { let mut evals = 0; - let mut kernels = ArrayTrait::new(); + let mut kernels: Array = array![]; let mut j = 0; - loop { - if j == vector_count_ { - break; - } + while j != vector_count_ { let sv_j = get_row(@sv, j); kernels.append(kernel_dot(self.kernel_params, X, sv_j, kernel)); j += 1; @@ -558,25 +520,17 @@ fn run_svm< let kernels = kernels.span(); - let mut scores = ArrayTrait::new(); - + let mut scores: Array = array![]; let mut votes = VecTrait::new(); VecTrait::set(ref votes, class_count_ - 1, NumberTrait::zero()); let mut i = 0; - loop { - if i == class_count_ { - break; - } - + while i != class_count_ { let si_i = *starting_vector_.at(i); let class_i_sc = *vectors_per_class_.at(i); let mut j = i + 1; - loop { - if j == class_count_ { - break; - } + while j != class_count_ { let si_j = *starting_vector_.at(j); let class_j_sc = *vectors_per_class_.at(j); @@ -606,12 +560,15 @@ fn run_svm< } else { VecTrait::set(ref votes, j, VecTrait::at(ref votes, j) + NumberTrait::one()); } + evals += 1; j += 1; }; + i += 1; }; - return (scores, votes); + + (scores, votes) } fn run_linear< @@ -633,14 +590,10 @@ fn run_linear< class_count_: usize, kernel: KERNEL_TYPE ) -> Array { - let mut scores = ArrayTrait::new(); + let mut scores: Array = array![]; let mut j = 0; - loop { - if j == class_count_ { - break; - } - + while j != class_count_ { let coefs_j = get_row(@coefs, j); let d = kernel_dot(self.kernel_params, X, coefs_j, kernel); @@ -650,9 +603,9 @@ fn run_linear< scores.append(score); j += 1; }; - return scores; -} + scores +} fn compute_final_scores< T, @@ -678,7 +631,6 @@ fn compute_final_scores< has_proba: bool, classlabels: Span ) -> (usize, Tensor) { - let (max_class, max_weight) = if votes.len() > 0 { let max_class = argmax_span(votes); let max_weight = *votes.at(max_class); @@ -708,7 +660,7 @@ fn compute_final_scores< write_additional_scores ); - return (label, new_scores); + (label, new_scores) } fn write_scores< @@ -725,7 +677,6 @@ fn write_scores< >( n_classes: usize, scores: Tensor, post_transform: POST_TRANSFORM, add_second_class: usize ) -> Tensor { - let new_scores = if n_classes >= 2 { let new_scores = match post_transform { POST_TRANSFORM::NONE => scores, @@ -750,6 +701,7 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::SOFTMAX => { @@ -769,6 +721,7 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::LOGISTIC => { @@ -787,6 +740,7 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::SOFTMAXZERO => { @@ -806,13 +760,15 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), 
array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not applicable here.'), }; new_scores }; - return new_scores; + + new_scores } fn set_score_svm< @@ -835,29 +791,29 @@ fn set_score_svm< return (*classlabels.at(1), write_additional_scores); }; }; + return (*classlabels.at(maxclass), write_additional_scores); } if max_weight >= NumberTrait::zero() { return (posclass, write_additional_scores); }; - return (negclass, write_additional_scores); + + (negclass, write_additional_scores) } fn argmax_span, +Copy, +PartialOrd,>(span: Span) -> usize { let mut max = 0; let mut i = 0; - loop { - if i == span.len() { - break; - } + while i != span.len() { if *span.at(i) > *span.at(max) { max = i; } + i += 1; }; - return max; -} + max +} fn probablities< T, @@ -880,15 +836,9 @@ fn probablities< let mut probsp2: MutMatrix = MutMatrixImpl::new(class_count_, class_count_); let mut index = 0; let mut i = 0; - loop { - if i == class_count_ { - break; - } + while i != class_count_ { let mut j = i + 1; - loop { - if j == class_count_ { - break; - } + while j != class_count_ { let val1 = sigmoid_probability( *scores.at(index), *self.prob_a.at(index), *self.prob_b.at(index) ); @@ -896,15 +846,18 @@ fn probablities< let mut val2 = NumberTrait::min( val1, NumberTrait::one() ); // ONNX : min(val2, (1 - 1.0e-7)) + probsp2.set(i, j, val2); probsp2.set(j, i, NumberTrait::one() - val2); j += 1; index += 1; }; + i += 1; }; - return multiclass_probability(class_count_, ref probsp2); + + multiclass_probability(class_count_, ref probsp2) } fn multiclass_probability< @@ -937,85 +890,64 @@ fn multiclass_probability< let eps = (NumberTrait::half() / NumberTrait::new_unscaled(a.into(), false)) / k_fp; let mut t = 0; - loop { - if t == k { - break; - } + while t != k { VecTrait::set(ref P, t, NumberTrait::one() / k_fp); let mut i = 0; let mut acc1 = NumberTrait::zero(); - loop { - if i == t { - break; - } + while i != t { let r_i = MutMatrixImpl::at(ref R, i, t); acc1 += r_i * r_i; i += 1; }; + MutMatrixImpl::set(ref Q, t, t, acc1); let mut i = 0; - loop { - if i == t { - break; - } + while i != t { MutMatrixImpl::set(ref Q, t, i, MutMatrixImpl::at(ref Q, i, t)); i += 1; }; let mut i = t + 1; let mut acc2 = NumberTrait::zero(); - loop { - if i == k { - break; - } + while i != k { let r_i = MutMatrixImpl::at(ref R, i, t); acc2 += r_i * r_i; i += 1; }; + MutMatrixImpl::set(ref Q, t, t, acc1 + acc2); let mut i = t + 1; let mut acc = NumberTrait::zero(); - loop { - if i == k { - break; - } + while i != k { acc += -MutMatrixImpl::at(ref R, i, t) * MutMatrixImpl::at(ref R, t, i); i += 1; }; let mut i = t + 1; - loop { - if i == k { - break; - } + while i != k { MutMatrixImpl::set(ref Q, t, i, acc); i += 1; }; + t += 1; }; let mut i = 0; - loop { - if i == max_iter { - break; - } - + while i != max_iter { let mut Qp = MutMatrixImpl::matrix_vector_product(ref Q, ref P); let mut pQp = dot(ref P, ref Qp); let mut max_error = NumberTrait::zero(); let mut t = 0; - loop { - if t == k { - break; - } + while t != k { let error = NumberTrait::abs(Qp.at(t) - pQp); if error > max_error { max_error = error; } + t += 1; }; @@ -1024,11 +956,7 @@ fn multiclass_probability< } let mut t = 0; - loop { - if t == k { - break; - } - + while t != k { let diff = (-VecTrait::at(ref Qp, t) + pQp) / MutMatrixImpl::at(ref Q, t, t); VecTrait::set(ref P, t, VecTrait::at(ref P, t) + diff); @@ -1045,9 +973,11 @@ fn multiclass_probability< t += 1; }; + i += 1; }; - return P; + + P } /// Computation of the 
matrix Qb in the multiclass_probability computation @@ -1071,10 +1001,7 @@ fn Qp_computation< let m = Qp.len; let mut i = 0_usize; - loop { - if i == m { - break (); - } + while i != m { let elem = (VecTrait::at(ref Qp, i) + diff * MutMatrixImpl::at(ref Q, t, i)) / (NumberTrait::one() + diff); @@ -1083,7 +1010,6 @@ fn Qp_computation< }; } - fn sigmoid_probability< T, MAG, @@ -1110,14 +1036,14 @@ fn sigmoid_probability< v }; - return NumberTrait::one() - v; + NumberTrait::one() - v } - fn max(a: usize, b: usize) -> usize { if a > b { return a; }; + b } @@ -1125,18 +1051,16 @@ fn min, +Drop, +PartialOrd,>(a: Span) -> T { let mut min = *a.at(0); let mut i = 0; - loop { - if i == a.len() { - break; - } + while i != a.len() { if min > *a.at(i) { min = *a.at(i); } + i += 1; }; - return min; -} + min +} fn dot_start_end< T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, @@ -1146,19 +1070,15 @@ fn dot_start_end< let mut sum = NumberTrait::zero(); let mut index_a = a_start; let mut index_b = b_start; - loop { - if index_a == a_end || index_b == b_end { - break; - } + while index_a != a_end && index_b != b_end { sum = sum + *pA.at(index_a) * *pB.at(index_b); index_a += 1; index_b += 1; }; - return sum; + sum } - fn sv_dot< T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, >( @@ -1166,15 +1086,12 @@ fn sv_dot< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + *pA.at(i) * *pB.at(i); i += 1; }; - return sum; + sum } fn squared_diff< @@ -1193,14 +1110,12 @@ fn squared_diff< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one()); i += 1; }; - return sum; + + sum } fn dot, +Copy, +NumberTrait, +Mul, +AddEq, +Add, +Div>( @@ -1210,14 +1125,12 @@ fn dot, +Copy, +NumberTrait, +Mul, +AddEq, +Ad let n = self.len; let mut sum: T = NumberTrait::zero(); let mut i = 0_usize; - loop { - if i == n { - break (); - } + while i != n { sum += self.at(i) * vec.at(i); i += 1; }; - return sum; + + sum } fn div_element_wise, +Add, +Div, +NumberTrait, +Drop, +Copy>( @@ -1226,10 +1139,7 @@ fn div_element_wise, +Add, +Div, +NumberTrait, +Dr let m = self.len; let mut i = 0_usize; - loop { - if i == m { - break (); - } + while i != m { VecTrait::set(ref self, i, VecTrait::at(ref self, i) / elem); i += 1; }; diff --git a/src/operators/ml/svm/svm_regressor.cairo b/src/operators/ml/svm/svm_regressor.cairo index be76931e9..d69e40d80 100644 --- a/src/operators/ml/svm/svm_regressor.cairo +++ b/src/operators/ml/svm/svm_regressor.cairo @@ -10,6 +10,7 @@ use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use core::debug::PrintTrait; use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::utils::get_row; +use orion::operators::ml::POST_TRANSFORM; use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; @@ -25,15 +26,6 @@ struct SVMRegressor { support_vectors: Span, } -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - #[derive(Copy, Drop)] enum MODE { SVM_LINEAR, @@ -195,12 +187,9 @@ impl SVMRegressorImpl< (mode_, kernel_type_, sv) }; - let mut z = ArrayTrait::new(); + let mut z: Array = array![]; let mut n = 0; - loop { - if n == *X.shape.at(0) { - break; - } + while n != *X.shape.at(0) { let mut s = NumberTrait::zero(); match mode_ { MODE::SVM_LINEAR => { @@ -211,15 
+200,13 @@ impl SVMRegressorImpl< MODE::SVM_SVC => { let mut x_n = get_row(@X, n); let mut j = 0; - loop { - if j == self.n_supports { - break; - } + while j != self.n_supports { let mut sv_j = get_row(@sv, j); let d = kernel_dot(self.kernel_params, x_n, sv_j, kernel_type_); s += *self.coefficients.at(j) * d; j += 1; }; + s += *self.rho.at(0); }, } @@ -233,11 +220,13 @@ impl SVMRegressorImpl< } else { z.append(s); }; + n += 1; }; // Post Transform let mut score = TensorTrait::new(array![*X.shape.at(0)].span(), z.span()); + score = match self.post_transform { POST_TRANSFORM::NONE => score, POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, 1), @@ -246,7 +235,7 @@ impl SVMRegressorImpl< POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; - return score; + score } } diff --git a/src/operators/ml/tree_ensemble/core.cairo b/src/operators/ml/tree_ensemble/core.cairo index 08b4b6ef6..09b0f3937 100644 --- a/src/operators/ml/tree_ensemble/core.cairo +++ b/src/operators/ml/tree_ensemble/core.cairo @@ -1,12 +1,10 @@ -use core::array::ArrayTrait; use alexandria_data_structures::array_ext::SpanTraitExt; -use orion::numbers::NumberTrait; -use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; -use orion::utils::get_row; - use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use alexandria_data_structures::array_ext::ArrayTraitExt; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; +use orion::utils::get_row; #[derive(Copy, Drop, Destruct)] struct TreeEnsembleAttributes { @@ -39,7 +37,6 @@ enum NODE_MODES { LEAF } - #[generate_trait] impl TreeEnsembleImpl< T, MAG, +Drop, +Copy, +NumberTrait, +PartialOrd, +PartialEq @@ -91,18 +88,15 @@ impl TreeEnsembleImpl< index } + fn leave_index_tree(ref self: TreeEnsemble, x: Tensor) -> Tensor { - let mut outputs = ArrayTrait::new(); + let mut outputs: Array = array![]; let mut i: usize = 0; let breaker: usize = *x.shape[0]; - loop { - if i == breaker { - break; - } - + while i != breaker { let row_data: Span = get_row(@x, i); - let mut outs = ArrayTrait::new(); + let mut outs: Array = array![]; let mut tree_ids = self.tree_ids; loop { match tree_ids.pop_front() { @@ -115,6 +109,7 @@ impl TreeEnsembleImpl< Option::None => { break; } }; }; + outputs.append_all(ref outs); i += 1; }; diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index ab073a5b5..296ae8c7b 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -20,6 +20,7 @@ use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::ml::POST_TRANSFORM; use core::debug::PrintTrait; @@ -35,15 +36,6 @@ struct TreeEnsembleClassifier { post_transform: POST_TRANSFORM, } -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - /// Trait /// /// predict - Returns the top class for each of N inputs. @@ -51,7 +43,7 @@ trait TreeEnsembleClassifierTrait { /// # TreeEnsembleClassifier::predict /// /// ```rust - /// fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + /// fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); /// ``` /// /// Tree Ensemble classifier. 
Returns the top class for each of N inputs. @@ -235,7 +227,7 @@ trait TreeEnsembleClassifierTrait { /// fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span, MutMatrix::) { /// let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX); /// - /// let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + /// let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X); /// (labels, scores) /// } /// @@ -248,7 +240,9 @@ trait TreeEnsembleClassifierTrait { /// ]) /// ``` /// - fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + fn predict( + classifier: TreeEnsembleClassifier, X: Tensor + ) -> (Span, MutMatrix::); } impl TreeEnsembleClassifierImpl< @@ -267,14 +261,17 @@ impl TreeEnsembleClassifierImpl< +Div, +Mul > of TreeEnsembleClassifierTrait { - fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::) { - let leaves_index = self.ensemble.leave_index_tree(X); - let n_classes = self.classlabels.len(); + fn predict( + classifier: TreeEnsembleClassifier, X: Tensor + ) -> (Span, MutMatrix::) { + let mut classifier = classifier; + let leaves_index = classifier.ensemble.leave_index_tree(X); + let n_classes = classifier.classlabels.len(); let mut res: MutMatrix = MutMatrixImpl::new(*leaves_index.shape.at(0), n_classes); // Set base values - if self.base_values.is_some() { - let mut base_values = self.base_values.unwrap(); + if classifier.base_values.is_some() { + let mut base_values = classifier.base_values.unwrap(); let mut row: usize = 0; loop { if row == res.rows { @@ -320,12 +317,12 @@ impl TreeEnsembleClassifierImpl< let mut class_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; loop { - if i == self.class_treeids.len() { + if i == classifier.class_treeids.len() { break; } - let tid = *self.class_treeids[i]; - let nid = *self.class_nodeids[i]; + let tid = *classifier.class_treeids[i]; + let nid = *classifier.class_nodeids[i]; let mut key = PedersenHasherImpl::new(); let key: felt252 = key.hash(tid.into(), nid.into()); @@ -356,8 +353,8 @@ impl TreeEnsembleClassifierImpl< let mut key = PedersenHasherImpl::new(); let key: felt252 = key .hash( - (*self.ensemble.atts.nodes_treeids[*index]).into(), - (*self.ensemble.atts.nodes_nodeids[*index]).into() + (*classifier.ensemble.atts.nodes_treeids[*index]).into(), + (*classifier.ensemble.atts.nodes_nodeids[*index]).into() ); t_index.append(class_index.get(key).deref()); }, @@ -372,21 +369,21 @@ impl TreeEnsembleClassifierImpl< loop { match its.pop_front() { Option::Some(it) => { - match res.get(i, *self.class_ids[*it]) { + match res.get(i, *classifier.class_ids[*it]) { Option::Some(val) => { res .set( i, - *self.class_ids[*it], - val + *self.class_weights[*it] + *classifier.class_ids[*it], + val + *classifier.class_weights[*it] ); }, Option::None => { res .set( i, - *self.class_ids[*it], - *self.class_weights[*it] + *classifier.class_ids[*it], + *classifier.class_weights[*it] ); }, }; @@ -404,7 +401,7 @@ impl TreeEnsembleClassifierImpl< // Binary class let mut binary = false; let mut i: usize = 0; - let mut class_ids = self.class_ids; + let mut class_ids = classifier.class_ids; let mut class_id: usize = 0; // Get first class_id in class_ids match class_ids.pop_front() { @@ -412,7 +409,7 @@ impl TreeEnsembleClassifierImpl< Option::None => { class_id = 0; } }; loop { - if i == self.class_ids.len() { + if i == classifier.class_ids.len() { break; } match class_ids.pop_front() { @@ -444,7 +441,7 @@ impl 
TreeEnsembleClassifierImpl< }; i += 1; }; - match self.post_transform { + match classifier.post_transform { POST_TRANSFORM::NONE => { let mut i: usize = 0; loop { @@ -526,7 +523,7 @@ impl TreeEnsembleClassifierImpl< } // Post Transform - let mut new_scores = match self.post_transform { + let mut new_scores = match classifier.post_transform { POST_TRANSFORM::NONE => res, // No action required POST_TRANSFORM::SOFTMAX => res.softmax(1), POST_TRANSFORM::LOGISTIC => res.sigmoid(), @@ -540,7 +537,7 @@ impl TreeEnsembleClassifierImpl< let mut labels_list = ArrayTrait::new(); loop { match labels.pop_front() { - Option::Some(i) => { labels_list.append(*self.classlabels[*i]); }, + Option::Some(i) => { labels_list.append(*classifier.classlabels[*i]); }, Option::None => { break; } }; }; @@ -548,4 +545,3 @@ impl TreeEnsembleClassifierImpl< return (labels_list.span(), new_scores); } } - diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo index 215ad2a96..3f9998e96 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo @@ -21,6 +21,7 @@ use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::ml::POST_TRANSFORM; use core::debug::PrintTrait; @@ -37,16 +38,6 @@ struct TreeEnsembleRegressor { post_transform: POST_TRANSFORM, } - -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - #[derive(Copy, Drop)] enum AGGREGATE_FUNCTION { SUM, @@ -62,7 +53,7 @@ trait TreeEnsembleRegressorTrait { /// # TreeEnsembleRegressor::predict /// /// ```rust - /// fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); + /// fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); /// ``` /// /// Tree Ensemble regressor. Returns the regressed values for each input in N. 
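Note: across this release, `predict` drops the `ref self` receiver in favor of passing the model struct by value, so call sites no longer need a `ref` borrow. A minimal before/after sketch (bindings such as `regressor` and `X` are assumed from the surrounding doc examples):

```rust
// 0.2.3 convention: the model was passed as a mutable reference.
let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);

// 0.2.4 convention: the model is moved into the call.
let res = TreeEnsembleRegressorTrait::predict(regressor, X);
```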
@@ -221,7 +212,7 @@ trait TreeEnsembleRegressorTrait { /// /// fn test_tree_ensemble_regressor_SUM() -> MutMatrix:: { /// let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM); - /// let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); + /// let mut res = TreeEnsembleRegressorTrait::predict(regressor, X); /// res /// } /// >>> @@ -230,7 +221,7 @@ trait TreeEnsembleRegressorTrait { /// /// ``` /// - fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> MutMatrix::; + fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> MutMatrix::; } impl TreeEnsembleRegressorImpl< @@ -249,22 +240,23 @@ impl TreeEnsembleRegressorImpl< +Div, +Mul, > of TreeEnsembleRegressorTrait { - fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> MutMatrix:: { - let leaves_index = self.ensemble.leave_index_tree(X); - let n_targets = self.n_targets; + fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> MutMatrix:: { + let mut regressor = regressor; + let leaves_index = regressor.ensemble.leave_index_tree(X); + let n_targets = regressor.n_targets; let mut res: MutMatrix = MutMatrixImpl::new(*leaves_index.shape.at(0), n_targets); - let n_trees = self.ensemble.tree_ids.len(); + let n_trees = regressor.ensemble.tree_ids.len(); let mut target_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; loop { - if i == self.target_treeids.len() { + if i == regressor.target_treeids.len() { break; } - let tid = *self.target_treeids[i]; - let nid = *self.target_nodeids[i]; + let tid = *regressor.target_treeids[i]; + let nid = *regressor.target_nodeids[i]; let mut key = PedersenHasherImpl::new(); let key: felt252 = key.hash(tid.into(), nid.into()); @@ -296,8 +288,8 @@ impl TreeEnsembleRegressorImpl< let mut key = PedersenHasherImpl::new(); let key: felt252 = key .hash( - (*self.ensemble.atts.nodes_treeids[*index]).into(), - (*self.ensemble.atts.nodes_nodeids[*index]).into() + (*regressor.ensemble.atts.nodes_treeids[*index]).into(), + (*regressor.ensemble.atts.nodes_nodeids[*index]).into() ); t_index.append(target_index.get(key).deref()); }, @@ -306,20 +298,26 @@ impl TreeEnsembleRegressorImpl< }; let mut t_index = t_index.span(); - match self.aggregate_function { - AGGREGATE_FUNCTION::SUM => { compute_res_SUM(ref self, ref res, ref t_index, i); }, + match regressor.aggregate_function { + AGGREGATE_FUNCTION::SUM => { + compute_res_SUM(ref regressor, ref res, ref t_index, i); + }, AGGREGATE_FUNCTION::AVERAGE => { - compute_res_AVERAGE(ref self, ref res, ref t_index, n_trees, i); + compute_res_AVERAGE(ref regressor, ref res, ref t_index, n_trees, i); + }, + AGGREGATE_FUNCTION::MIN => { + compute_res_MIN(ref regressor, ref res, ref t_index, i); + }, + AGGREGATE_FUNCTION::MAX => { + compute_res_MAX(ref regressor, ref res, ref t_index, i); }, - AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref self, ref res, ref t_index, i); }, - AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref self, ref res, ref t_index, i); }, }; i += 1; }; // Convention is to add base_values after aggregate function - if self.base_values.is_some() { - let mut base_values = self.base_values.unwrap(); + if regressor.base_values.is_some() { + let mut base_values = regressor.base_values.unwrap(); let mut row: usize = 0; loop { if row == res.rows { @@ -346,7 +344,7 @@ impl TreeEnsembleRegressorImpl< } // Post Transform - let mut new_scores = match self.post_transform { + let mut new_scores = match regressor.post_transform { POST_TRANSFORM::NONE => res, // No action required POST_TRANSFORM::SOFTMAX => 
res.softmax(1), POST_TRANSFORM::LOGISTIC => res.sigmoid(), diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 93f9242c0..35d318b28 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -16,8 +16,8 @@ use orion::operators::tensor::core::Tensor; /// gemm - Performs General Matrix multiplication. /// grid_sample - Computes the grid sample of the input tensor and input grid. /// col2im - Rearranges column blocks back into a multidimensional image -/// conv_transpose - Performs the convolution transpose of the input data tensor and weigth tensor. -/// conv - Performs the convolution of the input data tensor and weigth tensor. +/// conv_transpose - Performs the convolution transpose of the input data tensor and weight tensor. +/// conv - Performs the convolution of the input data tensor and weight tensor. trait NNTrait { /// # NNTrait::relu /// @@ -834,7 +834,7 @@ trait NNTrait { /// ) -> Tensor /// ``` /// - /// The convolution operator consumes an input tensor and a filter (input weigth tensor), and computes the output. + /// The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output. /// /// ## Args /// @@ -971,7 +971,7 @@ trait NNTrait { /// ) -> Tensor /// ``` /// - /// The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output. + /// The convolution transpose operator consumes an input tensor and a input weight tensor, and computes the output. /// /// ## Args /// diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index 1f1aa0d48..4f9cfc1a8 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -1,8 +1,7 @@ use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; -use orion::operators::tensor::core::{stride}; - fn col2im, +NumberTrait, +Copy, +Drop, +Add, +Mul,>( data: @Tensor, @@ -15,15 +14,13 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { - let mut dilations = ArrayTrait::new(); + let mut dilations: Array = array![]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { dilations.append(1); i += 1; }; + dilations.span() }, }; @@ -31,31 +28,27 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let pads = match pads { Option::Some(pads) => pads, Option::None => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { pads.append(0); pads.append(0); i += 1; }; + pads.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { - let mut strides = ArrayTrait::new(); + let mut strides: Array = array![]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { strides.append(1); i += 1; }; + strides.span() }, }; @@ -65,28 +58,20 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let mut new_shape = array![*(*data).shape.at(0), C, bl]; let mut i = 2; - loop { - if i == (*data).shape.len() { - break; - } + while i != (*data).shape.len() { new_shape.append(*(*data).shape.at(i)); i += 1; }; + let data = data.reshape(new_shape.span()); - let mut res = ArrayTrait::new(); + let mut res: Array = 
array![]; let data_stride = stride(data.shape); let mut n = 0; - loop { - if n == *data.shape.at(0) { - break; - } + while n != *data.shape.at(0) { let mut c = 0; - loop { - if c == *data.shape.at(1) { - break; - } + while c != *data.shape.at(1) { let data_n_c = TensorTrait::new( SpanTrait::slice(data.shape, 2, data.shape.len() - 2), SpanTrait::slice( @@ -97,29 +82,25 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad @data_n_c, image_shape, block_shape, dilations, pads, strides ); let mut i = 0; - loop { - if i == out.len() { - break; - } + while i != out.len() { res.append(out.at(i)); i += 1; }; + c += 1; }; + n += 1; }; let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { new_shape.append(*image_shape.at(i)); i += 1; }; - return TensorTrait::new(new_shape.span(), res.span()); + TensorTrait::new(new_shape.span(), res.span()) } fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { @@ -145,12 +126,9 @@ fn col2im_naive_implementation< col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - let mut dim_col = ArrayTrait::new(); + let mut dim_col: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { dim_col .append( (*image_shape.at(i) @@ -162,6 +140,7 @@ fn col2im_naive_implementation< i += 1; }; + let dim_col = dim_col.span(); let stride_img = stride(image_shape); @@ -172,24 +151,15 @@ fn col2im_naive_implementation< let kernel_size = prod(kernel_shape, 0); let col_size = prod(dim_col, 0); let mut c_col = 0; - loop { - if c_col == kernel_size { - break; - } + while c_col != kernel_size { let offset = get_indices(c_col, kernel_shape).span(); let mut col = 0; - loop { - if col == col_size { - break; - } + while col != col_size { let ind_col = get_indices(col, dim_col).span(); - let mut ind_im = ArrayTrait::new(); + let mut ind_im: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads .at(i) { let neg_index = *pads.at(i) @@ -206,25 +176,26 @@ fn col2im_naive_implementation< i += 1; }; + let ind_im = ind_im.span(); if !is_out(ind_im, image_shape) { let mut index = 0; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { index += *stride_img.at(i) * *ind_im.at(i); i += 1; }; + data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); } + col += 1; }; + c_col += 1; }; - return data_im; + data_im } fn col2im_shape_check, +Copy, +Drop,>( @@ -243,13 +214,10 @@ fn col2im_shape_check, +Copy, +Drop,>( let input_length = *(*X).shape.at(1); let n_dims = output_shape.len(); - let mut n_blocks = ArrayTrait::new(); + let mut n_blocks: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { n_blocks .append( (*output_shape.at(i) @@ -267,15 +235,11 @@ fn col2im_shape_check, +Copy, +Drop,>( assert(input_length == block_size, 'input_length != block_size'); } - fn get_indices(index: usize, shape: Span,) -> Array { let mut i = index; - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let mut k = shape.len() - 1; - loop { - if k == 0 { - break; - } + while k != 0 { let m = i % *shape.at(k); res.append(m); i -= m; @@ -283,17 +247,15 @@ fn get_indices(index: usize, shape: Span,) -> Array { k -= 1; }; - let mut new_res = ArrayTrait::new(); + let mut new_res: Array = array![]; new_res.append(i); 
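+    // `i` now holds the index along the outermost dimension; the remainders in `res`
+    // were collected innermost-first, so the loop below appends them in reverse.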
let mut i = shape.len() - 1; - loop { - if i == 0 { - break; - } + while i != 0 { new_res.append(*res.at(i - 1)); i -= 1; }; - return new_res; + + new_res } fn is_out(ind: Span, shape: Span,) -> bool { @@ -312,7 +274,8 @@ fn is_out(ind: Span, shape: Span,) -> bool { } n += 1; }; - return is_out; + + is_out } fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( @@ -320,12 +283,10 @@ fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul< ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; + + prod } diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index 926bcb2b5..ac72c336d 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -1,14 +1,10 @@ -use core::traits::Into; -use core::traits::IndexView; -use core::array::ArrayTrait; +use core::debug::PrintTrait; + use orion::numbers::NumberTrait; use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; use orion::operators::tensor::core::{stride}; -use core::clone::Clone; - -use core::debug::PrintTrait; #[derive(Copy, Drop)] enum AUTO_PAD { @@ -41,66 +37,59 @@ fn conv< strides: Option>, ) -> Tensor { let nd = (*X).shape.len() - 2; - + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { - let mut dilations = ArrayTrait::new(); + let mut dilations: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { dilations.append(1); i += 1; }; + dilations.span() }, }; let kernel_shape = match kernel_shape { Option::Some(kernel_shape) => kernel_shape, Option::None => { - let mut kernel_shape = ArrayTrait::new(); + let mut kernel_shape: Array = array![]; let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; + kernel_shape.span() }, }; let pads = match pads { Option::Some(pads) => pads, Option::None => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { pads.append(0); pads.append(0); i += 1; }; + pads.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { - let mut strides = ArrayTrait::new(); + let mut strides: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { strides.append(1); i += 1; }; + strides.span() }, }; @@ -117,8 +106,8 @@ fn conv< if group > 1 { let sN = *(*X).shape.at(0); - let mut res_b = ArrayTrait::new(); - let mut res_cv = ArrayTrait::new(); + let mut res_b: Array = array![]; + let mut res_cv = array![]; let mut td = 0; let mg = *(*W).shape.at(0) / group; @@ -127,37 +116,27 @@ fn conv< let X_stride = stride((*X).shape); let mut gx_shape = array![1, dw]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { gx_shape.append(*(*X).shape.at(i)); i += 1; }; + let gx_shape = gx_shape.span(); let W_stride = stride((*W).shape); let mut gw_shape = array![mg]; let mut i = 1; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { gw_shape.append(*(*W).shape.at(i)); i += 1; }; 
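+        // Weight view for a single group: gw_shape = [mg, ...rest of W's dims],
+        // where mg = M / group output channels (computed above as `let mg`).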
+ let gw_shape = gw_shape.span(); let mut b = 0; - loop { - if b == sN { - break; - } + while b != sN { let mut g = 0; - loop { - if g == group { - break; - } + while g != group { let gx = TensorTrait::new( gx_shape, SpanTrait::slice( @@ -181,13 +160,16 @@ fn conv< Option::Some(pads), Option::Some(strides) ); + if b == 0 { td += *cv.shape.at(1); } + res_b.append(b); res_cv.append(cv); g += 1; }; + b += 1; }; @@ -199,61 +181,47 @@ fn conv< let mut cv = *res_cv.at(0); let mut i = 2; - loop { - if i == cv.shape.len() { - break; - } + while i != cv.shape.len() { final_shape.append(*cv.shape.at(i)); i += 1; }; + let final_shape = final_shape.span(); - let mut final = ArrayTrait::new(); + let mut final: Array = array![]; let mut p = 0; let mut i = 0; - loop { - if i == res_b.len() { - break; - } + while i != res_b.len() { let cv = *res_cv.at(i); let mut n = 0; - loop { - if n == cv.data.len() { - break; - } + while n != cv.data.len() { final.append(*cv.data.at(n)); n += 1; }; + p += *cv.shape.at(1); if p >= td { p = 0; } + i += 1; }; + let final = final.span(); let final = match B { Option::Some(B) => { - let mut final_b = ArrayTrait::new(); + let mut final_b: Array = array![]; let final_stride = stride(final_shape); let mut i = 0; - loop { - if i == *final_shape.at(0) { - break; - } + while i != *final_shape.at(0) { let mut j = 0; - loop { - if j == B.len() { - break; - } + while j != B.len() { let mut k = 0; - loop { - if k == *final_stride.at(1) { - break; - } + while k != *final_stride.at(1) { final_b .append( *final.at(i * *final_stride.at(0) + j * *final_stride.at(1) + k) @@ -261,10 +229,13 @@ fn conv< ); k += 1; }; + j += 1; }; + i += 1; }; + final_b.span() }, Option::None => { final }, @@ -277,37 +248,32 @@ fn conv< if *dilations.at(0) != 1 || min(dilations.clone()) != max(dilations.clone()) { // computation of the dilated kernel let nd = dilations.len(); - let mut new_kernel_shape = ArrayTrait::new(); - let mut new_shape = ArrayTrait::new(); + let mut new_kernel_shape: Array = array![]; + let mut new_shape: Array = array![]; new_shape.append_span(SpanTrait::slice((*W).shape, 0, (*W).shape.len() - nd)); let mut i = 0; - loop { - if i == dilations.len() { - break; - } + while i != dilations.len() { let d = *dilations.at(i); let di = (*W).shape.len() - nd + i; new_shape.append(*(*W).shape.at(di) + (*(*W).shape.at(di) - 1) * (d - 1)); new_kernel_shape.append(*kernel_shape.at(i) + (*kernel_shape.at(i) - 1) * (d - 1)); i += 1; }; + let new_shape = new_shape.span(); let new_w_strides = stride(new_shape); let mut new_w = NullableVecImpl::new(); new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero()); - let mut indices = ArrayTrait::new(); + let mut indices = array![]; indices.append(arange(0, *new_shape.at(0), 1)); indices.append(arange(0, *new_shape.at(1), 1)); let mut i = 0; - loop { - if i == dilations.len() { - break; - } + while i != dilations.len() { let d = *dilations.at(i); let di = (*W).shape.len() - nd + i; indices.append(arange(0, *new_shape.at(di), d)); @@ -316,35 +282,28 @@ fn conv< let set_of_all_indices = cartesian(indices.span()); - let mut new_w_arr = ArrayTrait::new(); + let mut new_w_arr: Array = array![]; let mut i = 0; let mut prev = 0; - loop { - if i == (*W).data.len() { - break; - } + while i != (*W).data.len() { let nd_index = *set_of_all_indices.at(i); let mut flatten_index = 0; let mut j = 0; - loop { - if j == nd_index.len() { - break; - } + while j != nd_index.len() { flatten_index += *nd_index.at(j) * *new_w_strides.at(j); j += 1; }; if 
flatten_index > prev + 1 { let mut j = prev + 1; - loop { - if j == flatten_index { - break; - } + while j != flatten_index { new_w_arr.append(NumberTrait::zero()); + j += 1; }; - j += 1; } + new_w_arr.append(*(*W).data.at(i)); new_w.set(flatten_index, *(*W).data.at(i)); prev = flatten_index; @@ -355,13 +314,10 @@ fn conv< let pads = match auto_pad { AUTO_PAD::NOTSET => { pads }, AUTO_PAD::SAME_UPPER => { - let mut head = ArrayTrait::new(); - let mut tail = ArrayTrait::new(); + let mut head: Array = array![]; + let mut tail: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; @@ -371,18 +327,16 @@ fn conv< tail.append(pad_tail); i += 1; }; + head.append_span(tail.span()); let pads = head.span(); pads }, AUTO_PAD::SAME_LOWER => { - let mut head = ArrayTrait::new(); - let mut tail = ArrayTrait::new(); + let mut head: Array = array![]; + let mut tail: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; @@ -392,18 +346,16 @@ fn conv< tail.append(pad_tail); i += 1; }; + head.append_span(tail.span()); let pads = head.span(); pads }, AUTO_PAD::VALID => { - let mut head = ArrayTrait::new(); - let mut tail = ArrayTrait::new(); + let mut head: Array = array![]; + let mut tail: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; @@ -413,6 +365,7 @@ fn conv< tail.append(pad_tail); i += 1; }; + head.append_span(tail.span()); let pads = head.span(); pads @@ -444,26 +397,19 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == h_out { - break; - } + while k != h_out { res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); k += 1; }; + j += 1; }; + i += 1; }; }, @@ -471,27 +417,15 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice((*W).data, nw * sC * kh + c * kh, kh); let mut io = bh; - loop { - if io >= eh.into() { - break; - } + while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); @@ -510,11 +444,13 @@ fn conv< } else { dot(img, w) }; + let hr = if hr < 0 { *res_strides.at(1) - hr.into() } else { hr.into() }; + res .set( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr, @@ -522,23 +458,26 @@ fn conv< + s ); } + io += sth.into(); }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; + return TensorTrait::new(res_shape, res_data.span()); } @@ -577,26 +516,14 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j =
0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == h_out { - break; - } + while k != h_out { let mut l = 0; - loop { - if l == w_out { - break; - } + while l != w_out { res .set( i * *res_strides.at(0) @@ -607,10 +534,13 @@ fn conv< ); l += 1; }; + k += 1; }; + j += 1; }; + i += 1; }; }, @@ -618,29 +548,17 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice( (*W).data, nw * (sC * kh * kw) + c * (kh * kw), kh * kw ); let mut io = bh; - loop { - if io >= eh.into() { - break; - } + while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); @@ -648,22 +566,16 @@ fn conv< let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let mut jo = bw; - loop { - if jo >= ew.into() { - break; - } + while jo < ew.into() { let wr = (jo - bw) / stw.into(); if wr < w_out.into() { let j = jo + (kw % 2).into(); let iw1 = I32Number::max(0, j + ow).into(); let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); - let mut img = ArrayTrait::new(); + let mut img: Array = array![]; let mut ihi = ih1; - loop { - if ihi == ih2 { - break; - } + while ihi != ih2 { img .append_span( SpanTrait::slice( @@ -677,6 +589,7 @@ fn conv< ); ihi += 1; }; + let img = img.span(); let s = if w.len() != img.len() { @@ -688,18 +601,16 @@ fn conv< let jw2 = I32Number::min(sW.into() - (j + ow), kw.into()) .into(); - let mut w_ = ArrayTrait::new(); + let mut w_: Array = array![]; let mut jhj = jh1; - loop { - if jhj == jh2 { - break; - } + while jhj != jh2 { w_ .append_span( SpanTrait::slice(w, jhj * kw + jw1, jw2 - jw1) ); jhj += 1; }; + let w_ = w_.span(); assert(w_.len() == img.len(), 'unexpected w and img len'); @@ -740,24 +651,26 @@ fn conv< jo += stw.into(); }; } + io += sth.into(); }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; + return TensorTrait::new(res_shape, res_data.span()); } @@ -806,31 +719,16 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == h_out { - break; - } + while k != h_out { let mut l = 0; - loop { - if l == w_out { - break; - } + while l != w_out { let mut m = 0; - loop { - if m == z_out { - break; - } + while m != z_out { res .set( i * *res_strides.at(0) @@ -842,12 +740,16 @@ fn conv< ); m += 1; }; + l += 1; }; + k += 1; }; + j += 1; }; + i += 1; }; }, @@ -855,29 +757,17 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice( (*W).data, nw * (sC * kh * kw * kz) + c * (kh * kw * kz), kh * kw * kz ); let mut io = bh; - loop { - if io >= eh.into() { - break; - } + while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); @@ -885,10 +775,7 @@ fn conv< let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let mut jo = bw; - loop { - if jo >= ew.into() { - break; - } 
+ while jo < ew.into() { let wr = (jo - bw) / stw.into(); if wr < w_out.into() { let j = jo + (kw % 2).into(); @@ -896,10 +783,7 @@ fn conv< let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); let mut zo = bz; - loop { - if zo >= ez.into() { - break; - } + while zo < ez.into() { let zr = (zo - bz) / stz.into(); if zr < z_out.into() { let z = zo + (kz % 2).into(); @@ -907,17 +791,11 @@ fn conv< let iz2 = I32Number::min(z + oz + kz.into(), sW.into()) .into(); - let mut img = ArrayTrait::new(); + let mut img: Array = array![]; let mut ihi = ih1; - loop { - if ihi == ih2 { - break; - } + while ihi != ih2 { let mut iwi = iw1; - loop { - if iwi == iw2 { - break; - } + while iwi != iw2 { img .append_span( SpanTrait::slice( @@ -932,8 +810,10 @@ fn conv< ); iwi += 1; }; + ihi += 1; }; + let img = img.span(); let s = if w.len() != img.len() { @@ -955,17 +835,11 @@ fn conv< ) .into(); - let mut w_ = ArrayTrait::new(); + let mut w_: Array = array![]; let mut jhj = jh1; - loop { - if jhj == jh2 { - break; - } + while jhj != jh2 { let mut jwj = jw1; - loop { - if jwj == jw2 { - break; - } + while jwj != jw2 { w_ .append_span( SpanTrait::slice( @@ -976,8 +850,10 @@ fn conv< ); jwj += 1; }; + jhj += 1; }; + let w_ = w_.span(); assert( @@ -1025,6 +901,7 @@ fn conv< + s ); } + zo += stz.into(); }; } @@ -1032,24 +909,26 @@ fn conv< jo += stw.into(); }; } + io += sth.into(); }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; + return TensorTrait::new(res_shape, res_data.span()); } @@ -1063,18 +942,15 @@ fn conv< let w_stride = stride((*W).shape); let x_stride = stride((*X).shape); - let mut shape_out = ArrayTrait::new(); - let mut o_index = ArrayTrait::::new(); - let mut b_index = ArrayTrait::::new(); - let mut e_index = ArrayTrait::new(); + let mut shape_out: Array = array![]; + let mut o_index: Array = array![]; + let mut b_index: Array = array![]; + let mut e_index: Array = array![]; - let mut range_len = ArrayTrait::new(); + let mut range_len: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { shape_out .append( ((*(*X).shape.at(2 + i) - *kernel_shape.at(i) + *pads.at(i) + *pads.at(i + nd)) @@ -1109,26 +985,19 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == *res_strides.at(1) { - break; - } + while k != *res_strides.at(1) { res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); k += 1; }; + j += 1; }; + i += 1; }; }, @@ -1136,37 +1005,22 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice( (*W).data, nw * *w_stride.at(0) + c * *w_stride.at(1), *w_stride.at(1) ); let mut i = 0; - loop { - if i == *range_len.at(0) * *range_stride.at(0) { - break; - } - let mut io_index = ArrayTrait::::new(); - let mut r_index = ArrayTrait::::new(); + while i != *range_len.at(0) * *range_stride.at(0) { + let mut io_index: Array = array![]; + let mut r_index: Array = array![]; let mut flatten_index = i; let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { let (n_index, rem) = 
DivRem::div_rem( flatten_index, (*range_stride.at(nx)).try_into().unwrap() ); @@ -1179,16 +1033,13 @@ fn conv< }; if r_index_check(r_index.span(), shape_out) { - let mut indices = ArrayTrait::::new(); - let mut i1_index = ArrayTrait::new(); - let mut i2_index = ArrayTrait::new(); - let mut idiff_index = ArrayTrait::new(); + let mut indices: Array = array![]; + let mut i1_index: Array = array![]; + let mut i2_index: Array = array![]; + let mut idiff_index: Array = array![]; let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { indices.append(*io_index.at(nx) + (*kernel_shape.at(nx) % 2).into()); i1_index .append( @@ -1210,8 +1061,9 @@ fn conv< } nx += 1; }; + let i1_index = i1_index.span(); - let mut img = ArrayTrait::new(); + let mut img: Array = array![]; let img = if nx == 1 { let img = SpanTrait::slice( @@ -1224,18 +1076,12 @@ fn conv< let i_stride = stride(idiff_index.span()); let mut ii = 0; - loop { - if ii == *i_stride.at(0) * *idiff_index.at(0) { - break; - } + while ii != *i_stride.at(0) * *idiff_index.at(0) { let mut flatten_index = ii; let mut start = n * *x_stride.at(0) + c * *x_stride.at(1); let mut nx = 0; - loop { - if nx == nd - 1 { - break; - } + while nx != nd - 1 { let (ii_index, rem) = DivRem::div_rem( flatten_index, (*i_stride.at(nx)).try_into().unwrap() ); @@ -1244,6 +1090,7 @@ fn conv< start += (*i1_index.at(nx) + ii_index) * *x_stride.at(2 + nx); nx += 1; }; + img .append_span( SpanTrait::slice( @@ -1254,19 +1101,17 @@ fn conv< ); ii += 1; }; + img.span() }; let s = if w.len() != img.len() { - let mut j1_index = ArrayTrait::new(); - let mut j2_index = ArrayTrait::new(); - let mut jdiff_index = ArrayTrait::new(); + let mut j1_index: Array = array![]; + let mut j2_index: Array = array![]; + let mut jdiff_index: Array = array![]; let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { j1_index .append( I32Number::max(0, -*indices.at(nx) - *o_index.at(nx)).into() @@ -1286,9 +1131,10 @@ fn conv< } nx += 1; }; + let j1_index = j1_index.span(); - let mut w_ = ArrayTrait::new(); + let mut w_: Array = array![]; let w_ = if nx == 1 { let w_ = SpanTrait::slice( @@ -1301,18 +1147,12 @@ fn conv< let j_stride = stride(jdiff_index.span()); let mut jj = 0; - loop { - if jj == *j_stride.at(0) * *jdiff_index.at(0) { - break; - } + while jj != *j_stride.at(0) * *jdiff_index.at(0) { let mut flatten_index = jj; let mut start = 0; let mut nx = 0; - loop { - if nx == nd - 1 { - break; - } + while nx != nd - 1 { let (jj_index, rem) = DivRem::div_rem( flatten_index, (*j_stride.at(nx)).try_into().unwrap() ); @@ -1331,8 +1171,10 @@ fn conv< ); jj += 1; }; + w_.span() }; + dot(img, w_) } else { dot(img, w) @@ -1341,37 +1183,35 @@ fn conv< let mut res_index = n * *res_strides.at(0) + nw * *res_strides.at(1); let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { res_index += (*r_index.at(nx)).into() * *res_strides.at(2 + nx); nx += 1; }; res.set(res_index, res.at(res_index) + s); }; + i += 1 }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; - return TensorTrait::new(res_shape, res_data.span()); -} + TensorTrait::new(res_shape, res_data.span()) +} fn r_index_check(r_index: Span, shape_out: Span) -> bool { let mut i = 0; @@ -1384,25 +1224,22 @@ fn r_index_check(r_index: Span, shape_out: Span) -> bool { } i += 1; }; - return flag; -} + flag +} fn prod, 
+Copy, +NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; -} + prod +} fn min(mut a: Span) -> usize { assert(a.len() > 0, 'span cannot be empty'); @@ -1410,33 +1247,24 @@ fn min(mut a: Span) -> usize { let mut min = *a.at(0); loop { match a.pop_front() { - Option::Some(v) => { - if *v < min { - min = *v; - }; - }, - Option::None => { - break min; - } + Option::Some(v) => { if *v < min { + min = *v; + }; }, + Option::None => { break min; } }; } } - fn max(mut a: Span) -> usize { assert(a.len() > 0, 'span cannot be empty'); let mut max = *a.at(0); loop { match a.pop_front() { - Option::Some(v) => { - if *v > max { - max = *v; - }; - }, - Option::None => { - break max; - } + Option::Some(v) => { if *v > max { + max = *v; + }; }, + Option::None => { break max; } }; } } @@ -1444,16 +1272,14 @@ fn max(mut a: Span) -> usize { fn arange(start: usize, end: usize, step: usize) -> Span { assert((end - start) % step == 0, 'incompatible step value'); - let mut arr = ArrayTrait::new(); + let mut arr: Array = array![]; let mut i = start; - loop { - if i >= end { - break; - } + while i < end { arr.append(i); i += step; }; - return arr.span(); + + arr.span() } @@ -1469,24 +1295,18 @@ fn cartesian(mut arrays: Span>,) -> Span> { }; let mut i = 0; - let mut size_arrays = ArrayTrait::new(); - loop { - if i == arrays.len() { - break; - } + let mut size_arrays: Array = array![]; + while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); - i += 1; }; + let size_arrays = size_arrays.span(); - let mut output_arrays = ArrayTrait::>::new(); + let mut output_arrays = array![]; let mut m = n; let mut i = 0; - loop { - if i == arrays.len() { - break; - } + while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); @@ -1494,74 +1314,58 @@ fn cartesian(mut arrays: Span>,) -> Span> { output_arrays.append(out); i += 1; }; + let output_arrays = output_arrays.span(); let mut i = 0; let mut ret = ArrayTrait::new(); - loop { - if i == n { - break; - } + while i != n { let mut j = 0; - let mut x = ArrayTrait::new(); - loop { - if j == arrays.len() { - break; - } - + let mut x: Array = array![]; + while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; + ret.append(x.span()); i += 1; }; - return ret.span(); + ret.span() } fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { let mut size = array.len(); let mut i = 0; - loop { - if i == index { - break; - } + while i != index { let mut j = 1; - loop { - if j == *size_array.at(index - 1 - i) { - break; - } + while j != *size_array.at(index - 1 - i) { let mut k = 0; - loop { - if k == size { - break; - } + while k != size { array.append(*array.at(k)); k += 1; }; + j += 1; }; + size = size * *size_array.at(index - 1 - i); i += 1; }; + array } fn repeat(array: Span, m: usize,) -> Array { - let mut out = ArrayTrait::new(); + let mut out: Array = array![]; let mut j = 0; - loop { - if j == array.len() { - break; - } + while j != array.len() { let mut k = 0; - loop { - if k == m { - break; - } + while k != m { out.append(*array.at(j)); k += 1; }; + j += 1; }; @@ -1575,13 +1379,10 @@ fn dot< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == a.len() { - break; - } + while i != a.len() { sum = sum + *a.at(i) * *b.at(i); i += 1; }; - return sum; 
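// Illustrative sketch (editor's example; `dot_u32` is hypothetical, not part of this PR):
// the same dot-product shape as the generic `dot` above, monomorphised to u32, showing the
// PR's convention of ending a function with a tail expression instead of `return`.
fn dot_u32(a: Span<u32>, b: Span<u32>) -> u32 {
    assert(a.len() == b.len(), 'spans must be equal length');
    let mut sum: u32 = 0;
    let mut i = 0;
    while i != a.len() {
        sum = sum + *a.at(i) * *b.at(i);
        i += 1;
    };
    sum
}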
+ sum } diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index bd324e0d6..f8f810558 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -1,7 +1,7 @@ use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; -use orion::operators::tensor::core::{stride}; #[derive(Copy, Drop)] enum AUTO_PAD { @@ -33,61 +33,53 @@ fn conv_transpose< let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { - let mut dilations = ArrayTrait::new(); + let mut dilations: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { dilations.append(1); i += 1; }; + dilations.span() }, }; let kernel_shape = match kernel_shape { Option::Some(kernel_shape) => kernel_shape, Option::None => { - let mut kernel_shape = ArrayTrait::new(); + let mut kernel_shape: Array = array![]; let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; + kernel_shape.span() }, }; let output_padding = match output_padding { Option::Some(output_padding) => output_padding, Option::None => { - let mut output_padding = ArrayTrait::new(); + let mut output_padding: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { output_padding.append(0); output_padding.append(0); i += 1; }; + output_padding.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { - let mut strides = ArrayTrait::new(); + let mut strides: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { strides.append(1); i += 1; }; + strides.span() }, }; @@ -98,12 +90,9 @@ fn conv_transpose< let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -113,23 +102,23 @@ fn conv_transpose< ); i += 1; }; + output_shape.span() }, }; + (pads, n_dims, output_shape) }, Option::None => { let (pads, n_dims, output_shape) = match auto_pad { AUTO_PAD::NOTSET => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == strides.len() * 2 { - break; - } + while i != strides.len() * 2 { pads.append(0); i += 1; }; + let pads = pads.span(); let n_dims = (*X).shape.len() - 2; @@ -137,13 +126,9 @@ fn conv_transpose< let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } - + while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -153,6 +138,7 @@ fn conv_transpose< ); i += 1; }; + output_shape.span() }, }; @@ -163,25 +149,20 @@ fn conv_transpose< let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i 
== strides.len() { - break; - } + while i != strides.len() { output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); i += 1; }; + output_shape.span() }, }; - let mut total_padding = ArrayTrait::new(); + let mut total_padding: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { total_padding .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -191,51 +172,43 @@ fn conv_transpose< ); i += 1; }; + let total_padding = total_padding.span(); - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) / 2); i += 1; }; + let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); i += 1; }; + (pads.span(), pads.len() / 2, output_shape) }, AUTO_PAD::SAME_LOWER => { let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == strides.len() { - break; - } + while i != strides.len() { output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); i += 1; }; + output_shape.span() }, }; - let mut total_padding = ArrayTrait::new(); + let mut total_padding: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { total_padding .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -245,50 +218,42 @@ fn conv_transpose< ); i += 1; }; + let total_padding = total_padding.span(); - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); i += 1; }; + let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) / 2); i += 1; }; + (pads.span(), pads.len() / 2, output_shape) }, AUTO_PAD::VALID => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == strides.len() * 2 { - break; - } + while i != strides.len() * 2 { pads.append(0); i += 1; }; + let pads = pads.span(); let n_dims = (*X).shape.len() - 2; let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -298,12 +263,15 @@ fn conv_transpose< ); i += 1; }; + output_shape.span() }, }; + (pads, n_dims, output_shape) }, }; + (pads, n_dims, output_shape) }, }; @@ -312,15 +280,13 @@ fn conv_transpose< Option::None => { 1 }, }; - let mut kernel_shape = ArrayTrait::new(); + let mut kernel_shape: Array = array![]; let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; + let kernel_shape = kernel_shape.span(); let kernel_size = prod(kernel_shape, 0); @@ -332,14 +298,11 @@ fn conv_transpose< let n = prod((*X).shape, 2); let k = C / group; - let mut final = ArrayTrait::new(); + let mut final: Array = array![]; if group == 1 { let mut image_id = 0; - loop { - if image_id 
== *(*X).shape.at(0) { - break; - } + while image_id != *(*X).shape.at(0) { let w_t = TensorTrait::new(array![k, m].span(), (*W).data) .transpose(array![1, 0].span()); @@ -349,10 +312,7 @@ fn conv_transpose< let gemmc = gemm .reshape(array![num_output_channels, m / num_output_channels, n].span()); let mut c = 0; - loop { - if c == num_output_channels { - break; - } + while c != num_output_channels { let gemmc_c = TensorTrait::new( array![m / num_output_channels, n].span(), SpanTrait::slice( @@ -367,103 +327,78 @@ fn conv_transpose< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res.set(i, res.at(i) + *(*B).data.at(c)); i += 1; }; }, Option::None => {}, } + c += 1; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { final.append(res.at(i)); i += 1; }; }; + image_id += 1; }; } else { - let mut output_array = ArrayTrait::new(); + let mut output_array: Array> = array![]; let mut i = 0; let mut output_size = 1; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { output_size *= *output_shape.at(i); i += 1; }; // Computation of conv transposition per group let mut group_id = 0; - loop { - if group_id == group { - break; - } - let mut group_X = ArrayTrait::new(); - let mut group_W = ArrayTrait::new(); + while group_id != group { + let mut group_X: Array = array![]; + let mut group_W: Array = array![]; let mut image_id = 0; - loop { - if image_id == *(*X).shape.at(0) { - break; - } + while image_id != *(*X).shape.at(0) { let start = image_id * n * C + (group_id * C / group) * n; let end = image_id * n * C + ((group_id + 1) * C / group) * n; let mut i = start; - loop { - if i == end { - break; - } + while i != end { group_X.append(*(*X).data.at(i)); - i += 1; }; + image_id += 1; }; let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size; let mut i = start; - loop { - if i == end { - break; - } + while i != end { group_W.append(*(*W).data.at(i)); i += 1; }; - let mut shape_X = ArrayTrait::new(); + let mut shape_X: Array = array![]; shape_X.append(*(*X).shape.at(0)); shape_X.append(C / group); let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { shape_X.append(*(*X).shape.at(i)); i += 1; }; - let mut shape_W = ArrayTrait::new(); + let mut shape_W: Array = array![]; shape_W.append(C / group); let mut i = 1; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { shape_W.append(*(*W).shape.at(i)); i += 1; }; @@ -492,47 +427,39 @@ fn conv_transpose< group_id += 1; }; + let output_array = output_array.span(); // Sorting result per item of the batch // output size : N (batch size) x num_output_channels x output_shape let mut image_id = 0; - loop { - if image_id == *(*X).shape.at(0) { - break; - } + while image_id != *(*X).shape.at(0) { let mut group_id = 0; - loop { - if group_id == group { - break; - } + while group_id != group { let group_output = *output_array.at(group_id); let mut i = image_id * output_size * (num_output_channels / group); - loop { - if i == (image_id + 1) * output_size * (num_output_channels / group) { - break; - } + while i != (image_id + 1) * output_size * (num_output_channels / group) { final.append(*group_output.at(i)); i += 1; }; + group_id += 1; }; + image_id += 1; }; } + let mut shape = array![*(*X).shape.at(0), num_output_channels]; let mut i = 0; - loop { - if i == output_shape.len() { - 
break; - } + while i != output_shape.len() { shape.append(*output_shape.at(i)); i += 1; }; - return TensorTrait::new(shape.span(), final.span()); + TensorTrait::new(shape.span(), final.span()) } fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { @@ -558,12 +485,9 @@ fn col2im_naive_implementation< col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - let mut dim_col = ArrayTrait::new(); + let mut dim_col: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { dim_col .append( (*image_shape.at(i) @@ -575,6 +499,7 @@ fn col2im_naive_implementation< i += 1; }; + let dim_col = dim_col.span(); let stride_img = stride(image_shape); @@ -585,24 +510,15 @@ fn col2im_naive_implementation< let kernel_size = prod(kernel_shape, 0); let col_size = prod(dim_col, 0); let mut c_col = 0; - loop { - if c_col == kernel_size { - break; - } + while c_col != kernel_size { let offset = get_indices(c_col, kernel_shape).span(); let mut col = 0; - loop { - if col == col_size { - break; - } + while col != col_size { let ind_col = get_indices(col, dim_col).span(); - let mut ind_im = ArrayTrait::new(); + let mut ind_im: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads .at(i) { let neg_index = *pads.at(i) @@ -619,25 +535,26 @@ fn col2im_naive_implementation< i += 1; }; + let ind_im = ind_im.span(); if !is_out(ind_im, image_shape) { let mut index = 0; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { index += *stride_img.at(i) * *ind_im.at(i); i += 1; }; + data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); } + col += 1; }; + c_col += 1; }; - return data_im; + data_im } fn col2im_shape_check, +Copy, +Drop,>( @@ -656,13 +573,10 @@ fn col2im_shape_check, +Copy, +Drop,>( let input_length = *(*X).shape.at(1); let n_dims = output_shape.len(); - let mut n_blocks = ArrayTrait::new(); + let mut n_blocks: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { n_blocks .append( (*output_shape.at(i) @@ -683,12 +597,9 @@ fn col2im_shape_check, +Copy, +Drop,>( fn get_indices(index: usize, shape: Span,) -> Array { let mut i = index; - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let mut k = shape.len() - 1; - loop { - if k == 0 { - break; - } + while k != 0 { let m = i % *shape.at(k); res.append(m); i -= m; @@ -696,17 +607,15 @@ fn get_indices(index: usize, shape: Span,) -> Array { k -= 1; }; - let mut new_res = ArrayTrait::new(); + let mut new_res: Array = array![]; new_res.append(i); let mut i = shape.len() - 1; - loop { - if i == 0 { - break; - } + while i != 0 { new_res.append(*res.at(i - 1)); i -= 1; }; - return new_res; + + new_res } fn is_out(ind: Span, shape: Span,) -> bool { @@ -725,22 +634,20 @@ fn is_out(ind: Span, shape: Span,) -> bool { } n += 1; }; - return is_out; -} + is_out +} fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; + + prod } diff --git a/src/operators/nn/functional/depth_to_space.cairo b/src/operators/nn/functional/depth_to_space.cairo index c9efe3f66..161ea46ad 100644 --- a/src/operators/nn/functional/depth_to_space.cairo +++ 
b/src/operators/nn/functional/depth_to_space.cairo @@ -1,15 +1,9 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - /// Cf: NNTrait::depth_to_space docstring fn depth_to_space< T, @@ -24,23 +18,26 @@ fn depth_to_space< >( tensor: Tensor, blocksize: usize, mode: felt252 ) -> Tensor { - assert!((tensor.shape).len() == 4, "Unexpected shape 4."); + assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); + let b = (tensor.shape).at(0); let C = (tensor.shape).at(1); let H = (tensor.shape).at(2); let W = (tensor.shape).at(3); let finalshape = array![*b, *C / (blocksize * blocksize), *H * blocksize, *W * blocksize]; + if mode == 'DCR' { let tmpshape = array![*b, blocksize, blocksize, *C / (blocksize * blocksize), *H, *W]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span()); - return transposed.reshape(target_shape: finalshape.span()); - } - else { + + transposed.reshape(target_shape: finalshape.span()) + } else { // assert mode == "CRD" let tmpshape = array![*b, *C / (blocksize * blocksize), blocksize, blocksize, *H, *W]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span()); - return transposed.reshape(target_shape: finalshape.span()); + + transposed.reshape(target_shape: finalshape.span()) } } diff --git a/src/operators/nn/functional/gemm.cairo b/src/operators/nn/functional/gemm.cairo index c37bda880..f1a93e2e2 100644 --- a/src/operators/nn/functional/gemm.cairo +++ b/src/operators/nn/functional/gemm.cairo @@ -1,4 +1,3 @@ -use alexandria_data_structures::array_ext::SpanTraitExt; use core::array::SpanTrait; use orion::numbers::NumberTrait; @@ -39,26 +38,16 @@ fn gemm< NumberTrait::one() }; - if transA == true { + if transA { A = A.transpose(array![1, 0].span()); } - if transB == true { + if transB { B = B.transpose(array![1, 0].span()); } match C { - Option::Some(c) => { - let broadcast_c_shape = if c.shape.len() == 1 { - array![1].span().concat(c.shape) - } else { - c.shape - }; - - let c = Tensor { shape: broadcast_c_shape, data: c.data }; - - return mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta); - }, - Option::None => { return mul_by_scalar(@A.matmul(@B), alpha); } + Option::Some(c) => { mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta) }, + Option::None(_) => { mul_by_scalar(@A.matmul(@B), alpha) } } } diff --git a/src/operators/nn/functional/grid_sample.cairo b/src/operators/nn/functional/grid_sample.cairo index ed1cb01b6..aed560e37 100644 --- a/src/operators/nn/functional/grid_sample.cairo +++ b/src/operators/nn/functional/grid_sample.cairo @@ -1,12 +1,10 @@ -use core::option::OptionTrait; -use core::traits::TryInto; -use orion::numbers::NumberTrait; -use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; -use orion::operators::vec::{NullableVec, NullableVecImpl}; -use orion::operators::tensor::core::{stride}; use core::debug::PrintTrait; + use orion::numbers::FP16x16; -use orion::operators::tensor::{FP16x16Tensor}; +use orion::numbers::NumberTrait; +use 
orion::operators::tensor::core::{stride}; +use orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; #[derive(Copy, Drop)] enum MODE { @@ -77,7 +75,7 @@ fn grid_sample< let border = prepare_border(X, dims, align_corner); - let mut y_dims = array![N, C]; + let mut y_dims: Array = array![N, C]; y_dims.append_span(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); let y_dims = y_dims.span(); @@ -85,32 +83,23 @@ fn grid_sample< return TensorTrait::new(array![].span(), array![].span()); } - let mut Y = ArrayTrait::new(); + let mut Y: Array = array![]; let mut n = 0; - loop { - if n == N { - break; - } + while n != N { let grid_data = SpanTrait::slice((*grid).data, n * *grid_stride.at(0), *grid_stride.at(0)); let grid_data_stride = SpanTrait::slice(grid_stride, 1, grid_stride.len() - 1); let mut c = 0; - loop { - if c == C { - break; - } + while c != C { let X_data = SpanTrait::slice( (*X).data, n * *x_stride.at(0) + c * *x_stride.at(1), *x_stride.at(1) ); let X_data_stride = SpanTrait::slice(x_stride, 2, grid_stride.len() - 2); let all_coords = get_all_coords(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); - let mut ix = 0; - loop { - if ix == all_coords.len() { - break; - } + let mut ix = 0; + while ix != all_coords.len() { let ox = *all_coords.at(ix); let nx = get_sub(grid_data, grid_data_stride, ox); let nx = reverse(nx); @@ -122,14 +111,10 @@ fn grid_sample< MODE::CUBIC => { x }, }; - let mut new_x = ArrayTrait::new(); + let mut new_x: Array = array![]; let mut i = 0; - loop { - if i == x.len() { - break; - } + while i != x.len() { let v = *x.at(i); - let mut x_min = *border.at(i); let mut x_max = *border.at(i + num_dims); let new_v = if v < x_min || v > x_max { @@ -149,9 +134,11 @@ fn grid_sample< } else { v }; + new_x.append(new_v); i += 1; }; + let x = new_x.span(); let y = match mode { @@ -169,15 +156,18 @@ fn grid_sample< ) }, }; - Y.append(y); + Y.append(y); ix += 1; }; + c += 1; }; + n += 1; }; - return TensorTrait::new(y_dims, Y.span()); + + TensorTrait::new(y_dims, Y.span()) } fn gs_cubic_interpolation_1d_with_x< @@ -213,9 +203,9 @@ fn gs_cubic_interpolation_1d_with_x< let v_2 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); let v_3 = pixel_at_array(data, x_2.try_into().unwrap(), border, padding_mode); - let v = array![v_0, v_1, v_2, v_3].span(); + let v: Span = array![v_0, v_1, v_2, v_3].span(); - return dot(coeffs, v); + dot(coeffs, v) } fn gs_get_cubic_coeffs< @@ -245,7 +235,7 @@ fn gs_get_cubic_coeffs< let A = NumberTrait::neg(three / four); let x = NumberTrait::abs(x); - let mut coeffs = ArrayTrait::new(); + let mut coeffs: Array = array![]; coeffs.append(((A * (x + one) - five * A) * (x + one) + eigth * A) * (x + one) - four * A); coeffs.append(((A + two) * x - (A + three)) * x * x + one); @@ -255,7 +245,8 @@ fn gs_get_cubic_coeffs< ((A * ((one - x) + one) - five * A) * ((one - x) + one) + eigth * A) * ((one - x) + one) - four * A ); - return coeffs.span(); + + coeffs.span() } fn gs_cubic_interpolation_nd_with_x< @@ -294,13 +285,10 @@ fn gs_cubic_interpolation_nd_with_x< return a; } - let mut res1d = ArrayTrait::new(); + let mut res1d: Array = array![]; let mut i = 0; - loop { - if i == *data_dims.at(0) { - break; - } + while i != *data_dims.at(0) { let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); let sub_x = SpanTrait::slice(x, 1, x.len() - 1); @@ -316,23 +304,23 @@ fn gs_cubic_interpolation_nd_with_x< let r = 
gs_cubic_interpolation_nd_with_x( sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode ); + res1d.append(r); i += 1; }; - return gs_cubic_interpolation_1d_with_x( + gs_cubic_interpolation_1d_with_x( res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode - ); + ) } - fn gs_get_linear_coeffs, +Copy, +NumberTrait, +Sub,>( x: T ) -> Span { let x = NumberTrait::abs(x); - return array![NumberTrait::one() - x, x].span(); -} + array![NumberTrait::one() - x, x].span() +} fn gs_linear_interpolation_1d_with_x< T, @@ -362,9 +350,9 @@ fn gs_linear_interpolation_1d_with_x< let v_0 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode); let v_1 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); - let v = array![v_0, v_1].span(); + let v: Span = array![v_0, v_1].span(); - return dot(coeffs, v); + dot(coeffs, v) } fn dot, +Copy, +NumberTrait, +Add, +TensorTrait, +Mul,>( @@ -374,18 +362,14 @@ fn dot, +Copy, +NumberTrait, +Add, +TensorTrait = array![]; let mut i = 0; - loop { - if i == *data_dims.at(0) { - break; - } + while i != *data_dims.at(0) { let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); let sub_x = SpanTrait::slice(x, 1, x.len() - 1); @@ -443,16 +424,16 @@ fn gs_linear_interpolation_nd_with_x< let r = gs_linear_interpolation_nd_with_x( sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode ); + res1d.append(r); i += 1; }; - return gs_linear_interpolation_1d_with_x( + gs_linear_interpolation_1d_with_x( res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode - ); + ) } - fn pixel_at_ndarray< T, MAG, @@ -525,7 +506,7 @@ fn pixel_at_ndarray< border.append_span(border1); border.append_span(border2); - return pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode); + pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode) } fn pixel_at_array< @@ -571,21 +552,18 @@ fn pixel_at_array< }, }; - return pixel; + pixel } fn zeros, +Copy, +NumberTrait>(n: usize) -> Span { - let mut zeros = ArrayTrait::new(); + let mut zeros: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { zeros.append(NumberTrait::zero()); i += 1; }; - return zeros.span(); + zeros.span() } fn rint< @@ -604,14 +582,11 @@ fn rint< data: Span ) -> Span { // round to nearest if ties rounds to the nearest even value. 
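// Illustrative sketch (editor's example; `rint_scaled_by_10` is hypothetical, not part of
// this PR): the tie-breaking rule `rint` implements for fixed-point tensors, shown on a
// value scaled by 10. When the fraction is exactly .5, round to the nearest even integer,
// so 2.5 -> 2 but 3.5 -> 4.
fn rint_scaled_by_10(x: u32) -> u32 {
    let whole = x / 10;
    let frac = x % 10;
    if frac > 5 {
        whole + 1
    } else if frac < 5 {
        whole
    } else if whole % 2 == 0 {
        whole // exactly .5 and already even: keep it
    } else {
        whole + 1 // exactly .5 and odd: round up to the even neighbour
    }
}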
- let mut rint = ArrayTrait::new(); + let mut rint: Array = array![]; let two: T = NumberTrait::one() + NumberTrait::one(); let mut i = 0; - loop { - if i == data.len() { - break; - } + while i != data.len() { let x = *data.at(i); let mut round = NumberTrait::round(x); @@ -621,11 +596,12 @@ fn rint< round -= NumberTrait::one() } } + rint.append(round); i += 1; }; - return rint.span(); + rint.span() } fn clamp, +Copy, +NumberTrait, +PartialOrd>( @@ -634,10 +610,12 @@ fn clamp, +Copy, +NumberTrait, +PartialOrd>( if val < low { return low; } + if val > high { return high; } - return val; + + val } fn gs_reflect< @@ -686,23 +664,18 @@ fn gs_reflect< fx }; - return fx; + fx } - fn reverse, +Drop,>(data: Span) -> Span { - let mut rev = ArrayTrait::new(); + let mut rev: Array = array![]; let mut i = data.len(); - loop { - if i == 0 { - break; - } + while i != 0 { rev.append(*data.at(i - 1)); - i -= 1; }; - return rev.span(); + rev.span() } fn get_sub, +Drop,>( @@ -710,34 +683,26 @@ fn get_sub, +Drop,>( ) -> Span { let mut acc_indices = 0; let mut i = 0; - loop { - if i == index.len() { - break; - } + while i != index.len() { acc_indices += *index.at(i) * *stride_data.at(i); - i += 1; }; - return SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)); + SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)) } - fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; -} + prod +} fn prepare_border< T, @@ -757,14 +722,11 @@ fn prepare_border< ) -> Span { let num_dims = dims.len(); - let mut borders1 = ArrayTrait::new(); - let mut borders2 = ArrayTrait::new(); + let mut borders1: Array = array![]; + let mut borders2: Array = array![]; let mut i = 0; - loop { - if i == num_dims { - break; - } + while i != num_dims { if align_corner == 0 { borders1.append(-NumberTrait::half()); borders2 @@ -778,26 +740,26 @@ fn prepare_border< NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::one() ); } + i += 1; }; + borders1.append_span(borders2.span()); - return borders1.span(); + + borders1.span() } fn arange(start: usize, end: usize, step: usize) -> Span { assert((end - start) % step == 0, 'incompatible step value'); - let mut arr = ArrayTrait::new(); + let mut arr: Array = array![]; let mut i = start; - loop { - if i >= end { - break; - } + while i != end { arr.append(i); i += step; }; - return arr.span(); -} + arr.span() +} fn gs_denormalize_coordinates< T, @@ -814,20 +776,17 @@ fn gs_denormalize_coordinates< >( n: Span, dims: Span, align_corner: usize ) -> Span { - let mut x = ArrayTrait::new(); + let mut x: Array = array![]; let mut i = 0; - loop { - if i == n.len() { - break; - } + while i != n.len() { let v = *n.at(i); let dim = *dims.at(i); x.append(gs_denormalize(v, dim, align_corner)); i += 1; }; - return x.span(); + x.span() } fn gs_denormalize< @@ -854,22 +813,19 @@ fn gs_denormalize< (n + NumberTrait::one()) / two * (length - NumberTrait::one()) }; - return x; + x } fn get_all_coords(shape: Span) -> Span> { - let mut all_indices = ArrayTrait::new(); + let mut all_indices = array![]; let mut i = 0; - loop { - if i == shape.len() { - break; - } + while i != shape.len() { all_indices.append(arange(0, *shape.at(i), 1)); i += 1; }; - return cartesian(all_indices.span()); + cartesian(all_indices.span()) } fn cartesian(mut arrays: Span>,) -> Span> { @@ -884,24 
+840,18 @@ fn cartesian(mut arrays: Span>,) -> Span> { }; let mut i = 0; - let mut size_arrays = ArrayTrait::new(); - loop { - if i == arrays.len() { - break; - } + let mut size_arrays: Array = array![]; + while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); - i += 1; }; + let size_arrays = size_arrays.span(); let mut output_arrays = ArrayTrait::>::new(); let mut m = n; let mut i = 0; - loop { - if i == arrays.len() { - break; - } + while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); @@ -909,75 +859,58 @@ fn cartesian(mut arrays: Span>,) -> Span> { output_arrays.append(out); i += 1; }; + let output_arrays = output_arrays.span(); let mut i = 0; - let mut ret = ArrayTrait::new(); - loop { - if i == n { - break; - } + let mut ret = array![]; + while i != n { let mut j = 0; let mut x = ArrayTrait::new(); - loop { - if j == arrays.len() { - break; - } - + while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; + ret.append(x.span()); i += 1; }; - return ret.span(); + ret.span() } - fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { let mut size = array.len(); let mut i = 0; - loop { - if i == index { - break; - } + while i != index { let mut j = 1; - loop { - if j == *size_array.at(index - 1 - i) { - break; - } + while j != *size_array.at(index - 1 - i) { let mut k = 0; - loop { - if k == size { - break; - } + while k != size { array.append(*array.at(k)); k += 1; }; + j += 1; }; + size = size * *size_array.at(index - 1 - i); i += 1; }; + array } fn repeat(array: Span, m: usize,) -> Array { - let mut out = ArrayTrait::new(); + let mut out: Array = array![]; let mut j = 0; - loop { - if j == array.len() { - break; - } + while j != array.len() { let mut k = 0; - loop { - if k == m { - break; - } + while k != m { out.append(*array.at(j)); k += 1; }; + j += 1; }; diff --git a/src/operators/nn/functional/hard_sigmoid.cairo b/src/operators/nn/functional/hard_sigmoid.cairo index bd9714757..8a368e3be 100644 --- a/src/operators/nn/functional/hard_sigmoid.cairo +++ b/src/operators/nn/functional/hard_sigmoid.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::hard_sigmoid docstring fn hard_sigmoid< @@ -23,7 +17,7 @@ fn hard_sigmoid< >( mut x: Tensor, alpha: @T, beta: @T ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match x.data.pop_front() { @@ -36,6 +30,6 @@ fn hard_sigmoid< }; }; - return TensorTrait::new(x.shape, data_result.span()); + TensorTrait::new(x.shape, data_result.span()) } diff --git a/src/operators/nn/functional/leaky_relu.cairo b/src/operators/nn/functional/leaky_relu.cairo index d1677d48f..113383deb 100644 --- a/src/operators/nn/functional/leaky_relu.cairo +++ b/src/operators/nn/functional/leaky_relu.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::leaky_relu docstring fn 
leaky_relu< @@ -23,7 +17,7 @@ fn leaky_relu< ) -> Tensor { assert(*alpha < NumberTrait::one(), 'alpha must be less than 1'); - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -38,5 +32,5 @@ fn leaky_relu< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/linear.cairo b/src/operators/nn/functional/linear.cairo index fcc616f2c..c01deb87d 100644 --- a/src/operators/nn/functional/linear.cairo +++ b/src/operators/nn/functional/linear.cairo @@ -1,5 +1,3 @@ -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -20,5 +18,5 @@ fn linear< let dot = weights.matmul(@z); let sum = dot + bias; - return sum; + sum } diff --git a/src/operators/nn/functional/logsoftmax.cairo b/src/operators/nn/functional/logsoftmax.cairo index 1280e7cd3..fdf89c43d 100644 --- a/src/operators/nn/functional/logsoftmax.cairo +++ b/src/operators/nn/functional/logsoftmax.cairo @@ -1,8 +1,6 @@ -use core::array::SpanTrait; - +use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; -use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; /// Cf: NNTrait::logsoftmax docstring @@ -16,7 +14,7 @@ fn logsoftmax< let softmax = exp_tensor / sum; let logsoftmax = softmax.log(); - return logsoftmax; + logsoftmax } /// Cf: NNTrait::logsoftmax docstring @@ -42,5 +40,6 @@ fn logsoftmaxWide< let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor.reduce_sum(axis, true); let softmax = div_downcast(@exp_tensor, @sum); + softmax.log() } diff --git a/src/operators/nn/functional/relu.cairo b/src/operators/nn/functional/relu.cairo index 7555c515d..bdd5c1fee 100644 --- a/src/operators/nn/functional/relu.cairo +++ b/src/operators/nn/functional/relu.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn relu< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -32,5 +28,5 @@ fn relu< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/sigmoid.cairo b/src/operators/nn/functional/sigmoid.cairo index c7ed638aa..2acf5c851 100644 --- a/src/operators/nn/functional/sigmoid.cairo +++ b/src/operators/nn/functional/sigmoid.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::sigmoid docstring fn sigmoid< @@ -23,7 +17,7 @@ fn sigmoid< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -36,6 +30,6 @@ fn sigmoid< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/softmax.cairo 
b/src/operators/nn/functional/softmax.cairo index 95d0f9b9f..10602bde7 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -1,6 +1,6 @@ +use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; -use orion::numbers::fixed_point::core::FixedTrait; /// Cf: NNTrait::softmax docstring fn softmax< @@ -14,6 +14,7 @@ fn softmax< ) -> Tensor { let exp_tensor = z.exp(); let sum = exp_tensor.reduce_sum(axis, true); + exp_tensor / sum } @@ -39,6 +40,7 @@ fn softmaxWide< ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor.reduce_sum(axis, true); + div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/nn/functional/softmax_zero.cairo b/src/operators/nn/functional/softmax_zero.cairo index e90dd4784..8749caa22 100644 --- a/src/operators/nn/functional/softmax_zero.cairo +++ b/src/operators/nn/functional/softmax_zero.cairo @@ -1,14 +1,9 @@ -use core::traits::Into; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - /// Cf: NNTrait::softmax_zero docstring fn softmax_zero< T, @@ -25,6 +20,7 @@ fn softmax_zero< ) -> Tensor { let exp_tensor = exp_zero(*z); let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true); + exp_tensor / sum_no_zero } @@ -54,10 +50,10 @@ fn softmaxWide_zero< ) -> Tensor { let exp_tensor: Tensor = exp_upcast_zero(*z); let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true); + div_downcast(@exp_tensor, @sum_no_zero) } - /// Helper function that compute the exponential of a tensor except if the value of an entry is zero, the value remains zero. /// /// # Arguments @@ -76,7 +72,7 @@ fn exp_zero< >( mut z: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match z.data.pop_front() { @@ -91,7 +87,7 @@ fn exp_zero< }; }; - return TensorTrait::new(z.shape, result.span()); + TensorTrait::new(z.shape, result.span()) } /// Helper function that compute the exponential of a tensor except if the value of an entry is zero, the value remains zero. @@ -119,7 +115,7 @@ fn exp_upcast_zero< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -134,10 +130,9 @@ fn exp_upcast_zero< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } - /// Helper function that compute the reduce sum making sure no none zero value are in the output tensor. 
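// Illustrative sketch (editor's example; `sum_no_zero` is hypothetical, not part of this PR):
// the guard used by `reduce_sum_no_zero` below, in scalar form. A reduced sum that comes out
// zero is replaced by one, so the later element-wise division in softmax_zero never divides
// by zero.
fn sum_no_zero(mut data: Span<u32>) -> u32 {
    let mut sum: u32 = 0;
    loop {
        match data.pop_front() {
            Option::Some(v) => { sum += *v; },
            Option::None => { break; },
        };
    };
    if sum == 0 {
        1
    } else {
        sum
    }
}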
/// /// # Arguments @@ -158,42 +153,44 @@ fn reduce_sum_no_zero< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; if (*self.shape).len() == 1 { assert(axis == 0, 'axis out of dimensions'); + let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); output_data.append(current_sum); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; output_shape.append(1); return TensorTrait::new(output_shape.span(), output_data.span()); } else { assert(axis <= (*self.shape).len(), 'axis out of dimensions'); + let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); + let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); - let mut current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); + let mut current_sum = accumulate_sum::< + T + >(*self.data, *self.shape, output_indices, axis); if current_sum == NumberTrait::zero() { current_sum = NumberTrait::one(); } - output_data.append(current_sum); + output_data.append(current_sum); index += 1; - if index == output_data_len { - break (); - }; }; if keepdims { let output_shape = reduce_output_shape(*self.shape, axis, true); - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } else { - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } } } diff --git a/src/operators/nn/functional/softplus.cairo b/src/operators/nn/functional/softplus.cairo index 1d876c535..6292e68af 100644 --- a/src/operators/nn/functional/softplus.cairo +++ b/src/operators/nn/functional/softplus.cairo @@ -1,13 +1,7 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - -use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; +use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - /// Cf: NNTrait::softplus docstring fn softplus< T, @@ -22,7 +16,7 @@ fn softplus< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -34,5 +28,5 @@ fn softplus< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/softsign.cairo b/src/operators/nn/functional/softsign.cairo index 8d20ff297..180c15f02 100644 --- a/src/operators/nn/functional/softsign.cairo +++ b/src/operators/nn/functional/softsign.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::softsign docstring fn softsign< @@ -22,7 +16,7 @@ fn softsign< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -34,5 +28,5 @@ fn softsign< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git 
a/src/operators/nn/functional/space_to_depth.cairo b/src/operators/nn/functional/space_to_depth.cairo index 6b0881d8b..d8e8089cb 100644 --- a/src/operators/nn/functional/space_to_depth.cairo +++ b/src/operators/nn/functional/space_to_depth.cairo @@ -1,15 +1,9 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - /// Cf: NNTrait::space_to_depth docstring fn space_to_depth< T, @@ -24,7 +18,8 @@ fn space_to_depth< >( tensor: Tensor, blocksize: usize ) -> Tensor { - assert!((tensor.shape).len() == 4, "Unexpected shape 4."); + assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); + let b = (tensor.shape).at(0); let C = (tensor.shape).at(1); let H = (tensor.shape).at(2); @@ -33,5 +28,6 @@ fn space_to_depth< let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span()); let finalshape = array![*b, *C * blocksize * blocksize, *H / blocksize, *W / blocksize]; - return transposed.reshape(target_shape: finalshape.span()); + + transposed.reshape(target_shape: finalshape.span()) } diff --git a/src/operators/nn/functional/thresholded_relu.cairo b/src/operators/nn/functional/thresholded_relu.cairo index 36533660b..a160bdb50 100644 --- a/src/operators/nn/functional/thresholded_relu.cairo +++ b/src/operators/nn/functional/thresholded_relu.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn thresholded_relu< >( mut z: Tensor, alpha: @T ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -32,5 +28,5 @@ fn thresholded_relu< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 3e1ae2dff..1c018ade3 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -61,10 +59,12 @@ impl FP16x16NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } - fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + fn depth_to_space( + tensor: @Tensor, blocksize: usize, mode: felt252 + ) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -90,7 +90,7 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -101,7 +101,7 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, 
dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -129,7 +129,7 @@ impl FP16x16NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 2a12d137e..a5725eccb 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -55,10 +53,12 @@ impl FP32x32NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } - fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + fn depth_to_space( + tensor: @Tensor, blocksize: usize, mode: felt252 + ) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -84,7 +84,7 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -95,7 +95,7 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -123,7 +123,7 @@ impl FP32x32NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index a56c7c1f5..01a3b30ad 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -55,10 +53,12 @@ impl FP64x64NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } - fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + fn depth_to_space( + tensor: @Tensor, blocksize: usize, mode: felt252 + ) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -84,7 +84,7 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -95,7 +95,7 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -123,7 +123,7 @@ impl FP64x64NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 512c259e8..d80d2c323 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -62,7 +60,7 @@ impl FP8x23NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { 
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -88,7 +86,7 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -99,7 +97,7 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -127,7 +125,7 @@ impl FP8x23NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 67883ea2a..29a94d288 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -53,7 +51,7 @@ impl I32NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -79,7 +77,7 @@ impl I32NN of NNTrait { ) -> Tensor { panic(array!['not supported!']) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -90,7 +88,7 @@ impl I32NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -118,7 +116,7 @@ impl I32NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index bd333834c..e22de6b43 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -53,7 +51,7 @@ impl I8NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -79,7 +77,7 @@ impl I8NN of NNTrait { ) -> Tensor { panic(array!['not supported!']) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -90,7 +88,7 @@ impl I8NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -118,7 +116,7 @@ impl I8NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 15fb25ce0..7352b7ad9 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -53,7 +51,7 @@ impl U32NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { 
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -79,7 +77,7 @@ impl U32NN of NNTrait { ) -> Tensor { panic(array!['not supported!']) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -90,7 +88,7 @@ impl U32NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -118,7 +116,7 @@ impl U32NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/sequence/functional/concat_from_sequence.cairo b/src/operators/sequence/functional/concat_from_sequence.cairo index 336bb0553..6b15c2719 100644 --- a/src/operators/sequence/functional/concat_from_sequence.cairo +++ b/src/operators/sequence/functional/concat_from_sequence.cairo @@ -1,15 +1,8 @@ -use core::clone::Clone; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; -use core::debug::PrintTrait; -use core::traits::Into; - use orion::operators::tensor::helpers::replace_index; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::math::concat::concat; use orion::numbers::{NumberTrait, I32IntoU32}; - fn concat_from_sequence< T, impl TTensorTrait: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, >( @@ -33,7 +26,6 @@ fn concat_from_sequence< } } - fn concat_without_new_axis< T, impl TTensorTrait: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, >( @@ -44,18 +36,17 @@ fn concat_without_new_axis< /// assert in range [-r, r - 1] assert( - (axis_is_negative == false && axis_value <= r - 1) - || (axis_is_negative == true && axis_value <= r), + (!axis_is_negative && axis_value <= r - 1) || (axis_is_negative && axis_value <= r), 'Out of bounds for dimension' ); - if axis_is_negative == true { + if axis_is_negative { axis_value = r - axis_value } + concat(sequence.span(), axis_value) } - fn concat_with_new_axis< T, impl TTensorTrait: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, >( @@ -66,20 +57,20 @@ fn concat_with_new_axis< /// assert in range [-r - 1, r] assert( - (axis_is_negative == false && axis_value <= r) - || (axis_is_negative == true && axis_value <= r + 1), + (!axis_is_negative && axis_value <= r) || (axis_is_negative && axis_value <= r + 1), 'Out of bounds for dimension' ); - if axis_is_negative == true { + if axis_is_negative { if axis_value > r { axis_value = 0 } else { axis_value = r - axis_value } } + let mut input_sequence_copy = sequence; - let mut reshaped_sequence = ArrayTrait::>::new(); + let mut reshaped_sequence: Array> = array![]; loop { match input_sequence_copy.pop_front() { Option::Some(input_sequence_value) => { @@ -89,6 +80,7 @@ fn concat_with_new_axis< Option::None => { break; } }; }; + concat(reshaped_sequence.span(), axis_value) } @@ -99,7 +91,7 @@ fn add_new_dimension< mut tensor: Tensor, axis: usize ) -> Tensor { let mut tensor_shape = tensor.shape; - let mut new_tensor_shape = ArrayTrait::::new(); + let mut new_tensor_shape: Array = array![]; let mut tensor_shape_counter: usize = 0; loop { match tensor_shape.pop_front() { @@ -113,8 +105,10 @@ fn add_new_dimension< Option::None => { break; } }; }; + if axis >= tensor.shape.len() { new_tensor_shape.append(1); } + TensorTrait::::new(new_tensor_shape.span(), tensor.data) } diff --git a/src/operators/sequence/functional/sequence_at.cairo b/src/operators/sequence/functional/sequence_at.cairo index 4a4aa9203..3ca1d567a 
100644 --- a/src/operators/sequence/functional/sequence_at.cairo +++ b/src/operators/sequence/functional/sequence_at.cairo @@ -1,6 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; @@ -17,15 +14,16 @@ fn sequence_at, impl TCopy: Copy, impl TDrop: let position_value: u32 = position_value_i32.into(); assert( - (is_negative == false && position_value <= sequence.len() - 1) - || (is_negative == true && position_value <= sequence.len()), + (!is_negative && position_value <= sequence.len() - 1) + || (is_negative && position_value <= sequence.len()), 'Position out of bounds' ); - if is_negative == false { - return *sequence.at(position_value); + if !is_negative { + *sequence.at(position_value) } else { let normalized_position_value = sequence.len() - position_value; - return *sequence.at(normalized_position_value); + + *sequence.at(normalized_position_value) } } diff --git a/src/operators/sequence/functional/sequence_construct.cairo b/src/operators/sequence/functional/sequence_construct.cairo index 18902b078..d86ccfdb6 100644 --- a/src/operators/sequence/functional/sequence_construct.cairo +++ b/src/operators/sequence/functional/sequence_construct.cairo @@ -1,11 +1,8 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: SequenceTrait::sequence_construct docstring fn sequence_construct>(tensors: Array>) -> Array> { assert(tensors.len() >= 1, 'Input tensors must be >= 1'); - return tensors; + tensors } diff --git a/src/operators/sequence/functional/sequence_empty.cairo b/src/operators/sequence/functional/sequence_empty.cairo index 93e2989cc..ee74df0a5 100644 --- a/src/operators/sequence/functional/sequence_empty.cairo +++ b/src/operators/sequence/functional/sequence_empty.cairo @@ -1,16 +1,13 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: SequenceTrait::sequence_empty docstring fn sequence_empty, impl TDrop: Drop>() -> Array> { - let mut sequence = ArrayTrait::new(); + let mut sequence = array![]; - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(0); - let mut data = ArrayTrait::new(); + let mut data: Array = array![]; let tensor = TensorTrait::new(shape.span(), data.span()); sequence.append(tensor); diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index 3c6a6d57a..7c274e700 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -1,6 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; use orion::numbers::{NumberTrait, I32IntoU32}; @@ -12,9 +9,10 @@ fn sequence_erase, impl TCopy: Copy, impl TDr let position: Tensor = match position { Option::Some(p) => p, Option::None => { - let mut shape = ArrayTrait::::new(); - let mut data = ArrayTrait::::new(); + let mut shape: Array = array![]; + let mut data: Array = array![]; data.append(-1_i32); + TensorTrait::::new(shape.span(), data.span()) } }; @@ -26,17 +24,17 @@ fn sequence_erase, impl TCopy: Copy, impl TDr let mut position_value: u32 = position_value_i32.into(); assert( - (is_negative == false && position_value <= sequence.len() - 1) - || (is_negative == true && 
position_value <= sequence.len()), + (!is_negative && position_value <= sequence.len() - 1) + || (is_negative && position_value <= sequence.len()), 'Position out of bounds' ); - if is_negative == true { + if is_negative { position_value = sequence.len() - position_value; } let mut input_sequence_copy = sequence; - let mut output_sequence = ArrayTrait::>::new(); + let mut output_sequence: Array> = array![]; let mut tensor_counter: usize = 0; loop { match input_sequence_copy.pop_front() { @@ -45,14 +43,14 @@ fn sequence_erase, impl TCopy: Copy, impl TDr tensor_counter += 1; continue; } - output_sequence.append(input_sequence_value); + output_sequence.append(input_sequence_value); tensor_counter += 1; }, Option::None => { break; } }; }; - return output_sequence; + output_sequence } diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 83b333387..df19120b9 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -1,6 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; use orion::numbers::{NumberTrait, I32IntoU32}; @@ -12,9 +9,10 @@ fn sequence_insert, impl TCopy: Copy, impl TD let position: Tensor = match position { Option::Some(p) => p, Option::None => { - let mut shape = ArrayTrait::::new(); - let mut data = ArrayTrait::::new(); + let mut shape: Array = array![]; + let mut data: Array = array![]; data.append(-1_i32); + TensorTrait::::new(shape.span(), data.span()) }, }; @@ -26,16 +24,16 @@ fn sequence_insert, impl TCopy: Copy, impl TD let mut position_value: u32 = position_value_i32.into(); assert( - (is_negative == false && position_value <= self.len() - 1) - || (is_negative == true && position_value <= self.len()), + (!is_negative && position_value <= self.len() - 1) + || (is_negative && position_value <= self.len()), 'Position out of bounds' ); - if is_negative == true { + if is_negative { position_value = self.len() - position_value; } - let mut new_sequence = ArrayTrait::>::new(); + let mut new_sequence: Array> = array![]; let mut inserted = false; let mut self_copy = self; loop { @@ -46,7 +44,7 @@ fn sequence_insert, impl TCopy: Copy, impl TD inserted = true; } new_sequence.append(t); - if inserted == false { + if !inserted { position_value -= 1; } }, @@ -54,5 +52,5 @@ fn sequence_insert, impl TCopy: Copy, impl TD }; }; - return new_sequence; + new_sequence } diff --git a/src/operators/sequence/functional/sequence_length.cairo b/src/operators/sequence/functional/sequence_length.cairo index 84f91e48f..0409c5809 100644 --- a/src/operators/sequence/functional/sequence_length.cairo +++ b/src/operators/sequence/functional/sequence_length.cairo @@ -1,12 +1,9 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: SequenceTrait::sequence_length docstring fn sequence_length>(self: Array>) -> Tensor { - let mut shape = ArrayTrait::::new(); - let mut result = ArrayTrait::new(); + let mut shape: Array = array![]; + let mut result: Array = array![]; result.append(self.len()); Tensor:: { shape: shape.span(), data: result.span(), } diff --git a/src/operators/sequence/implementations/sequence_bool.cairo b/src/operators/sequence/implementations/sequence_bool.cairo index 7c1402db1..d9f0de151 100644 --- a/src/operators/sequence/implementations/sequence_bool.cairo 
+++ b/src/operators/sequence/implementations/sequence_bool.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; diff --git a/src/operators/sequence/implementations/sequence_fp16x16.cairo b/src/operators/sequence/implementations/sequence_fp16x16.cairo index d03967b32..bcc6793b1 100644 --- a/src/operators/sequence/implementations/sequence_fp16x16.cairo +++ b/src/operators/sequence/implementations/sequence_fp16x16.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP16x16Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp16x16wide.cairo b/src/operators/sequence/implementations/sequence_fp16x16wide.cairo index bfaa11f37..bc133efc8 100644 --- a/src/operators/sequence/implementations/sequence_fp16x16wide.cairo +++ b/src/operators/sequence/implementations/sequence_fp16x16wide.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W; use orion::operators::tensor::implementations::tensor_fp16x16wide::FP16x16WTensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP16x16WSequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp32x32.cairo b/src/operators/sequence/implementations/sequence_fp32x32.cairo index 2025d6161..2a9af7255 100644 --- a/src/operators/sequence/implementations/sequence_fp32x32.cairo +++ b/src/operators/sequence/implementations/sequence_fp32x32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::FP32x32; use orion::operators::tensor::implementations::tensor_fp32x32::FP32x32Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP32x32Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp64x64.cairo b/src/operators/sequence/implementations/sequence_fp64x64.cairo index a1c0d3f89..004450f21 100644 --- a/src/operators/sequence/implementations/sequence_fp64x64.cairo +++ b/src/operators/sequence/implementations/sequence_fp64x64.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::FP64x64; use 
orion::operators::tensor::implementations::tensor_fp64x64::FP64x64Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP64x64Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp8x23.cairo b/src/operators/sequence/implementations/sequence_fp8x23.cairo index ae9bfd18d..567faccd8 100644 --- a/src/operators/sequence/implementations/sequence_fp8x23.cairo +++ b/src/operators/sequence/implementations/sequence_fp8x23.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP8x23Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp8x23wide.cairo b/src/operators/sequence/implementations/sequence_fp8x23wide.cairo index 5dc8e246e..994298877 100644 --- a/src/operators/sequence/implementations/sequence_fp8x23wide.cairo +++ b/src/operators/sequence/implementations/sequence_fp8x23wide.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::FP8x23W; use orion::operators::tensor::implementations::tensor_fp8x23wide::FP8x23WTensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP8x23WSequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_i32.cairo b/src/operators/sequence/implementations/sequence_i32.cairo index 8a267c244..f99c4d592 100644 --- a/src/operators/sequence/implementations/sequence_i32.cairo +++ b/src/operators/sequence/implementations/sequence_i32.cairo @@ -1,11 +1,8 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl I32Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_i8.cairo b/src/operators/sequence/implementations/sequence_i8.cairo index 700e52867..3dc80952e 100644 --- a/src/operators/sequence/implementations/sequence_i8.cairo +++ b/src/operators/sequence/implementations/sequence_i8.cairo @@ -1,12 +1,9 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; use orion::operators::tensor::implementations::tensor_i8::I8Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl I8Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { 
functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_u32.cairo b/src/operators/sequence/implementations/sequence_u32.cairo index 34ca8d578..be5b5deda 100644 --- a/src/operators/sequence/implementations/sequence_u32.cairo +++ b/src/operators/sequence/implementations/sequence_u32.cairo @@ -1,12 +1,9 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; use orion::operators::tensor::implementations::tensor_u32::U32Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl U32Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 23018044d..0d21a4de3 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1,3 +1,4 @@ +use alexandria_data_structures::array_ext::ArrayTraitExt; use core::array::{ArrayTrait, SpanTrait}; use core::serde::Serde; use core::option::OptionTrait; @@ -116,6 +117,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde /// gather_nd - Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b. /// reduce_log_sum - Computes the log sum of the input tensor's elements along the provided axes. /// erf - Computes the error function of the given input tensor element-wise. +/// reduce_log_sum_exp - Computes the log sum of the exponentials of the input tensor's elements along the provided axes. /// layer_normalization - Computes the layer normalization of the input tensor. /// split - Split a tensor into a list of tensors, along the specified β€˜axis’. /// random_uniform_like - RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor. @@ -128,6 +130,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde /// dynamic_quantize_linear - Computes the Scale, Zero Point and FP32->8Bit conversion of FP32 Input data. /// scatter_nd - The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data. +/// label_encoder - Maps each element in the input tensor to another value. trait TensorTrait { /// # tensor.new /// @@ -275,7 +278,7 @@ trait TensorTrait { /// fn min(tensors: Span>) -> Tensor; /// ``` /// - /// Returns the element-wise minumum values from a list of input tensors + /// Returns the element-wise minimum values from a list of input tensors /// The input tensors must have either: /// * Exactly the same shape /// * The same number of dimensions and the length of each dimension is either a common length or 1. @@ -2578,7 +2581,7 @@ trait TensorTrait { /// /// It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. /// The quantization formula is y = saturate((x / y_scale) + y_zero_point). - /// It perfoms the addition of the two vectors once dequantized, then return the quantization of the result of the addition. + /// It performs the addition of the two vectors once dequantized, then returns the quantization of the result of the addition. /// Broadcasting is supported. /// Scale and zero point must have same shape and the same type.
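/// (A hedged numeric sketch with illustrative values: for y_scale = 0.5 and y_zero_point = 0, two operands that dequantize to 1.25 and 0.75 add to 2.0, which requantizes to saturate(2.0 / 0.5 + 0) = 4.)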
They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). /// Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. @@ -2676,7 +2679,7 @@ trait TensorTrait { /// /// It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. /// The quantization formula is y = saturate((x / y_scale) + y_zero_point). - /// It perfoms the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication. + /// It performs the element-wise multiplication of the two vectors once dequantized, then returns the quantization of the result of the multiplication. /// Broadcasting is supported. /// Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). /// Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. @@ -2783,7 +2786,7 @@ trait TensorTrait { /// /// It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. /// The quantization formula is y = saturate((x / y_scale) + y_zero_point). - /// It perfoms the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes. + /// It performs the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes. /// Then returns the quantization of the result of the multiplication. /// Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). /// Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. @@ -3298,7 +3301,7 @@ trait TensorTrait { /// [1 1]] /// ``` /// - fn squeeze(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T>; + fn squeeze(self: @Tensor<T>, axes: Option<Span<usize>>) -> Tensor<T>; /// # tensor.clip /// /// ```rust @@ -4795,6 +4798,68 @@ trait TensorTrait { /// ``` /// fn reduce_log_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// ## tensor.reduce_log_sum_exp + /// + /// ```rust + /// fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// ``` + /// + /// Computes the log sum of the exponentials of the input tensor's elements along the provided axes. + /// + /// ## Args + /// * 'self'(`@Tensor`) - The input tensor. + /// * 'axis'(`usize`) - The dimension to reduce. + /// * 'keepdims'(`bool`) - If true, retains reduced dimensions with length 1. + /// + /// ## Panics + /// + /// * Panics if axis is not in the range of the input tensor's dimensions. + /// + /// ## Returns + /// + /// Returns a new `Tensor` instance with the specified axis reduced by taking the log of the sum of the exponentials of its elements.
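+ /// Concretely, each output element is `log(sum(exp(x_i)))` over the reduced axis; for instance (illustrative values), reducing `[1.0, 2.0]` yields `ln(e^1 + e^2) β‰ˆ 2.3133`.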
+ /// + /// + /// ## Example + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::FP32x32Tensor; + /// use orion::numbers::{FixedTrait, FP32x32}; + /// + /// fn reduce_log_sum_exp() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(3); + /// shape.append(2); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP32x32 { mag: 4294967296, sign: false }); + /// data.append(FP32x32 { mag: 8589934592, sign: false }); + /// data.append(FP32x32 { mag: 12884901888, sign: false }); + /// data.append(FP32x32 { mag: 17179869184, sign: false }); + /// data.append(FP32x32 { mag: 21474836480, sign: false }); + /// data.append(FP32x32 { mag: 25769803776, sign: false }); + /// data.append(FP32x32 { mag: 30064771072, sign: false }); + /// data.append(FP32x32 { mag: 34359738368, sign: false }); + /// data.append(FP32x32 { mag: 38654705664, sign: false }); + /// data.append(FP32x32 { mag: 42949672960, sign: false }); + /// data.append(FP32x32 { mag: 47244640256, sign: false }); + /// data.append(FP32x32 { mag: 51539607552, sign: false }); + /// + /// let tensor = TensorTrait::::new(shape.span(), data.span()); + /// + /// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); + /// + /// } + /// + /// + /// >>> [[9215828, 16323477, 20115004], [22716772, 24699744, 26302432]] + /// ``` + /// + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ## tensor.erf /// /// ```rust @@ -5224,7 +5289,10 @@ trait TensorTrait { /// ``` /// fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor; /// # tensor.scatter_nd /// @@ -5304,10 +5372,7 @@ trait TensorTrait { /// ``` /// fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor; /// # tensor.dynamic_quantize_linear /// /// ```rust @@ -5364,9 +5429,7 @@ trait TensorTrait { /// >>> ([133, 233, 236, 255, -18, -0], [0.02745], [128]) /// ``` /// - fn dynamic_quantize_linear( - self: @Tensor - ) -> (Tensor, Tensor, Tensor); + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor, Tensor, Tensor); /// # tensor.optional /// /// ```rust @@ -5664,7 +5727,129 @@ trait TensorTrait { /// >>> [[[[7299130, 4884492]], [[2339070, 1559536]], [[3448557, 984617]], [[5745934, 3670947]], [[4665989, 3079292]], [[3375288, 948254]], [[3749966, 4911069]], [[1358829, 4368105]]]] /// ``` /// - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor; + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor; + /// # tensor.label_encoder + /// + /// ```rust + /// fn label_encoder(self: @Tensor, default_list: Option>, default_tensor: Option>, keys: Option>, keys_tensor: Option>, values: Option>, values_tensor: Option>) -> Tensor; + /// ``` + /// + /// Maps each element in the input tensor to another value. + /// + /// The mapping is determined by two parallel attributes: the 'keys_' and 'values_' attributes. + /// The i-th value in the specified 'keys_' attribute would be mapped to the i-th value in the specified 'values_' attribute.
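+ /// For instance (illustrative values): with keys `[1, 2, 3]`, values `[10, 20, 30]` and a default of `0`, the input `[2, 5]` maps to `[20, 0]`, since `5` does not appear among the keys.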
+ /// This implies that the input's element type and the element type of the specified 'keys_' should be identical, while the output type is identical to that of the specified 'values_' attribute. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - The input tensor. + /// * `default_list`(`Option>`) - The default span. + /// * `default_tensor`(`Option>`) - The default tensor. + /// * `keys`(`Option>`) - The keys span. + /// * `keys_tensor`(`Option>`) - The keys tensor. + /// * `values`(`Option>`) - The values span. + /// * `values_tensor`(`Option>`) - The values tensor. + /// + /// One and only one of the 'default_*' inputs should be set. + /// One and only one of the 'keys*' inputs should be set. + /// One and only one of the 'values*' inputs should be set. + /// + /// ## Panics + /// + /// * Panics if the len/shape of keys and values are not the same. + /// + /// ## Returns + /// + /// A new `Tensor` that maps each element in the input tensor to another value. + /// + /// ## Type Constraints + /// + /// * `T` in (`Tensor<FP16x16>`, `Tensor<i8>`, `Tensor<i32>`, `Tensor<u32>`) + /// + /// ## Examples + /// + /// ```rust + /// use array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + /// + /// fn label_encoder_example() -> Tensor { + /// fn data() -> Tensor { + /// let mut sizes = ArrayTrait::new(); + /// sizes.append(2); + /// sizes.append(3); + /// let mut data = ArrayTrait::new(); + /// data.append(1); + /// data.append(2); + /// data.append(3); + /// data.append(1); + /// data.append(4); + /// data.append(5); + /// + /// let tensor = TensorTrait::::new(sizes.span(), data.span()); + /// return tensor; + /// } + /// + /// fn keys() -> Tensor { + /// let mut sizes = ArrayTrait::new(); + /// sizes.append(3); + /// sizes.append(1); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(1); + /// data.append(2); + /// data.append(1); + /// + /// let tensor = TensorTrait::::new(sizes.span(), data.span()); + /// return tensor; + /// } + /// + /// fn values() -> Tensor { + /// let mut sizes = ArrayTrait::new(); + /// sizes.append(3); + /// sizes.append(1); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(8); + /// data.append(9); + /// data.append(7); + /// + /// let tensor = TensorTrait::::new(sizes.span(), data.span()); + /// return tensor; + /// } + /// + /// fn default() -> Tensor { + /// let mut sizes = ArrayTrait::new(); + /// sizes.append(1); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(999); + /// + /// let tensor = TensorTrait::::new(sizes.span(), data.span()); + /// return tensor; + /// } + /// + /// let data = data(); + /// let keys = keys(); + /// let values = values(); + /// let default = default(); + /// return data.label_encoder(default_list: Option::None, default_tensor: Option::Some(default), + /// keys: Option::None, keys_tensor: Option::Some(keys), + /// values: Option::None, values_tensor: Option::Some(values)); + /// >>> [7, 9, 999, 7, 999, 999] + /// ``` + /// + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor; } /// Cf: TensorTrait::new docstring @@ -5743,33 +5928,22 @@ fn unravel_index(index: usize, mut shape: Span) -> Span { /// Cf: TensorTrait::stride docstring fn stride(mut shape: Span) -> Span { - let shape_len = shape.len(); - assert(shape_len > 0, 'shape cannot be empty'); - - let mut result: Array = ArrayTrait::new(); - let mut accumulated: usize = 1; -
let mut temp_result = ArrayTrait::new(); + let mut strides = ArrayTrait::new(); + let mut stride = 1; loop { match shape.pop_back() { - Option::Some(i) => { - temp_result.append(accumulated); - accumulated *= *i; + Option::Some(size) => { + strides.append(stride); + stride *= *size; }, Option::None => { break; } }; }; - let mut temp_result = temp_result.span(); - loop { - match temp_result.pop_back() { - Option::Some(val) => { result.append(*val); }, - Option::None => { break; } - }; - }; - - return result.span(); + strides.reverse().span() } + /// Cf: TensorTrait::reshape docstring fn reshape(self: @Tensor, target_shape: Span) -> Tensor { new_tensor(target_shape, *self.data) @@ -6051,7 +6225,7 @@ fn nonzero< } /// Cf: TensorTrait::squeeze docstring -fn squeeze(self: @Tensor, axes: Option>) -> Tensor { +fn squeeze(self: @Tensor, axes: Option>) -> Tensor { let target_shape = match axes { Option::Some(mut axes) => { let mut axis_squeezed = 0; @@ -6060,7 +6234,7 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { match axes.pop_front() { Option::Some(axis) => { let mut reshape: Array = ArrayTrait::new(); - let mut index = 0_i32; + let mut index = 0; let axis = if *axis < 0 { assert( *axis <= (*self.shape).len().into(), 'axis out of accepted range' diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index caa8d3b21..550ff45c5 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use alexandria_data_structures::array_ext::ArrayTraitExt; use orion::utils::u32_max; @@ -27,7 +23,7 @@ fn len_from_shape(mut shape: Span) -> usize { }; }; - return result; + result } /// Verifies if the shape and the data array of a tensor are compatible. @@ -51,21 +47,37 @@ fn check_shape(shape: Span, data: Span) { /// # Panics /// * Panics if the shapes are not compatible for broadcasting. fn check_compatibility(mut shape_1: Span, mut shape_2: Span) { - assert(shape_1.len() == shape_2.len(), 'tensors shape must match'); - - loop { - match shape_1.pop_front() { - Option::Some(shape_1_val) => { - let shape_2_val = *shape_2.pop_front().unwrap(); - - assert( - *shape_1_val == shape_2_val || *shape_1_val == 1 || shape_2_val == 1, - 'tensors shape must match' - ); - }, - Option::None => { break; } + // Start from the last dimension by getting the length of each shape + let mut iter_1 = shape_1.len(); + let mut iter_2 = shape_2.len(); + + // Iterate while there are dimensions left in either shape + while iter_1 > 0 || iter_2 > 0 { + // Get the current dimension for each shape, defaulting to 1 if we've run out of dimensions + let dim_1 = if iter_1 > 0 { + *shape_1[iter_1 - 1] + } else { + 1 }; - }; + let dim_2 = if iter_2 > 0 { + *shape_2[iter_2 - 1] + } else { + 1 + }; + + // Check the broadcasting rule for the current dimension + if dim_1 != dim_2 && dim_1 != 1 && dim_2 != 1 { + panic(array!['tensors shape must match']); + } + + // Move to the next dimension + if iter_1 > 0 { + iter_1 -= 1; + } + if iter_2 > 0 { + iter_2 -= 1; + } + } } /// Computes the index in the broadcasted tensor corresponding to the given indices and shape. @@ -81,7 +93,15 @@ fn check_compatibility(mut shape_1: Span, mut shape_2: Span) { /// # Returns /// * A usize representing the index in the broadcasted tensor. 
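///
/// A worked example (values chosen for illustration): for `shape = [2, 3]` the strides are `[3, 1]`, so `indices = [1, 2]` maps to `1 * 3 + 2 * 1 = 5`; with the shorter `indices = [2]`, the missing leading index is treated as `0`, giving `0 * 3 + 2 * 1 = 2`.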
fn broadcast_index_mapping(mut shape: Span, mut indices: Span) -> usize { - assert(shape.len() == indices.len(), 'shape/indices len must be equal'); + if shape.len() == indices.len() { + broadcast_index_mapping_equal_shape(shape, indices) + } else { + broadcast_index_mapping_non_equal_shape(shape, indices) + } +} + + +fn broadcast_index_mapping_equal_shape(mut shape: Span, mut indices: Span) -> usize { let mut result = 0_usize; let mut stride = stride(shape); @@ -98,9 +118,50 @@ fn broadcast_index_mapping(mut shape: Span, mut indices: Span) -> }; }; - return result; + result +} + +fn broadcast_index_mapping_non_equal_shape( + mut shape: Span, mut indices: Span +) -> usize { + let mut result = 0_usize; + let mut stride = stride(shape.clone()); + + // Calculate the offset to align indices with the rightmost dimensions of the shape + let mut offset = if shape.len() > indices.len() { + shape.len() - indices.len() + } else { + 0 + }; + + loop { + match shape.pop_back() { + Option::Some(_) => { + let stride_val = stride + .pop_back() + .unwrap_or(@1); // Default stride for non-existent dimensions is 1 + + // Calculate the index, using 0 for dimensions beyond the length of indices + let index_val = if offset > 0 { + offset -= 1; // Decrement offset until we align indices with the shape + 0 // Use 0 for indices beyond the length of the indices span + } else { + *indices + .pop_back() + .unwrap_or(@0) // Use actual index value or 0 if indices are exhausted + }; + + let index = index_val * *stride_val; + result += index; + }, + Option::None => { break; } + }; + }; + + result } + /// Generates the output shape after reducing a tensor along a specified axis. /// /// # Arguments @@ -117,7 +178,7 @@ fn broadcast_index_mapping(mut shape: Span, mut indices: Span) -> fn reduce_output_shape(mut input_shape: Span, axis: usize, keepdims: bool) -> Span { assert(axis < input_shape.len(), 'axis out of dimensions'); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut n: usize = 0; loop { @@ -137,7 +198,7 @@ fn reduce_output_shape(mut input_shape: Span, axis: usize, keepdims: bool }; }; - return output_shape.span(); + output_shape.span() } @@ -158,7 +219,7 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> let axes_len = axes.len(); assert(input_shape.len() == axes_len, 'input_shape/indices len unequal'); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; loop { match axes.pop_front() { @@ -167,7 +228,7 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> }; }; - return output_shape.span(); + output_shape.span() } /// Combines output indices with the current index of the specified axis. @@ -186,14 +247,10 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> fn combine_indices(mut output_indices: Span, axis_index: usize, axis: usize) -> Span { assert(axis <= output_indices.len(), 'axis value is out of range'); - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; let mut n: usize = 0; - loop { - if n > output_indices.len() { - break (); - } - + while n != output_indices.len() + 1 { if n == axis { result.append(axis_index); } else if n > axis { @@ -205,7 +262,7 @@ fn combine_indices(mut output_indices: Span, axis_index: usize, axis: usi n += 1; }; - return result.span(); + result.span() } @@ -237,7 +294,7 @@ fn find_axis(mut axes: Span, target_axis: usize) -> usize { }; }; - return axis; + axis } /// Computes the broadcasted shape of two tensors. 
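///
/// A worked example (illustrative): broadcasting `[2, 1, 4]` against `[3, 4]` aligns dimensions from the right and takes the maximum of each pair, with missing dimensions counting as 1, yielding `[2, 3, 4]`.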
@@ -254,34 +311,19 @@ fn find_axis(mut axes: Span, target_axis: usize) -> usize { /// * A Span of usize representing the broadcasted shape. fn broadcast_shape(mut shape1: Span, mut shape2: Span) -> Span { check_compatibility(shape1, shape2); - let mut result: Array = ArrayTrait::new(); - - loop { - let mut dim1 = 1; - let mut dim2 = 1; - - match shape1.pop_front() { - Option::Some(item) => { dim1 = *item; }, - Option::None => { if shape1.len() == 0 && shape2.len() == 0 { - break (); - }; } - }; + let mut result: Array = array![]; - match shape2.pop_front() { - Option::Some(item) => { dim2 = *item; }, - Option::None => { if shape1.len() == 0 && shape2.len() == 0 { - break (); - }; } - }; + while !shape1.is_empty() || !shape2.is_empty() { + let dim1 = *shape1.pop_back().unwrap_or(@1); + let dim2 = *shape2.pop_back().unwrap_or(@1); let broadcasted_dim = u32_max(dim1, dim2); result.append(broadcasted_dim); }; - return result.span(); + result.reverse().span() } - /// Substitute a value in a shape at a given index /// /// # Arguments @@ -297,7 +339,7 @@ fn broadcast_shape(mut shape1: Span, mut shape2: Span) -> Span` - The modified shape fn replace_index(mut shape: Span, index: usize, value: usize) -> Span { - let mut output = ArrayTrait::new(); + let mut output: Array = array![]; let mut i = 0; loop { @@ -314,7 +356,7 @@ fn replace_index(mut shape: Span, index: usize, value: usize) -> Span, index: usize, value: usize) -> Span` - A span containing the usize elements representing the axes. fn get_all_axes(shape: Span) -> Span { - let mut ret: Array = ArrayTrait::new(); + let mut ret: Array = array![]; let mut i: usize = 0; let stop_i = shape.len() - 1; loop { @@ -339,6 +381,7 @@ fn get_all_axes(shape: Span) -> Span { } i += 1; }; + ret.span() } @@ -353,20 +396,14 @@ fn flatten_array_of_tensors, +Drop,>( let stride_lim: usize = *new_stride.at(axis); let max_row = (*(*tensors.at(0).shape).at(0)); let mut row = 0; - loop { - if row >= max_row { - break; - } + while row != max_row { let mut tensors_span = tensors.span(); loop { let mut i = 0; match tensors_span.pop_front() { Option::Some(mut t) => { let mut data = *t.data; - loop { - if i >= stride_lim { - break; - } + while i != stride_lim { let idx = i + (row * stride_lim); flattened.append(*data.at(idx)); i += 1; @@ -375,8 +412,10 @@ fn flatten_array_of_tensors, +Drop,>( Option::None => { break; }, } }; + row += 1; }; + flattened.span() } @@ -390,27 +429,18 @@ fn as_tensors_array, +Drop, +TensorTrait,>( let mut axes: Array = array![]; let mut idx: usize = 0; - loop { - if idx >= rank { - break; - } + while idx != rank { axes.append(idx); idx += 1; }; idx = 0; let axis_len: usize = *shape.at(axis); - loop { - if idx >= axis_len { - break; - } + while idx != axis_len { let mut starts: Array = array![]; let mut ends: Array = array![]; let mut i: usize = 0; - loop { - if i >= rank { - break; - } + while i != rank { starts.append(if i == axis { idx } else { @@ -436,6 +466,7 @@ fn as_tensors_array, +Drop, +TensorTrait,>( idx += 1; }; + as_tensors } @@ -476,6 +507,7 @@ fn span_cmp, +Copy, +PartialEq, +PartialOrd>( } }; }; + ret } @@ -511,18 +543,18 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par fn optional_has_element, +Drop, +TensorTrait,>( x: Option> ) -> Tensor { - match x{ + match x { Option::Some => { - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(1); - let mut data = ArrayTrait::::new(); + let mut data: Array = array![]; data.append(true); TensorTrait::new(shape.span(), data.span()) }, 
Option::None => { - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(1); - let mut data = ArrayTrait::::new(); + let mut data: Array = array![]; data.append(false); TensorTrait::new(shape.span(), data.span()) } @@ -544,12 +576,8 @@ fn optional_has_element, +Drop, +TensorTrait,>( fn optional_get_element, +Drop, +TensorTrait,>( x: Option> ) -> Tensor { - match x{ - Option::Some(ele) => { - ele - }, - Option::None => { - panic(array!['The input is an empty', 'optional-type.']) - } + match x { + Option::Some(ele) => { ele }, + Option::None => { panic(array!['The input is an empty', 'optional-type.']) } } } diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index f894937dc..612a397cc 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{ constant_of_shape, new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, @@ -37,7 +32,6 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn min_in_tensor(self: @Tensor) -> bool { panic(array!['not supported!']) } @@ -240,7 +234,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { panic(array!['not supported!']) } @@ -321,13 +315,13 @@ impl BoolTensor of TensorTrait { ) -> Tensor:: { panic(array!['not supported!']) } + fn qlinear_leakyrelu( self: @Tensor, a_scale: @Tensor, a_zero_point: @Tensor, alpha: bool, ) -> Tensor:: { panic(array!['not supported!']) } - fn round(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -432,6 +426,10 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported']) + } + fn unique( self: @Tensor, axis: Option, sorted: Option ) -> (Tensor, Tensor, Tensor, Tensor) { @@ -492,26 +490,26 @@ impl BoolTensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ - panic(array!['not supported!']) + ) -> (Tensor::, Tensor::, Tensor) { + panic(array!['not supported!']) } fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { panic(array!['not supported!']) } @@ -532,7 +530,21 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { + panic(array!['not supported!']) + } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: 
Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { panic(array!['not supported!']) } } @@ -555,15 +567,10 @@ impl BoolTryIntobool of TryInto { } // Internals - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -571,13 +578,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index e035b6a64..c9c31ae23 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{ new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, @@ -17,7 +12,6 @@ use orion::operators::tensor::implementations::{ use orion::numbers::complex_number::complex_trait::ComplexTrait; use orion::numbers::complex_number::complex64::{Complex64Impl, complex64}; - impl Complex64Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -322,7 +316,7 @@ impl Complex64Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -461,7 +455,6 @@ impl Complex64Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } - fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -478,6 +471,12 @@ impl Complex64Tensor of TensorTrait { math::compress::compress(self, condition, axis) } + fn reduce_log_sum_exp( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn layer_normalization( self: @Tensor, scale: @Tensor, @@ -499,7 +498,10 @@ impl Complex64Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } @@ -522,10 +524,15 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, + high: Option, + low: Option, + seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: complex64, end: complex64, step: complex64) -> Tensor { panic(array!['not supported!']) } @@ -541,17 +548,17 @@ impl Complex64Tensor of TensorTrait { fn blackman_window(size: complex64, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { 
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor ) -> (Tensor::, Tensor::, Tensor) { @@ -566,6 +573,18 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -639,22 +658,17 @@ impl Complex64TensorPartialEq of PartialEq> { } } - // Internals - fn eq(lhs: @complex64, rhs: @complex64) -> bool { let eq = (*lhs.real == *rhs.real) && (*lhs.img == *rhs.img); + eq } fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -662,14 +676,10 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 4dd2dd8d3..a37ed0442 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -16,6 +11,9 @@ use orion::operators::tensor::implementations::{ }; use orion::numbers::fixed_point::implementations::fp16x16::math::trig::PI; +use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W; + + impl FP16x16Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -360,7 +358,7 @@ impl FP16x16Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -442,7 +440,6 @@ impl FP16x16Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -497,6 +494,11 @@ impl FP16x16Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported!']) + } + + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } @@ -562,10 +564,12 @@ impl FP16x16Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP16x16, end: FP16x16, step: FP16x16) -> Tensor { math::range::range(start, 
end, step) } @@ -581,7 +585,7 @@ impl FP16x16Tensor of TensorTrait { fn blackman_window(size: FP16x16, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP16x16 { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -589,29 +593,30 @@ impl FP16x16Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } - + fn scatter_nd( self: @Tensor, updates: Tensor, @@ -620,6 +625,20 @@ impl FP16x16Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -703,22 +722,22 @@ impl TensorI8IntoTensorFP16x16 of Into, Tensor> { impl FP16x16TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } @@ -741,11 +760,7 @@ fn relative_eq(lhs: @FP16x16, rhs: @FP16x16) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -753,28 +768,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp16x16(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo 
b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 6fb17db32..2003b28ff 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -16,6 +11,13 @@ use orion::operators::tensor::implementations::{ }; use orion::numbers::fixed_point::implementations::fp16x16wide::math::trig::PI; +use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ + FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16IntoFP16x16W +}; + +use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; + + impl FP16x16WTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -320,7 +322,7 @@ impl FP16x16WTensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -408,7 +410,6 @@ impl FP16x16WTensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -463,6 +464,12 @@ impl FP16x16WTensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + panic(array!['not supported!']) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } @@ -514,10 +521,15 @@ impl FP16x16WTensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, + high: Option, + low: Option, + seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP16x16W, end: FP16x16W, step: FP16x16W) -> Tensor { math::range::range(start, end, step) } @@ -533,37 +545,38 @@ impl FP16x16WTensor of TensorTrait { fn blackman_window(size: FP16x16W, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP16x16W { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } - + fn scatter_nd( self: @Tensor, updates: Tensor, @@ -572,6 +585,20 @@ impl FP16x16WTensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, 
indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -655,26 +682,25 @@ impl U32TryIntoU32 of TryInto { impl FP16x16WTensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } - // Internals const PRECISION: u64 = 589; // 0.009 @@ -690,15 +716,10 @@ fn relative_eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { rel_diff <= PRECISION } - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -706,14 +727,10 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index e2380b70a..4870226a1 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -360,7 +355,7 @@ impl FP32x32Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -442,7 +437,6 @@ impl FP32x32Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -497,6 +491,10 @@ impl FP32x32Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } @@ -562,10 +560,12 @@ impl FP32x32Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn 
range(start: FP32x32, end: FP32x32, step: FP32x32) -> Tensor { math::range::range(start, end, step) } @@ -581,7 +581,7 @@ impl FP32x32Tensor of TensorTrait { fn blackman_window(size: FP32x32, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -589,27 +589,30 @@ impl FP32x32Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } - + fn scatter_nd( self: @Tensor, updates: Tensor, @@ -618,6 +621,20 @@ impl FP32x32Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -731,9 +748,7 @@ impl FP32x32TensorPartialOrd of PartialOrd> { } } - // Internals - const PRECISION: u64 = 75497; // 0.009 fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool { @@ -751,11 +766,7 @@ fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -763,28 +774,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp32x32(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 982ef5860..3a7214d18 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -360,7 +355,7 @@ impl 
FP64x64Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -442,7 +437,6 @@ impl FP64x64Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -497,6 +491,10 @@ impl FP64x64Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } @@ -562,10 +560,12 @@ impl FP64x64Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP64x64, end: FP64x64, step: FP64x64) -> Tensor { math::range::range(start, end, step) } @@ -581,7 +581,7 @@ impl FP64x64Tensor of TensorTrait { fn blackman_window(size: FP64x64, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -589,26 +589,28 @@ impl FP64x64Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } fn scatter_nd( @@ -619,6 +621,20 @@ impl FP64x64Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
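// --- Illustrative sketch (not part of the patch): the reduce_log_sum_exp
// operator wired up for FP64x64 above computes LSE(x) = ln(sum_i exp(x_i))
// along the reduced axis. A hypothetical 1-D helper showing the same
// arithmetic; the helper name is invented and the import paths are assumed
// from this crate's layout:
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x64Impl};

fn log_sum_exp_1d(mut xs: Span<FP64x64>) -> FP64x64 {
    // Sum the exponentials, then take the natural log of the total.
    let mut acc: FP64x64 = FixedTrait::ZERO();
    while xs.len() != 0 {
        acc = acc + (*xs.pop_front().unwrap()).exp();
    };
    acc.ln()
}
// --- end of sketch ---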
@@ -713,27 +729,26 @@ impl TensorI8IntoTensorFP64x64 of Into, Tensor> { impl FP64x64TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - const PRECISION: u128 = 1660000000000000; // 9e-05 fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool { @@ -751,11 +766,7 @@ fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -763,28 +774,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp64x64(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index b38c70e65..b4a26d749 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -340,7 +335,6 @@ impl FP8x23Tensor of TensorTrait { ) } - fn slice( self: @Tensor, starts: Span, @@ -361,7 +355,7 @@ impl FP8x23Tensor of TensorTrait { core_ops::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_ops::squeeze(self, axes) } @@ -462,7 +456,6 @@ impl FP8x23Tensor of TensorTrait { panic(array!['not supported!']) } - fn reduce_min( self: @Tensor, axes: Option>, @@ -496,6 +489,10 @@ impl FP8x23Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported!']) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } @@ -561,10 +558,12 @@ impl FP8x23Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP8x23, end: FP8x23, step: FP8x23) -> Tensor { math::range::range(start, end, step) } @@ -580,36 +579,36 @@ impl FP8x23Tensor of TensorTrait { fn blackman_window(size: FP8x23, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP8x23 { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( - self: @Tensor, - axis: usize, - keepdims: usize, - split: Option> + self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } fn scatter_nd( @@ -620,6 +619,20 @@ impl FP8x23Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
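// --- Illustrative sketch (not part of the patch): the dynamic_quantize_linear
// calls above pass the u8 range [0, 255] explicitly. A hypothetical helper
// showing the scale / zero-point arithmetic they imply, simplified: it omits
// the clamping of the range to include zero and the rounding and saturation
// of the zero point that quantization::dynamic_quantize_linear performs.
// Import paths are assumed from this crate's layout:
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp8x23::core::{FP8x23, FP8x23Impl};

fn dql_params(mut xs: Span<FP8x23>) -> (FP8x23, FP8x23) {
    // Scan for the minimum and maximum of the input range.
    let mut min = *xs.at(0);
    let mut max = *xs.at(0);
    while xs.len() != 0 {
        let v = *xs.pop_front().unwrap();
        if v < min { min = v; }
        if v > max { max = v; }
    };
    let qmin: FP8x23 = FixedTrait::new_unscaled(0, false);
    let qmax: FP8x23 = FixedTrait::new_unscaled(255, false);
    // scale maps the real range onto [qmin, qmax]; the zero point is where
    // the real value 0 lands on the quantized grid.
    let scale = (max - min) / (qmax - qmin);
    (scale, qmin - min / scale)
}
// --- end of sketch ---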
@@ -727,27 +740,26 @@ impl TensorI8IntoTensorFP8x23 of Into, Tensor> { impl FP8x23TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - const PRECISION: u32 = 75497; // 0.009 fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { @@ -765,11 +777,7 @@ fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -777,28 +785,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp8x23(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 51456e6fc..06a297b69 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -16,6 +11,9 @@ use orion::operators::tensor::implementations::{ }; use orion::numbers::fixed_point::implementations::fp8x23wide::math::trig::PI; +use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; + + impl FP8x23WTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -311,7 +309,7 @@ impl FP8x23WTensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -394,7 +392,6 @@ impl FP8x23WTensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -449,6 +446,10 @@ impl FP8x23WTensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported!']) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } @@ -500,10 +501,12 @@ impl FP8x23WTensor of TensorTrait { manipulation::split::split(self, 
axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP8x23W, end: FP8x23W, step: FP8x23W) -> Tensor { math::range::range(start, end, step) } @@ -519,26 +522,29 @@ impl FP8x23WTensor of TensorTrait { fn blackman_window(size: FP8x23W, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP8x23W { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), @@ -556,6 +562,20 @@ impl FP8x23WTensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
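// --- Illustrative sketch (not part of the patch): tensor comparison for this
// wide format. `==` routes through this file's PartialEq impl into tensor_eq
// (refactored below), which compares shapes exactly and data element-wise via
// relative_eq, i.e. within PRECISION (75497 / 2^23, roughly 0.009). Import
// paths and the FP8x23WTensorPartialEq impl name are assumptions:
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::tensor_fp8x23wide::{
    FP8x23WTensor, FP8x23WTensorPartialEq
};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{FP8x23W, FP8x23WImpl};

fn nearly_equal() -> bool {
    let a: Tensor<FP8x23W> = TensorTrait::new(
        array![2].span(),
        array![FixedTrait::new_unscaled(1, false), FixedTrait::new_unscaled(2, false)].span()
    );
    let b: Tensor<FP8x23W> = TensorTrait::new(
        array![2].span(),
        array![FixedTrait::new_unscaled(1, false), FixedTrait::new_unscaled(2, false)].span()
    );
    a == b // true: same shape, every element pair within PRECISION
}
// --- end of sketch ---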
@@ -663,27 +683,26 @@ impl U32TryIntoU32 of TryInto { impl FP8x23WTensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - const PRECISION: u64 = 75497; // 0.009 fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { @@ -701,11 +720,7 @@ fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -713,14 +728,10 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index a756ea72f..296876516 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::{I32Div, I32DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; @@ -16,7 +11,6 @@ use orion::operators::tensor::implementations::{ tensor_u32::U32Tensor, tensor_i8::I8Tensor, tensor_bool::BoolTensor }; - impl I32Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -82,6 +76,7 @@ impl I32Tensor of TensorTrait { math::reduce_sum::reduce_sum(self, axis, keepdims) } + fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -353,7 +348,7 @@ impl I32Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -487,6 +482,10 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported!']) + } + fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -536,10 +535,12 @@ impl I32Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: i32, end: i32, step: i32) -> Tensor { math::range::range(start, end, step) } @@ -555,38 +556,49 @@ impl I32Tensor of TensorTrait { fn blackman_window(size: i32, periodic: Option) -> Tensor 
{ panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ - manipulation::optional::optional(self) + + fn optional(self: @Tensor) -> Option> { + manipulation::optional::optional(self) } - - fn dynamic_quantize_linear( - self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor::, Tensor) { panic(array!['not supported!']) } - + fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -676,35 +688,30 @@ impl TensorI8IntoTensorI32 of Into, Tensor> { impl I32TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -712,28 +719,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_i32(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 38d12dbe4..42d807c68 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use 
core::traits::{TryInto, Into}; - use orion::numbers::{I8Div, I8DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; @@ -338,7 +333,6 @@ impl I8Tensor of TensorTrait { ) } - fn slice( self: @Tensor, starts: Span, @@ -357,7 +351,7 @@ impl I8Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -491,6 +485,10 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported']) + } + fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -540,10 +538,12 @@ impl I8Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: i8, end: i8, step: i8) -> Tensor { math::range::range(start, end, step) } @@ -559,8 +559,7 @@ impl I8Tensor of TensorTrait { fn blackman_window(size: i8, periodic: Option) -> Tensor { panic(array!['not supported!']) } - - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -568,29 +567,41 @@ impl I8Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - - fn dynamic_quantize_linear( - self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor::, Tensor) { panic(array!['not supported!']) } fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
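// --- Illustrative sketch (not part of the patch): a hypothetical call to the
// label_encoder operator added above, remapping the keys {1, 3} to the values
// {10, 30} and everything else to the default 0. All tensor values are
// invented for illustration and the I8Tensor re-export path is assumed. As in
// ONNX's LabelEncoder, keys and values are matched by position, so exactly
// one of keys/keys_tensor (and values/values_tensor) should be Some:
use orion::operators::tensor::{Tensor, TensorTrait, I8Tensor};

fn label_encoder_example() -> Tensor<i8> {
    let x = TensorTrait::<i8>::new(array![4].span(), array![1, 2, 3, 4].span());
    let default = TensorTrait::<i8>::new(array![1].span(), array![0].span());
    let keys = TensorTrait::<i8>::new(array![2].span(), array![1, 3].span());
    let values = TensorTrait::<i8>::new(array![2].span(), array![10, 30].span());
    x.label_encoder(
        Option::None, // default_list: Option<Span<i8>>
        Option::Some(default), // default_tensor
        Option::None, // keys: Option<Span<i8>>
        Option::Some(keys), // keys_tensor
        Option::None, // values: Option<Span<i8>>
        Option::Some(values) // values_tensor
    ) // expected data: [10, 0, 30, 0]
}
// --- end of sketch ---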
@@ -668,35 +679,30 @@ impl I8TensorPartialEq of PartialEq> { impl I8TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -704,13 +710,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 599681d13..efb681a86 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -300,7 +295,7 @@ impl U32Tensor of TensorTrait { core_tensor::nonzero(self) } - fn squeeze(self: @Tensor, axes: Option>) -> Tensor { + fn squeeze(self: @Tensor, axes: Option>) -> Tensor { core_tensor::squeeze(self, axes) } @@ -434,6 +429,10 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported!']) + } + fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -483,10 +482,12 @@ impl U32Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: u32, end: u32, step: u32) -> Tensor { math::range::range(start, end, step) } @@ -502,38 +503,49 @@ impl U32Tensor of TensorTrait { fn blackman_window(size: u32, periodic: Option) -> Tensor { panic(array!['not supported!']) } - - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - fn dynamic_quantize_linear( - self: @Tensor - )
-> (Tensor::, Tensor::, Tensor){ - panic(array!['not supported!']) + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor::, Tensor) { + panic(array!['not supported!']) } - + fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn label_encoder( + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option> + ) -> Tensor { + ml::label_encoder::label_encoder( + self, default_list, default_tensor, keys, keys_tensor, values, values_tensor + ) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -620,22 +632,22 @@ impl U32TryIntoI8 of TryInto { impl U32TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } @@ -644,11 +656,7 @@ impl U32TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -656,13 +664,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/linalg/matmul.cairo b/src/operators/tensor/linalg/matmul.cairo index 5be41efa5..fd604fe42 100644 --- a/src/operators/tensor/linalg/matmul.cairo +++ b/src/operators/tensor/linalg/matmul.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -32,6 +28,7 @@ fn matmul< let mut result_data = ArrayTrait::new(); result_shape.append(1); result_data.append(dot); + return TensorTrait::new(result_shape.span(), result_data.span()); } @@ -42,7 +39,7 @@ fn matmul< let result_shape = adjust_output_shape_after_matmul(result.shape, self_ndim, other_ndim); - return TensorTrait::new(result_shape, result.data); + TensorTrait::new(result_shape, result.data) } /// Computes the dot product of two 1-dimensional i32 tensors. 
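// --- Illustrative sketch (not part of the patch): hypothetical usage of the
// matmul entry point refactored above, with 2x2 operands invented for
// illustration (the U32Tensor re-export is assumed, as in split.cairo below):
use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};

fn matmul_2x2() -> Tensor<u32> {
    let a = TensorTrait::<u32>::new(array![2, 2].span(), array![1, 2, 3, 4].span());
    let b = TensorTrait::<u32>::new(array![2, 2].span(), array![5, 6, 7, 8].span());
    // Row-by-column products: [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]]
    // = [[19, 22], [43, 50]].
    a.matmul(@b)
}
// --- end of sketch ---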
@@ -82,7 +79,7 @@ fn dot_product< }; }; - return result; + result } @@ -116,30 +113,16 @@ fn matrix_multiply< let n = *mat1_shape[1]; let p = *mat2_shape[1]; - let mut result_data = ArrayTrait::new(); - let mut result_shape = ArrayTrait::new(); - result_shape.append(m); - result_shape.append(p); + let mut result_data: Array = array![]; + let mut result_shape: Array = array![m, p]; let mut i = 0_usize; - loop { - if i == m { - break (); - } - + while i != m { let mut j = 0_usize; - loop { - if j == p { - break (); - } - + while j != p { let mut sum: T = NumberTrait::zero(); let mut k = 0_usize; - loop { - if k == n { - break (); - } - + while k != n { let mat1_index = i * n + k; let mat2_index = k * p + j; sum += *mat1[mat1_index] * *mat2[mat2_index]; @@ -154,7 +137,7 @@ fn matrix_multiply< i += 1; }; - return TensorTrait::new(result_shape.span(), result_data.span()); + TensorTrait::new(result_shape.span(), result_data.span()) } /// Prepares the shape of a tensor for matrix multiplication. @@ -196,7 +179,7 @@ fn prepare_shape_for_matmul(mut shape: Span, is_first_tensor: bool) -> Sp loop { match shape.pop_front() { - Option::Some(item) => { shape_adjusted.append(*item); }, + Option::Some(item) => { shape_adjusted.append(*item) }, Option::None => { break; } }; }; @@ -206,7 +189,7 @@ fn prepare_shape_for_matmul(mut shape: Span, is_first_tensor: bool) -> Sp return shape_adjusted.span(); } - return shape; + shape } /// Adjusts the output shape of the matrix multiplication result based on the @@ -237,5 +220,5 @@ fn adjust_output_shape_after_matmul( let _ = output_shape.pop_back().unwrap(); } - return output_shape; + output_shape } diff --git a/src/operators/tensor/linalg/transpose.cairo b/src/operators/tensor/linalg/transpose.cairo index c7bb96da7..97ad240b4 100644 --- a/src/operators/tensor/linalg/transpose.cairo +++ b/src/operators/tensor/linalg/transpose.cairo @@ -1,6 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{ new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape }; @@ -24,25 +21,18 @@ fn transpose, impl TCopy: Copy, impl TDrop: D let output_shape = permutation_output_shape(*self.shape, axes); let output_data_len = len_from_shape(output_shape); - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; let mut output_index: usize = 0; - loop { - if output_index == output_data_len { - break (); - } - + while output_index != output_data_len { let output_indices = unravel_index(output_index, output_shape); - let mut input_indices = ArrayTrait::new(); + let mut input_indices: Array = array![]; let mut output_axis: usize = 0; - loop { - if output_axis == axes.len() { - break (); - } - + while output_axis != axes.len() { let input_axis = find_axis(axes, output_axis); input_indices.append(*output_indices[input_axis]); + output_axis += 1; }; @@ -52,39 +42,32 @@ fn transpose, impl TCopy: Copy, impl TDrop: D output_index += 1; }; - return TensorTrait::new(output_shape, output_data.span()); + TensorTrait::new(output_shape, output_data.span()) } - fn transpose2D, impl TCopy: Copy, impl TDrop: Drop>( self: @Tensor ) -> Tensor { assert((*self.shape).len() == 2, 'transpose a 2D tensor'); - let mut output_data = ArrayTrait::new(); - let mut output_shape = ArrayTrait::new(); + let mut output_data: Array = array![]; let n = *self.shape[0]; let m = *self.shape[1]; - output_shape.append(m); - output_shape.append(n); + let mut output_shape: Array = array![m, n]; let mut j: usize = 0; - loop { - if j == m 
{ - break (); - } + while j != m { let mut i = 0; - loop { - if i == n { - break (); - } + while i != n { output_data.append(*(*self.data)[i * m + j]); + i += 1; }; + j += 1; }; - return TensorTrait::new(output_shape.span(), output_data.span()); + TensorTrait::new(output_shape.span(), output_data.span()) } diff --git a/src/operators/tensor/linalg/trilu.cairo b/src/operators/tensor/linalg/trilu.cairo index 08bfdcc98..1536fcb6a 100644 --- a/src/operators/tensor/linalg/trilu.cairo +++ b/src/operators/tensor/linalg/trilu.cairo @@ -1,6 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -18,78 +15,77 @@ fn trilu< assert((*self.shape).len() >= 2, 'must have at least 2 dimensions'); let shape_len = (*self.shape).len(); - let mut output_data = ArrayTrait::new(); - let mut output_size = ArrayTrait::new(); + let mut output_data: Array = array![]; + let mut output_size: Array = array![]; let mut batch_size = 1; let mut n: u32 = 0; let mut m: u32 = 0; - { - let mut self_shape = *self.shape; - let mut i = 0; - loop { - match self_shape.pop_front() { - Option::Some(val) => { - if i == shape_len - 2 { - n = *val; - } else if i == shape_len - 1 { - m = *val; - } else { - batch_size *= *val; - } - i += 1; - output_size.append(*val); - }, - Option::None => { break (); } - } + let mut self_shape = *self.shape; + let mut i = 0; + loop { + match self_shape.pop_front() { + Option::Some(val) => { + if i == shape_len - 2 { + n = *val; + } else if i == shape_len - 1 { + m = *val; + } else { + batch_size *= *val; + } + i += 1; + output_size.append(*val); + }, + Option::None => { break; } + } + }; + + let mut self_data = *self.data; + let mut b = 0; + loop { + if b == batch_size { + break (); } - } - { - let mut self_data = *self.data; - let mut b = 0; + let mut i = 0; loop { - if b == batch_size { + if i == n { break (); } - - let mut i = 0; + let mut j = 0; loop { - if i == n { + if j == m { break (); } - let mut j = 0; - loop { - if j == m { - break (); - } - let ii: felt252 = i.into(); - let jj: felt252 = j.into(); + let ii: felt252 = i.into(); + let jj: felt252 = j.into(); - let iii: i64 = ii.try_into().unwrap(); - let jjj: i64 = jj.try_into().unwrap(); + let iii: i64 = ii.try_into().unwrap(); + let jjj: i64 = jj.try_into().unwrap(); - let result = match self_data.pop_front() { - Option::Some(val) => { - if (upper && (iii + k <= jjj)) || (!upper && (iii + k >= jjj)) { - *val - } else { - NumberTrait::zero() - } - }, - Option::None => { break (); } - }; - - output_data.append(result); - j += 1; + let result = match self_data.pop_front() { + Option::Some(val) => { + if (upper && (iii + k <= jjj)) || (!upper && (iii + k >= jjj)) { + *val + } else { + NumberTrait::zero() + } + }, + Option::None => { break; } }; - i += 1; + + output_data.append(result); + + j += 1; }; - b += 1; + + i += 1; }; - } - return TensorTrait::new(*self.shape, output_data.span()); + b += 1; + }; + + TensorTrait::new(*self.shape, output_data.span()) } diff --git a/src/operators/tensor/manipulation/optional.cairo b/src/operators/tensor/manipulation/optional.cairo index e57e35e69..53d26d423 100644 --- a/src/operators/tensor/manipulation/optional.cairo +++ b/src/operators/tensor/manipulation/optional.cairo @@ -1,13 +1,7 @@ -use core::option::OptionTrait; use orion::operators::tensor::{Tensor, TensorTrait}; /// Cf: TensorTrait::optional docstring -fn optional< - T, - +Copy, - +Drop, - impl TOption: OptionTrait ->( +fn optional, +Drop, 
impl TOption: OptionTrait>( self: @Tensor ) -> Option> { Option::Some(*self) } diff --git a/src/operators/tensor/manipulation/reverse_sequence.cairo b/src/operators/tensor/manipulation/reverse_sequence.cairo index efec92399..8bb45fe9a 100644 --- a/src/operators/tensor/manipulation/reverse_sequence.cairo +++ b/src/operators/tensor/manipulation/reverse_sequence.cairo @@ -1,28 +1,23 @@ -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; /// Cf: TensorTrait::reverse_sequence docstring -fn reverse_sequence< - T, - impl TTensor: TensorTrait, - impl TCopy: Copy, - impl TDrop: Drop ->( - self: @Tensor, - sequence_lens: Tensor, - batch_axis: Option, +fn reverse_sequence, impl TCopy: Copy, impl TDrop: Drop>( + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, time_axis: Option -) -> Tensor{ +) -> Tensor { let shape = *self.shape; let mut data: Array = array![]; let has_batch_axis: usize = match batch_axis { - Option::Some(value) => { - assert!((value != 0) || (value != 1), "batch_axis must be one of 1 or 0."); - value - }, - Option::None => 0, + Option::Some(value) => { + assert!((value == 0) || (value == 1), "batch_axis must be one of 1 or 0."); + value + }, + Option::None => 0, }; + let has_time_axis: usize = match time_axis { Option::Some(value) => { assert!((value == 0) || (value == 1), "time_axis must be one of 1 or 0."); @@ -30,8 +25,9 @@ }, Option::None => 1, }; + assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal"); - assert!((*self.data).len() >= 2, "Tensor of rank r >= 2"); + assert((*self.shape).len() >= 2, 'Tensor of rank r >= 2'); let control: bool = if has_batch_axis == 0 && has_time_axis == 1 { true } else { @@ -41,69 +37,64 @@ fn reverse_sequence< let mut index: Array = reverse_index(*self.shape, sequence_lens, control); loop { match index.pop_front() { - Option::Some(ele) => { - data.append(*((*self).data).at(ele)); - }, - Option::None => { - break; - } + Option::Some(ele) => { data.append(*((*self).data).at(ele)); }, + Option::None => { break; } } }; - + TensorTrait::::new(shape, data.span()) } -fn reverse_index( - shape: Span, sequence_lens: Tensor, control: bool -) -> Array { +fn reverse_index(shape: Span, sequence_lens: Tensor, control: bool) -> Array { let x: usize = *shape.at(0); let y: usize = *shape.at(1); - let mut result = ArrayTrait::::new(); + let mut result: Array = array![]; if control { // [i, slice] - assert!(sequence_lens.data.len() <= x,"The length of sequence_lens cannot exceed batch_axis"); + assert!( + sequence_lens.data.len() <= x, "The length of sequence_lens cannot exceed batch_axis" + ); let mut i: usize = 0; - loop { - if i >= x { - break; - } - + while i != x { let reverse: usize = (*sequence_lens.data.at(i)); - assert!(reverse <= y && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); + assert!( + reverse <= y && reverse >= 1, + "sequence_lens must be greater than one and less than batch_size" + ); let mut j: usize = reverse - 1; loop { - if j == 0 { result.append(i * y + j); break; } + result.append(i * y + j); j -= 1; }; let current_index_len: usize = (i + 1) * y - 1; let mut j: usize = result.len(); - loop { - if j > current_index_len { - break; - } + while j != current_index_len + 1 { result.append(j); j += 1; }; + i += 1; }; } else { // [slice, i] - assert!(sequence_lens.data.len() <= y,"The length of sequence_lens cannot exceed time_axis"); + assert!( + sequence_lens.data.len() <= y, "The length of sequence_lens
cannot exceed time_axis" + ); let mut tmp = ArrayTrait::::new(); let mut i: usize = 0; - loop { - if i > y - 1 { - break; - } + while i != y { let reverse: usize = *sequence_lens.data.at(i); - assert!(reverse <= x && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); + assert!( + reverse <= x && reverse >= 1, + "sequence_lens must be greater than one and less than batch_size" + ); let mut j: usize = reverse - 1; loop { @@ -115,31 +106,26 @@ fn reverse_index( j -= 1; }; let mut j: usize = reverse; - loop { - if j > x - 1 { - break; - } + while j != x { tmp.append(j * y + i); j += 1; }; + i += 1; }; + let tmp = tmp.span(); - let mut i : usize = 0; - loop { - if i > x - 1 { - break; - } + let mut i: usize = 0; + while i != x { let mut j: usize = 0; - loop { - if j > y - 1 { - break; - } + while j != y { result.append((*tmp.at(j * x + i))); j += 1; }; + i += 1; }; } + result -} \ No newline at end of file +} diff --git a/src/operators/tensor/manipulation/split.cairo b/src/operators/tensor/manipulation/split.cairo index 3919c034f..a8036f219 100644 --- a/src/operators/tensor/manipulation/split.cairo +++ b/src/operators/tensor/manipulation/split.cairo @@ -1,23 +1,16 @@ use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; /// Cf: TensorTrait::split docstring -fn split< - T, - +Copy, - +Drop, - +TensorTrait, ->( +fn split, +Drop, +TensorTrait,>( self: @Tensor, axis: usize, num_outputs: Option, split: Option> ) -> Array> { let has_num_outputs = match num_outputs { - Option::Some => { true }, + Option::Some => true, Option::None => false, }; let has_split = match split { - Option::Some => { true }, + Option::Some => true, Option::None => false, }; assert(!(has_num_outputs && has_split), 'split or num_outputs not both.'); @@ -34,6 +27,7 @@ fn split< } else { splited_t = split_has_split(self, axis, split.unwrap()); } + splited_t } @@ -52,23 +46,18 @@ fn split_num_outputs, +Drop, +TensorTrait,>( if (*(*t).shape.at(axis) % num_outputs == 0) { div = *(*t).shape.at(axis) / num_outputs; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; } else { div = *(*t).shape.at(axis) / num_outputs + 1; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; + match split.pop_front() { Option::Some(split_last_one) => { split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); @@ -80,34 +69,29 @@ fn split_num_outputs, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.len()) { - break; - } + while i != split.len() { let spl = *split.at(i); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 @@ -121,6 +105,7 @@ fn split_num_outputs, +Drop, +TensorTrait,>( 
splited_t.append(sub_t); i += 1; }; + splited_t } @@ -133,34 +118,29 @@ fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>( let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.data.len()) { - break; - } + while i != split.data.len() { let spl: usize = split.at(indices: array![i].span()); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 @@ -174,5 +154,6 @@ fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>( splited_t.append(sub_t); i += 1; }; + splited_t } diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo index 7ff3ff8db..46dbe1af7 100644 --- a/src/operators/tensor/manipulation/split_to_sequence.cairo +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -1,44 +1,34 @@ use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; /// Cf: NNTrait::split docstring -fn split_to_sequence< - T, - +Copy<T>, - +Drop<T>, - +TensorTrait<T>, ->( +fn split_to_sequence<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>( self: @Tensor<T>, axis: usize, keepdims: usize, split: Option<Tensor<usize>> ) -> Array<Tensor<T>> { let has_split = match split { - Option::Some => { true }, + Option::Some => true, Option::None => false, }; let mut has_num_outputs = false; let mut split_unwrap: Tensor<usize> = TensorTrait::new(array![1].span(), array![1].span()); - if (!has_split){ + if (!has_split) { let split_length = *(*self.shape).at(axis); let mut split_data: Array<usize> = array![]; let mut i = 0; - loop{ - if (i >= split_length) { - break; - } + while i != split_length { split_data.append(1); - i += 1; + i += 1; }; + split_unwrap = TensorTrait::new(array![split_length].span(), split_data.span()); - }else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) { + } else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) { // A scalar has_num_outputs = true; split_unwrap = split.unwrap(); - }else{ + } else { split_unwrap = split.unwrap(); } - let mut splited_t: Array<Tensor<T>> = array![]; @@ -52,28 +42,25 @@ fn split_to_sequence< splited_t = split_has_split(self, axis, split_unwrap); } - if (keepdims==0 && has_split==false) { + if (keepdims == 0 && !has_split) { let mut splited_t_temp: Array<Tensor<T>> = array![]; let mut i = 0; - loop{ - if (i >= splited_t.len()) { - break; - } + while i != splited_t.len() { let mut shape: Array<usize> = array![]; let mut j = 0; let shape_in_splited: Span<usize> = *splited_t.at(i).shape; - loop{ - if ( j >= shape_in_splited.len()) { - break; + while j != shape_in_splited.len() { + if (j != axis) { + shape.append(*shape_in_splited.at(j)) } - if (j!=axis) { - shape.append(*shape_in_splited.at(j)) - } - j += 1; + + j += 1; }; + splited_t_temp.append(splited_t[i].reshape(shape.span())); - i += 1; + i += 1; }; + return splited_t_temp; } splited_t @@ -82,12 +69,7 @@ fn split_to_sequence< /// Subfunction split for tensors (with 
num_outputs). /// Cf: TensorTrait::split docstring -fn split_num_outputs< - T, - +Copy<T>, - +Drop<T>, - +TensorTrait<T>, ->( +fn split_num_outputs<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>( t: @Tensor<T>, mut axis: usize, num_outputs: usize ) -> Array<Tensor<T>> { let mut splited_t: Array<Tensor<T>> = array![]; @@ -100,23 +82,18 @@ fn split_num_outputs< if (*(*t).shape.at(axis) % num_outputs == 0) { div = *(*t).shape.at(axis) / num_outputs; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; } else { div = *(*t).shape.at(axis) / num_outputs + 1; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; + match split.pop_front() { Option::Some(split_last_one) => { split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); @@ -128,34 +105,29 @@ fn split_num_outputs< let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.len()) { - break; - } + while i != split.len() { let spl = *split.at(i); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 @@ -169,37 +141,28 @@ fn split_num_outputs< splited_t.append(sub_t); i += 1; }; + splited_t } /// Subfunction split for tensors (with split). /// Cf: TensorTrait::split docstring -fn split_has_split< - T, - +Copy<T>, - +Drop<T>, - +TensorTrait<T>, ->( +fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>( t: @Tensor<T>, axis: usize, split: Tensor<usize> ) -> Array<Tensor<T>> { let mut splited_t: Array<Tensor<T>> = array![]; let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.data.len()) { - break; - } + while i != split.data.len() { let spl: usize = split.at(indices: array![i].span()); sli.set(axis, 0, pos); pos += spl; @@ -227,5 +190,6 @@ fn split_has_split< splited_t.append(sub_t); i += 1; }; + splited_t } diff --git a/src/operators/tensor/manipulation/unique.cairo b/src/operators/tensor/manipulation/unique.cairo index d90b9b8cd..6ace4b2aa 100644 --- a/src/operators/tensor/manipulation/unique.cairo +++ b/src/operators/tensor/manipulation/unique.cairo @@ -1,10 +1,3 @@ -use core::traits::Into; -use core::traits::IndexView; -use core::option::OptionTrait; -use core::array::{SpanTrait, ArrayTrait}; - -use core::debug::PrintTrait; - use alexandria_data_structures::array_ext::{SpanTraitExt, ArrayTraitExt}; use alexandria_sorting::merge_sort::merge; @@ -87,13 +80,7 @@ fn unique_flatten<T, +Copy<T>, +Drop<T>, +PartialOrd<T>, +PartialEq<T>,>( } }; - return ( - unique_elements.span(), - new_shape.span(), - indices.span(), - inverse_indices.span(), - count.span() - ); + (unique_elements.span(), new_shape.span(), indices.span(), inverse_indices.span(), count.span()) } /// Subfunction unique for tensors (with axis). 
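Reviewer note: the dominant mechanical change across these operator hunks is rewriting counted `loop { if cond { break; } ... }` blocks with the `while` syntax supported by the Cairo toolchain this crate targets. A minimal before/after sketch; `sum_upto_old` and `sum_upto_new` are hypothetical helpers for illustration, not functions in Orion:

```cairo
// Old style, as removed by this diff: manual exit check inside `loop`.
fn sum_upto_old(n: u32) -> u32 {
    let mut acc = 0;
    let mut i = 0;
    loop {
        if i == n {
            break; // exit condition checked by hand
        }
        acc += i;
        i += 1;
    };
    acc
}

// New style, as introduced by this diff: the condition moves into the header.
fn sum_upto_new(n: u32) -> u32 {
    let mut acc = 0;
    let mut i = 0;
    while i != n {
        acc += i;
        i += 1;
    };
    acc
}
```

The two forms stay equivalent only because every rewritten counter advances by exactly 1 per iteration, so the `!=` tests in the new headers terminate at the same point as the old `>=`-style checks.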
@@ -123,10 +110,7 @@ fn unique_along_axis< let mut unique_tensors_len = unique_tensors.len(); let mut i = 0; - loop { - if (i >= rank) { - break; - } + while i != rank { new_shape.append(if axis == i { unique_tensors_len } else { @@ -166,5 +150,5 @@ fn unique_along_axis< let new_shape_span = new_shape.span(); let unique_elements = flatten_array_of_tensors(unique_tensors, axis, new_shape_span); - return (unique_elements, new_shape_span, indices.span(), inverse_indices.span(), count.span()); + (unique_elements, new_shape_span, indices.span(), inverse_indices.span(), count.span()) } diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index 8035a842c..b73f6d102 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -58,6 +58,7 @@ mod is_inf; mod gather_nd; mod reduce_log_sum; mod erf; +mod reduce_log_sum_exp; mod layer_normalization; mod resize; mod compress; diff --git a/src/operators/tensor/math/abs.cairo b/src/operators/tensor/math/abs.cairo index 129e05b40..e129e94ea 100644 --- a/src/operators/tensor/math/abs.cairo +++ b/src/operators/tensor/math/abs.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -16,7 +12,7 @@ fn abs< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).abs()); }, @@ -24,5 +20,5 @@ fn abs< }; }; - return TensorTrait::::new(z.shape, data_result.span()); + TensorTrait::::new(z.shape, data_result.span()) } diff --git a/src/operators/tensor/math/acos.cairo b/src/operators/tensor/math/acos.cairo index 477f11450..799f87994 100644 --- a/src/operators/tensor/math/acos.cairo +++ b/src/operators/tensor/math/acos.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn acos< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).acos()); }, @@ -25,6 +21,6 @@ fn acos< }; }; - return TensorTrait::::new(self.shape, result.span()); + TensorTrait::::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/acosh.cairo b/src/operators/tensor/math/acosh.cairo index c9d159ca0..41717adab 100644 --- a/src/operators/tensor/math/acosh.cairo +++ b/src/operators/tensor/math/acosh.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -18,7 +13,7 @@ fn acosh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -27,5 +22,5 @@ fn acosh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/and.cairo b/src/operators/tensor/math/and.cairo index 07e4c9443..0b1369f35 100644 --- a/src/operators/tensor/math/and.cairo +++ b/src/operators/tensor/math/and.cairo @@ -1,7 
+1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor}; use orion::operators::tensor::helpers::{ @@ -11,12 +7,12 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::and docstring fn and(y: @Tensor, z: @Tensor) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -25,10 +21,7 @@ fn and(y: @Tensor, z: @Tensor) -> Tensor { result.append(*(*y.data)[indices_self] && *(*z.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/argmax.cairo b/src/operators/tensor/math/argmax.cairo index d4b54f9ae..f16c99b5c 100644 --- a/src/operators/tensor/math/argmax.cairo +++ b/src/operators/tensor/math/argmax.cairo @@ -1,8 +1,3 @@ -use core::debug::PrintTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::numbers::NumberTrait; @@ -36,7 +31,7 @@ fn argmax< return find_argmax_1D::(*self, axis, true, select_last_index); } - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); @@ -44,21 +39,16 @@ fn argmax< let MIN = NumberTrait::min_value(); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_argmax = find_argmax(self, output_indices, axis, 0, MIN, 0, select_last_index); output_data.append(current_argmax); index += 1; - if index == output_data_len { - break (); - }; }; - return TensorTrait::< - usize - >::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()); + TensorTrait::::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()) } /// Helper function that finds the index of the maximum value in a flat tensor. 
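A second recurring cleanup, visible in `argmax` above and in the files that follow: `ArrayTrait::new()` construction is replaced by the `array![]` macro, and trailing `return expr;` statements become tail expressions. A standalone sketch of the two equivalent forms; `build_pair` is a hypothetical helper, not Orion code:

```cairo
// Illustration only: both constructions yield the same Array<u32>.
fn build_pair() -> Array<u32> {
    // Old style, as removed by this diff:
    let mut a: Array<u32> = ArrayTrait::new();
    a.append(1);
    a.append(2);

    // New style, as introduced by this diff; literals may be inlined:
    let b: Array<u32> = array![1, 2];
    assert(a.len() == b.len(), 'same length');

    // Tail expression instead of `return b;`, matching the diff's style.
    b
}
```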
diff --git a/src/operators/tensor/math/argmin.cairo b/src/operators/tensor/math/argmin.cairo index 51502fd52..53087421d 100644 --- a/src/operators/tensor/math/argmin.cairo +++ b/src/operators/tensor/math/argmin.cairo @@ -1,8 +1,3 @@ -use core::debug::PrintTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, combine_indices, len_from_shape}; use orion::numbers::NumberTrait; @@ -36,7 +31,7 @@ fn argmin< return find_argmin_1D(*self, axis, true, select_last_index); } - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); @@ -44,21 +39,16 @@ fn argmin< let MAX = NumberTrait::max_value(); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_argmin = find_argmin(self, output_indices, axis, 0, MAX, 0, select_last_index); output_data.append(current_argmin); index += 1; - if index == output_data_len { - break (); - }; }; - return TensorTrait::< - usize - >::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()); + TensorTrait::::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()) } diff --git a/src/operators/tensor/math/arithmetic.cairo b/src/operators/tensor/math/arithmetic.cairo index fdbbb7863..e744b29f9 100644 --- a/src/operators/tensor/math/arithmetic.cairo +++ b/src/operators/tensor/math/arithmetic.cairo @@ -1,13 +1,6 @@ -use core::option::OptionTrait; -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; - -use orion::operators::tensor::helpers::broadcast_shape; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index,}; -use orion::operators::tensor::helpers::{broadcast_index_mapping, len_from_shape,}; +use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape,}; use orion::utils::saturate; fn add< @@ -16,12 +9,12 @@ fn add< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -30,12 +23,9 @@ fn add< result.append(*(*self.data)[indices_self] + *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn add_by_scalar< @@ -55,7 +45,7 @@ fn add_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele + val); }, @@ -63,7 +53,7 @@ fn add_by_scalar< }; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_add< @@ -81,12 +71,12 @@ fn saturated_add< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = 
broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -104,12 +94,9 @@ fn saturated_add< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn sub< @@ -118,12 +105,12 @@ fn sub< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -132,12 +119,9 @@ fn sub< result.append(*(*self.data)[indices_self] - *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn sub_by_scalar< @@ -157,7 +141,7 @@ fn sub_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele - val); }, @@ -165,7 +149,7 @@ fn sub_by_scalar< }; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_sub< @@ -183,12 +167,12 @@ fn saturated_sub< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -206,12 +190,9 @@ fn saturated_sub< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn mul< @@ -220,12 +201,12 @@ fn mul< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -234,12 +215,9 @@ fn mul< result.append(*(*self.data)[indices_self] * *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn mul_by_scalar< @@ -259,7 +237,7 @@ fn mul_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele * val); }, @@ -267,7 +245,7 @@ fn mul_by_scalar< 
}; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_mul< @@ -285,12 +263,12 @@ fn saturated_mul< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -308,12 +286,9 @@ fn saturated_mul< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn div< @@ -322,12 +297,12 @@ fn div< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -336,12 +311,9 @@ fn div< result.append(*(*self.data)[indices_self] / *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn div_by_scalar< @@ -361,7 +333,7 @@ fn div_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele / val); }, @@ -369,7 +341,7 @@ fn div_by_scalar< }; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_div< @@ -387,12 +359,12 @@ fn saturated_div< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -410,12 +382,9 @@ fn saturated_div< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn div_downcast< @@ -433,12 +402,12 @@ fn div_downcast< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -451,10 +420,7 @@ fn div_downcast< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/asin.cairo 
b/src/operators/tensor/math/asin.cairo index 60c440d8d..49a00ae19 100644 --- a/src/operators/tensor/math/asin.cairo +++ b/src/operators/tensor/math/asin.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn asin< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -26,5 +22,5 @@ fn asin< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/asinh.cairo b/src/operators/tensor/math/asinh.cairo index b94efa9a4..6e9f06a3b 100644 --- a/src/operators/tensor/math/asinh.cairo +++ b/src/operators/tensor/math/asinh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::asinh docstring fn asinh< T, @@ -19,7 +13,7 @@ fn asinh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -28,6 +22,6 @@ fn asinh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/atan.cairo b/src/operators/tensor/math/atan.cairo index f08271c0c..9d32a6ead 100644 --- a/src/operators/tensor/math/atan.cairo +++ b/src/operators/tensor/math/atan.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn atan< T, MAG, @@ -18,7 +12,7 @@ fn atan< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -27,5 +21,5 @@ fn atan< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/binarizer.cairo b/src/operators/tensor/math/binarizer.cairo index 0a02bc91b..0b66a4d4e 100644 --- a/src/operators/tensor/math/binarizer.cairo +++ b/src/operators/tensor/math/binarizer.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -23,7 +19,7 @@ fn binarizer< NumberTrait::zero() }; - let mut binarized_data = ArrayTrait::::new(); + let mut binarized_data: Array = array![]; loop { match self.data.pop_front() { @@ -38,5 +34,5 @@ fn binarizer< }; }; - return TensorTrait::new(self.shape, binarized_data.span()); + TensorTrait::new(self.shape, binarized_data.span()) } diff --git a/src/operators/tensor/math/bitwise_and.cairo b/src/operators/tensor/math/bitwise_and.cairo index e3487568b..f7e013218 100644 --- a/src/operators/tensor/math/bitwise_and.cairo +++ b/src/operators/tensor/math/bitwise_and.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use 
orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +16,12 @@ fn bitwise_and< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -40,10 +35,7 @@ fn bitwise_and< // result.append(res); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/bitwise_or.cairo b/src/operators/tensor/math/bitwise_or.cairo index 8869422d9..eaef9f492 100644 --- a/src/operators/tensor/math/bitwise_or.cairo +++ b/src/operators/tensor/math/bitwise_or.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +16,12 @@ fn bitwise_or< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -38,10 +33,7 @@ fn bitwise_or< result.append(NumberTrait::bitwise_or(lhs, rhs)); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/bitwise_xor.cairo b/src/operators/tensor/math/bitwise_xor.cairo index 934fa750f..a547465c0 100644 --- a/src/operators/tensor/math/bitwise_xor.cairo +++ b/src/operators/tensor/math/bitwise_xor.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +16,12 @@ fn bitwise_xor< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -38,10 +33,7 @@ fn bitwise_xor< result.append(NumberTrait::bitwise_xor(lhs, rhs)); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/blackman_window.cairo b/src/operators/tensor/math/blackman_window.cairo index 29f4d2903..217d03903 100644 --- a/src/operators/tensor/math/blackman_window.cairo +++ 
b/src/operators/tensor/math/blackman_window.cairo @@ -1,15 +1,6 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; -use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; fn blackman_window< T, @@ -25,7 +16,9 @@ fn blackman_window< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(size: T, PI: T, periodic: Option) -> Tensor { +>( + size: T, PI: T, periodic: Option +) -> Tensor { let start: T = NumberTrait::zero(); let one_step: T = NumberTrait::one(); let two: T = one_step + one_step; @@ -36,74 +29,66 @@ fn blackman_window< let n_0_5: T = (one_step - two) / two; let ni = TensorTrait::range(start, size, one_step); - assert!((ni.shape).len() == 1, "Unexpected shape 1."); + assert((ni.shape).len() == 1, 'Unexpected shape 1.'); let mut N_1 = size; + if periodic != Option::Some(1) { N_1 = N_1 - one_step; }; + let len = *(ni.shape).at(0); - let mut arr1: Array = ArrayTrait::::new(); + let mut arr1: Array = array![]; let mut i: usize = 0; - loop { + while i != len { let v = *(ni.data).at(i); let r = (v * (PI * two)) / N_1; arr1.append(r); i += 1; - if i >= len { - break (); - }; }; + let window_cos = TensorTrait::::new(ni.shape, arr1.span()).cos(); i = 0; - let mut a1: Array = ArrayTrait::::new(); - loop { + let mut a1: Array = array![]; + while i != len { let v = *(window_cos.data).at(i); let r = v * n_0_5; a1.append(r); i += 1; - if i >= len { - break (); - }; }; + let window1 = TensorTrait::::new(ni.shape, a1.span()); - let mut arr2: Array = ArrayTrait::::new(); + let mut arr2: Array = array![]; i = 0; - loop { + while i != len { let v = *(ni.data).at(i); let r = v * (PI * two * two) / N_1; arr2.append(r); i += 1; - if i >= len { - break (); - }; }; + let window_cos_2 = TensorTrait::::new(ni.shape, arr2.span()).cos(); - let mut a2: Array = ArrayTrait::::new(); + let mut a2: Array = array![]; i = 0; - loop { + while i != len { let v = *(window_cos_2.data).at(i); let r = v * beta + alpha; a2.append(r); i += 1; - if i >= len { - break (); - }; }; + let window2 = TensorTrait::::new(ni.shape, a2.span()); - let mut arr: Array = ArrayTrait::::new(); + let mut arr: Array = array![]; i = 0; - loop { + while i != len { let v1 = *(window1.data).at(i); let v2 = *(window2.data).at(i); let r = v1 + v2; arr.append(r); i += 1; - if i >= len { - break (); - }; }; - return TensorTrait::::new(ni.shape, arr.span()); + + TensorTrait::::new(ni.shape, arr.span()) } diff --git a/src/operators/tensor/math/ceil.cairo b/src/operators/tensor/math/ceil.cairo index b6448b11d..8ee604ab3 100644 --- a/src/operators/tensor/math/ceil.cairo +++ b/src/operators/tensor/math/ceil.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -16,7 +12,7 @@ fn ceil< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -25,6 +21,6 @@ fn ceil< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, 
data_result.span()) } diff --git a/src/operators/tensor/math/compress.cairo b/src/operators/tensor/math/compress.cairo index 80a4f7648..86793b187 100644 --- a/src/operators/tensor/math/compress.cairo +++ b/src/operators/tensor/math/compress.cairo @@ -1,13 +1,4 @@ use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; use orion::numbers::NumberTrait; use orion::operators::tensor::U32TensorPartialEq; @@ -33,9 +24,9 @@ fn compress, impl TCopy: Copy, impl TDro assert(*data_shape.at(axis) >= condition.data.len(), 'index out of bound'); } - let mut output_shape = ArrayTrait::new(); - let mut index_data = ArrayTrait::new(); - let mut output_data = ArrayTrait::new(); + let mut output_shape = array![]; + let mut index_data = array![]; + let mut output_data = array![]; let mut condition_data = condition.data; @@ -153,5 +144,6 @@ fn compress, impl TCopy: Copy, impl TDro } let mut output_tensor = TensorTrait::::new(output_shape.span(), output_data.span()); - return output_tensor; + + output_tensor } diff --git a/src/operators/tensor/math/concat.cairo b/src/operators/tensor/math/concat.cairo index 1826d8d69..381aa7b5a 100644 --- a/src/operators/tensor/math/concat.cairo +++ b/src/operators/tensor/math/concat.cairo @@ -1,13 +1,6 @@ -use core::clone::Clone; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; -use core::debug::PrintTrait; -use core::traits::Into; - use orion::operators::tensor::helpers::replace_index; use orion::operators::tensor::{TensorTrait, Tensor}; - fn concat, impl TCopy: Copy, impl TDrop: Drop,>( mut tensors: Span>, axis: usize ) -> Tensor { @@ -59,7 +52,7 @@ fn validate_shapes(mut tensors: Span>, mut base_shape: Span, fn compute_output_size( mut base_shape: Span, mut tensors: Span>, axis: usize ) -> Array { - let mut output_size = ArrayTrait::::new(); + let mut output_size: Array = array![]; let mut axis_size = 0; loop { @@ -90,16 +83,12 @@ fn compute_output_size( fn concatenate_data, impl TDrop: Drop,>( mut tensors: Span>, axis: usize, base_shape: Span ) -> Array { - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let total_loops = product_upto(base_shape, axis); let mut outer_loop_index = 0; - loop { - if outer_loop_index == total_loops { - break; - } - + while outer_loop_index != total_loops { let mut tensors_copy = tensors; loop { match tensors_copy.pop_front() { @@ -107,11 +96,7 @@ fn concatenate_data, impl TDrop: Drop,>( let slice_len = (*tensor.data).len() / total_loops; let mut inner_index = 0; - loop { - if inner_index == slice_len { - break; - } - + while inner_index != slice_len { output_data .append(*(*tensor.data).at(slice_len * outer_loop_index + inner_index)); inner_index += 1; diff --git a/src/operators/tensor/math/cos.cairo b/src/operators/tensor/math/cos.cairo index 943b6528b..c37e95618 100644 --- a/src/operators/tensor/math/cos.cairo +++ b/src/operators/tensor/math/cos.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::cos docstring fn cos< T, @@ -19,7 +13,7 @@ fn cos< >( mut self: Tensor ) -> Tensor { - let mut result = 
ArrayTrait::new(); + let mut result = array![]; loop { match self.data.pop_front() { @@ -28,5 +22,5 @@ fn cos< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/cosh.cairo b/src/operators/tensor/math/cosh.cairo index df8a7b40c..2133e3e8e 100644 --- a/src/operators/tensor/math/cosh.cairo +++ b/src/operators/tensor/math/cosh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::cosh docstring fn cosh< T, @@ -19,7 +13,7 @@ fn cosh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result = array![]; loop { match self.data.pop_front() { @@ -28,5 +22,5 @@ fn cosh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/cumsum.cairo b/src/operators/tensor/math/cumsum.cairo index 99aea3156..6fef885d2 100644 --- a/src/operators/tensor/math/cumsum.cairo +++ b/src/operators/tensor/math/cumsum.cairo @@ -1,11 +1,6 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::debug::PrintTrait; - +use orion::numbers::NumberTrait; use orion::operators::tensor::helpers::replace_index; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::numbers::NumberTrait; /// Cf: TensorTrait::cumsum docstring fn cumsum< @@ -52,17 +47,14 @@ fn cumsum_forward< let data = *self.data; - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; let mut index: usize = 0; - loop { - if index == data.len() { - break (); - }; - + while index != data.len() { let current_indices = unravel_index(index, *self.shape); let axis_value = *current_indices[axis]; + if axis_value == 0 { if exclusive { output_data.append(zero); @@ -91,10 +83,9 @@ fn cumsum_forward< index += 1; }; - return TensorTrait::::new(*self.shape, output_data.span()); + TensorTrait::::new(*self.shape, output_data.span()) } - /// Cf: TensorTrait::cumsum docstring fn cumsum_reverse< T, @@ -113,20 +104,15 @@ fn cumsum_reverse< assert(axis < (*self.shape).len(), 'axis out of dimensions'); let data = *self.data; - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; let mut index: usize = 0; - loop { - if index == data.len() { - break (); - }; - + while index != data.len() { let current_indices = unravel_index(index, *self.shape); let mut axis_value = *current_indices[axis]; if axis_value == 0 { // If the axis value is 0, we need to sum all the elements // in the axis. 
- let mut sum = *(data)[index]; if exclusive { sum = zero; @@ -144,6 +130,7 @@ fn cumsum_reverse< let next_axis_element_index = ravel_index(*self.shape, next_axis_element_indices); sum += *data[next_axis_element_index]; }; + output_data.append(sum); } else { // If the axis value is not 0, we only need to do a subtraction @@ -168,5 +155,5 @@ fn cumsum_reverse< index += 1; }; - return TensorTrait::::new(*self.shape, output_data.span()); + TensorTrait::::new(*self.shape, output_data.span()) } diff --git a/src/operators/tensor/math/equal.cairo b/src/operators/tensor/math/equal.cairo index e3f884acd..d2693acf9 100644 --- a/src/operators/tensor/math/equal.cairo +++ b/src/operators/tensor/math/equal.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility @@ -18,12 +14,12 @@ fn equal< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -36,10 +32,7 @@ fn equal< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/erf.cairo b/src/operators/tensor/math/erf.cairo index 8cc8ab055..545ff789d 100644 --- a/src/operators/tensor/math/erf.cairo +++ b/src/operators/tensor/math/erf.cairo @@ -1,11 +1,6 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: TensorTrait::erf docstring fn erf< @@ -18,7 +13,7 @@ fn erf< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -27,5 +22,5 @@ fn erf< }; }; - return TensorTrait::::new(z.shape, data_result.span()); + TensorTrait::::new(z.shape, data_result.span()) } diff --git a/src/operators/tensor/math/exp.cairo b/src/operators/tensor/math/exp.cairo index 0c1700abf..cd1d71548 100644 --- a/src/operators/tensor/math/exp.cairo +++ b/src/operators/tensor/math/exp.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{Into, TryInto}; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::exp docstring fn exp< T, @@ -19,7 +13,7 @@ fn exp< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result = array![]; loop { match self.data.pop_front() { @@ -28,16 +22,16 @@ fn exp< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } /// Cf: TensorTrait::exp docstring fn exp_upcast< T, - MAG, + TMAG, W, WMAG, - impl TFixedTrait: FixedTrait, + impl TFixedTrait: FixedTrait, impl TTensor: 
TensorTrait, impl TCopy: Copy, impl TDrop: Drop, @@ -49,7 +43,7 @@ fn exp_upcast< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result = array![]; loop { match self.data.pop_front() { @@ -58,5 +52,5 @@ fn exp_upcast< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/flatten.cairo b/src/operators/tensor/math/flatten.cairo index d8e5b5583..a23671b77 100644 --- a/src/operators/tensor/math/flatten.cairo +++ b/src/operators/tensor/math/flatten.cairo @@ -1,9 +1,5 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::flatten docstring fn flatten>(self: @Tensor, axis: usize) -> Tensor { let mut shape = *self.shape; @@ -27,5 +23,5 @@ fn flatten>(self: @Tensor, axis: usize) let new_shape_second_axis = (*self.data).len() / new_shape_first_axis; - return self.reshape(array![new_shape_first_axis, new_shape_second_axis].span()); + self.reshape(array![new_shape_first_axis, new_shape_second_axis].span()) } diff --git a/src/operators/tensor/math/gather.cairo b/src/operators/tensor/math/gather.cairo index 93662868b..a60e927ab 100644 --- a/src/operators/tensor/math/gather.cairo +++ b/src/operators/tensor/math/gather.cairo @@ -1,13 +1,4 @@ use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; - -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; -use core::option::OptionTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::{TensorTrait, Tensor}; @@ -26,14 +17,14 @@ fn gather, impl TCopy: Copy, impl TDrop: let ind_max = indices.data.max().unwrap(); assert(ind_max < axis_shape, 'this index out of bounds'); - let mut output_data = ArrayTrait::new(); - let mut output_size = ArrayTrait::new(); + let mut output_data = array![]; + let mut output_size = array![]; let mut self_shape = *self.shape; let mut i: usize = 0; loop { match self_shape.pop_front() { Option::Some(val) => { - if (i == axis) { + if i == axis { let mut indices_shape = indices.shape; loop { match indices_shape.pop_front() { @@ -44,6 +35,7 @@ fn gather, impl TCopy: Copy, impl TDrop: } else { output_size.append(*val); } + i += 1; }, Option::None => { break; } @@ -58,10 +50,11 @@ fn gather, impl TCopy: Copy, impl TDrop: loop { match self_shape.pop_front() { Option::Some(val) => { - if (i == axis) { + if i == axis { divisor /= *val; break (); }; + outer_loop_break *= *val; divisor /= *val; i += 1; @@ -86,25 +79,18 @@ fn gather, impl TCopy: Copy, impl TDrop: let mut outer_loop: usize = 0; let axis_index = *self.shape[axis]; - loop { - if outer_loop == outer_loop_break { - break; - } - + while outer_loop != outer_loop_break { let mut data_indices = indices.data; loop { match data_indices.pop_front() { Option::Some(indice) => { let mut inner_loop = 0; - loop { - if inner_loop == break_loop { - break; - } - + while inner_loop != break_loop { let new_val = inner_loop / divisor % axis_index; if *indice == new_val { output_data.append(*self.data[break_loop * outer_loop + inner_loop]); } + inner_loop += 1; } }, @@ -117,5 +103,5 @@ fn gather, impl TCopy: Copy, impl TDrop: let mut output_tensor = TensorTrait::::new(output_size.span(), output_data.span()); - return output_tensor; + output_tensor } diff --git a/src/operators/tensor/math/gather_elements.cairo 
b/src/operators/tensor/math/gather_elements.cairo index f34e3e6b3..c3793a316 100644 --- a/src/operators/tensor/math/gather_elements.cairo +++ b/src/operators/tensor/math/gather_elements.cairo @@ -1,13 +1,4 @@ use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; - -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; -use core::option::OptionTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::U32TensorPartialEq; @@ -48,7 +39,7 @@ fn gather_elements, impl TCopy: Copy, im }; }; - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; let mut outer_loop = data_shape_clone.at(axis); let mut inner_loop = 1; @@ -61,6 +52,7 @@ fn gather_elements, impl TCopy: Copy, im if (ind >= axis) { multiplier *= *val; } + ind += 1; }, Option::None => { break; } @@ -82,6 +74,7 @@ fn gather_elements, impl TCopy: Copy, im if (ind >= axis) { multiplier_index *= *val; } + ind += 1; }, Option::None => { break; } @@ -97,16 +90,19 @@ fn gather_elements, impl TCopy: Copy, im let value = *val * inner_loop.into() + (i % inner_loop); output_data.append(*self.data[value]); } + if ((axis == indices_rank - 1) & (axis != 0)) { let value = *val + *outer_loop * (i / *outer_loop_index); output_data.append(*self.data[value]); } + if ((axis != indices_rank - 1) & (axis != 0)) { let value = *val * (looper) + (i % looper) + (multiplier * (i / multiplier_index)); output_data.append(*self.data[value]); } + i += 1; }, Option::None => { break; } @@ -114,5 +110,6 @@ fn gather_elements, impl TCopy: Copy, im }; let mut output_tensor = TensorTrait::::new(indices.shape, output_data.span()); - return output_tensor; + + output_tensor } diff --git a/src/operators/tensor/math/gather_nd.cairo b/src/operators/tensor/math/gather_nd.cairo index 5d6c75ce1..e5f340487 100644 --- a/src/operators/tensor/math/gather_nd.cairo +++ b/src/operators/tensor/math/gather_nd.cairo @@ -1,13 +1,4 @@ use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; use orion::numbers::NumberTrait; use orion::operators::tensor::U32TensorPartialEq; @@ -37,20 +28,17 @@ fn gather_nd, impl TCopy: Copy, impl TDr 'check indices' ); - let mut batch_dims_shape = ArrayTrait::new(); - let mut output_shape = ArrayTrait::new(); - let mut index_data = ArrayTrait::new(); - let mut output_data = ArrayTrait::new(); + let mut batch_dims_shape = array![]; + let mut output_shape = array![]; + let mut index_data = array![]; + let mut output_data = array![]; let mut batch_dims_size = batch_dims; let mut total_data_len = 1; - let mut multiple_data_len = ArrayTrait::new(); + let mut multiple_data_len = array![]; let mut ind = 0; - loop { - if (ind == batch_dims) { - break (); - } + while ind != batch_dims { match indices_shape_clone.pop_front() { Option::Some(val) => { batch_dims_size *= *val; @@ -79,6 +67,7 @@ fn gather_nd, impl TCopy: Copy, impl TDr if (ind >= (batch_dims + *indices_shape_last)) { output_shape.append(*val); } + ind += 1; }, Option::None => { break; } @@ -101,6 +90,7 @@ fn gather_nd, impl TCopy: Copy, impl TDr if (ind >= batch_dims + *indices_shape_last) { incrementer *= *val; } + ind += 1; }, Option::None => { break; } @@ -116,6 +106,7 @@ fn gather_nd, impl TCopy: Copy, impl 
TDr if (ind >= batch_dims) { breaker *= *val; } + ind += 1; }, Option::None => { break; } @@ -136,13 +127,11 @@ fn gather_nd, impl TCopy: Copy, impl TDr if (index == *indices_shape_last - 1) { let mut data_ind: usize = result; - loop { - if data_ind == result + incrementer { - break; - } + while data_ind != result + incrementer { index_data.append(data_ind + incr); data_ind += 1; }; + result = 0; }; }, @@ -158,5 +147,6 @@ fn gather_nd, impl TCopy: Copy, impl TDr }; let mut output_tensor = TensorTrait::::new(output_shape.span(), output_data.span()); - return output_tensor; + + output_tensor } diff --git a/src/operators/tensor/math/greater.cairo b/src/operators/tensor/math/greater.cairo index 1177212bc..f90462b22 100644 --- a/src/operators/tensor/math/greater.cairo +++ b/src/operators/tensor/math/greater.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility @@ -18,12 +14,12 @@ fn greater< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -36,10 +32,7 @@ fn greater< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/greater_equal.cairo b/src/operators/tensor/math/greater_equal.cairo index b7756d12e..bc8e1b045 100644 --- a/src/operators/tensor/math/greater_equal.cairo +++ b/src/operators/tensor/math/greater_equal.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility @@ -18,12 +14,12 @@ fn greater_equal< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -36,10 +32,7 @@ fn greater_equal< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/hamming_window.cairo b/src/operators/tensor/math/hamming_window.cairo index 216590f09..06539f0fe 100644 --- a/src/operators/tensor/math/hamming_window.cairo +++ b/src/operators/tensor/math/hamming_window.cairo @@ -1,15 +1,6 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - 
-use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; -use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; fn hamming_window< T, @@ -25,7 +16,9 @@ fn hamming_window< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(size: T, PI: T, periodic: Option) -> Tensor { +>( + size: T, PI: T, periodic: Option +) -> Tensor { let start: T = NumberTrait::zero(); let one_step: T = NumberTrait::one(); let two: T = one_step + one_step; @@ -36,37 +29,36 @@ fn hamming_window< let beta: T = one_step - alpha; let ni = TensorTrait::range(start, size, one_step); - assert!((ni.shape).len() == 1, "Unexpected shape 1."); + assert((ni.shape).len() == 1, 'Unexpected shape 1.'); let mut N_1 = size; + if periodic != Option::Some(1) { N_1 = N_1 - one_step; }; + let len = *(ni.shape).at(0); - let mut arr: Array = ArrayTrait::::new(); + let mut arr: Array = array![]; let mut i: usize = 0; - loop { + while i != len { let v = *(ni.data).at(i); let r = v * PI * two / N_1; arr.append(r); i += 1; - if i >= len { - break (); - }; }; + let window = TensorTrait::::new(ni.shape, arr.span()); let window_cos = window.cos(); let len2 = *(ni.shape).at(0); - let mut arr2: Array = ArrayTrait::::new(); + let mut arr2: Array = array![]; let mut j: usize = 0; - loop { + while j != len2 { let v = *(window_cos.data).at(j); let v_2 = alpha - v * beta; arr2.append(v_2); j += 1; - if j >= len2 { - break (); - }; }; + let window_cos_2 = TensorTrait::::new(ni.shape, arr2.span()); - return window_cos_2; + + window_cos_2 } diff --git a/src/operators/tensor/math/hann_window.cairo b/src/operators/tensor/math/hann_window.cairo index 05aa3b923..4fc7a801f 100644 --- a/src/operators/tensor/math/hann_window.cairo +++ b/src/operators/tensor/math/hann_window.cairo @@ -1,15 +1,6 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; -use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; fn hann_window< T, @@ -25,41 +16,42 @@ fn hann_window< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(size: T, PI: T, periodic: Option) -> Tensor { +>( + size: T, PI: T, periodic: Option +) -> Tensor { let start: T = NumberTrait::zero(); let one_step: T = NumberTrait::one(); let ni = TensorTrait::range(start, size, one_step); - assert!((ni.shape).len() == 1, "Unexpected shape 1."); + assert((ni.shape).len() == 1, 'Unexpected shape 1.'); let mut N_1 = size; + if periodic != Option::Some(1) { N_1 = N_1 - one_step; }; + let len = *(ni.shape).at(0); - let mut arr: Array = ArrayTrait::::new(); + let mut arr: Array = array![]; let mut i: usize = 0; - loop { + while i != len { let v = *(ni.data).at(i); let r = v * PI / N_1; arr.append(r); i += 1; - if i >= len { - break (); - }; }; + let window = TensorTrait::::new(ni.shape, arr.span()); let window_sin = window.sin(); let len2 = *(ni.shape).at(0); - let mut arr2: Array = ArrayTrait::::new(); + let mut arr2: Array = array![]; let mut j: usize = 0; - loop { + while j != len2 { let v = *(window_sin.data).at(j); let v_2 = v * v; arr2.append(v_2); j += 1; - if j 
>= len2 { - break (); - }; }; + let window_sin_2 = TensorTrait::::new(ni.shape, arr2.span()); - return window_sin_2; + + window_sin_2 } diff --git a/src/operators/tensor/math/is_inf.cairo b/src/operators/tensor/math/is_inf.cairo index d3a5f8f4f..021b10732 100644 --- a/src/operators/tensor/math/is_inf.cairo +++ b/src/operators/tensor/math/is_inf.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::implementations::tensor_bool::BoolTensor; @@ -47,7 +43,7 @@ fn is_inf< return is_neg_inf(x); } - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { @@ -56,7 +52,7 @@ fn is_inf< }; }; - return TensorTrait::new(*x.shape, data_result.span()); + TensorTrait::new(*x.shape, data_result.span()) } /// Cf: TensorTrait::is_pos_inf docstring @@ -70,7 +66,7 @@ fn is_pos_inf< >( x: @Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { @@ -79,7 +75,7 @@ fn is_pos_inf< }; }; - return TensorTrait::new(*x.shape, data_result.span()); + TensorTrait::new(*x.shape, data_result.span()) } /// Cf: TensorTrait::is_neg_inf docstring @@ -93,7 +89,7 @@ fn is_neg_inf< >( x: @Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { @@ -102,5 +98,5 @@ fn is_neg_inf< }; }; - return TensorTrait::new(*x.shape, data_result.span()); + TensorTrait::new(*x.shape, data_result.span()) } diff --git a/src/operators/tensor/math/is_nan.cairo b/src/operators/tensor/math/is_nan.cairo index 817cf5f4d..2f1818a81 100644 --- a/src/operators/tensor/math/is_nan.cairo +++ b/src/operators/tensor/math/is_nan.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::implementations::tensor_bool::BoolTensor; @@ -17,7 +13,7 @@ fn is_nan< >( x: @Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { @@ -26,5 +22,5 @@ fn is_nan< }; }; - return TensorTrait::new(*x.shape, data_result.span()); + TensorTrait::new(*x.shape, data_result.span()) } diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 1417b7e2b..e61e826f5 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -1,17 +1,10 @@ -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; use orion::numbers::{NumberTrait, I32IntoU32}; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use core::debug::PrintTrait; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; - /// Cf: TensorTrait::layer_normalization docstring fn layer_normalization< T, @@ -52,20 +45,15 @@ fn layer_normalization< 
};

     let unsqueezed_rank = X_rank - axis;
-    let mut reduction_shape = ArrayTrait::new();
+    let mut reduction_shape = array![];
     let mut i = 0;
-    loop {
-        if i == axis {
-            break;
-        }
+    while i != axis {
         reduction_shape.append(*(*self).shape.at(i));
         i += 1;
     };
+
     let mut i = 0;
-    loop {
-        if i == unsqueezed_rank {
-            break;
-        }
+    while i != unsqueezed_rank {
         reduction_shape.append(1);
         i += 1;
     };
@@ -73,34 +61,32 @@
     let mut row_number = 1;
     let mut col_number = 1;
     let mut i = 0;
-    loop {
-        if i == X_rank {
-            break;
-        }
+    while i != X_rank {
         if i < axis {
             row_number *= *(*self).shape.at(i);
         } else {
             col_number *= *(*self).shape.at(i);
         }
+
         i += 1;
     };
-    let mut shape_matrix = ArrayTrait::new();
+    let mut shape_matrix = array![];
     shape_matrix.append(row_number);
     shape_matrix.append(col_number);

     // Shape [1, 1] to multiply one-element tensors with 2D matrices
-    let mut shape_one = ArrayTrait::new();
+    let mut shape_one = array![];
     shape_one.append(1);
     shape_one.append(1);

-    let mut col_number_tensor = ArrayTrait::new();
+    let mut col_number_tensor = array![];
     col_number_tensor.append(NumberTrait::new_unscaled(col_number.into(), false));

-    let mut epsilon_tensor = ArrayTrait::new();
+    let mut epsilon_tensor = array![];
     epsilon_tensor.append(epsilon);

-    let mut one_tensor = ArrayTrait::new();
+    let mut one_tensor = array![];
     one_tensor.append(NumberTrait::one());

     let x_mat = self.reshape(shape_matrix.span());
@@ -122,23 +108,19 @@
     let scale = if (*scale).shape.len() < (*self).shape.len() {
         // Append 1s to the scale shape so that scale has a shape compatible with Y for multiplication
-        let mut shape = ArrayTrait::new();
+        let mut shape = array![];
         let mut i = 0;
-        loop {
-            if i == (*self).shape.len() - (*scale).shape.len() {
-                break;
-            }
+        while i != (*self).shape.len() - (*scale).shape.len() {
             shape.append(1);
             i += 1;
         };
+
         let mut i = 0;
-        loop {
-            if i == (*scale).shape.len() {
-                break;
-            }
+        while i != (*scale).shape.len() {
             shape.append(*(*scale).shape.at(i));
             i += 1;
         };
+
         TensorTrait::new(shape.span(), (*scale).data)
     } else {
         *scale
@@ -150,23 +132,19 @@
         Option::Some(B) => {
             let B = if (*B).shape.len() < (*self).shape.len() {
                 // Append 1s to the B shape so that B has a shape compatible with Y for multiplication
-                let mut shape = ArrayTrait::new();
+                let mut shape = array![];
                 let mut i = 0;
-                loop {
-                    if i == (*self).shape.len() - (*B).shape.len() {
-                        break;
-                    }
+                while i != (*self).shape.len() - (*B).shape.len() {
                     shape.append(1);
                     i += 1;
                 };
+
                 let mut i = 0;
-                loop {
-                    if i == (*B).shape.len() {
-                        break;
-                    }
+                while i != (*B).shape.len() {
                     shape.append(*(*B).shape.at(i));
                     i += 1;
                 };
+
                 TensorTrait::new(shape.span(), (*B).data)
             } else {
                 *B
@@ -179,6 +157,6 @@
     let X_mean = TensorTrait::new(reduction_shape.span(), x_mean.data);
     let X_inv_std_dev = TensorTrait::new(reduction_shape.span(), inv_std_dev.data);

-    return (Y, X_mean, X_inv_std_dev);
+    (Y, X_mean, X_inv_std_dev)
 }
diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo
index b9cc0370f..35f9b4d73 100644
--- a/src/operators/tensor/math/less.cairo
+++ b/src/operators/tensor/math/less.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
     broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
@@ -18,12 +14,12 @@ fn
less< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -36,10 +32,7 @@ fn less< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/less_equal.cairo b/src/operators/tensor/math/less_equal.cairo index 48316ed97..8c982a09c 100644 --- a/src/operators/tensor/math/less_equal.cairo +++ b/src/operators/tensor/math/less_equal.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility @@ -18,12 +14,12 @@ fn less_equal< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -36,10 +32,7 @@ fn less_equal< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/log.cairo b/src/operators/tensor/math/log.cairo index fa153c61b..3b7ae0823 100644 --- a/src/operators/tensor/math/log.cairo +++ b/src/operators/tensor/math/log.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::log docstring fn log< T, @@ -19,7 +13,7 @@ fn log< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result = array![]; loop { match self.data.pop_front() { @@ -28,5 +22,5 @@ fn log< }; }; - return TensorTrait::::new(self.shape, result.span()); + TensorTrait::::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/max.cairo b/src/operators/tensor/math/max.cairo index 245099179..3ce6d4919 100644 --- a/src/operators/tensor/math/max.cairo +++ b/src/operators/tensor/math/max.cairo @@ -1,5 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -30,12 +28,8 @@ fn max< let mut tensor_counter: usize = 1; - loop { - if tensor_counter > tensors.len() - 1 { - break; - } - - let mut new_max_data = ArrayTrait::::new(); + while tensor_counter != tensors.len() { + let mut new_max_data: Array = array![]; let mut current_tensor = *tensors.at(tensor_counter); @@ -43,7 +37,7 @@ fn max< let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let mut 
indices_broadcasted = unravel_index(n, broadcasted_shape); let mut indices_self = broadcast_index_mapping(max_shape, indices_broadcasted); @@ -57,9 +51,6 @@ fn max< new_max_data.append(max_value); n += 1; - if n == num_elements { - break (); - }; }; max_shape = broadcasted_shape; @@ -67,5 +58,5 @@ fn max< tensor_counter += 1; }; - return TensorTrait::::new(max_shape, max_data); + TensorTrait::::new(max_shape, max_data) } diff --git a/src/operators/tensor/math/max_in_tensor.cairo b/src/operators/tensor/math/max_in_tensor.cairo index f1aabdafb..8f1622813 100644 --- a/src/operators/tensor/math/max_in_tensor.cairo +++ b/src/operators/tensor/math/max_in_tensor.cairo @@ -1,6 +1,3 @@ -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; /// Cf: TensorTrait::max_in_tensor docstring @@ -28,5 +25,5 @@ fn max_in_tensor< }; }; - return max_value; + max_value } diff --git a/src/operators/tensor/math/min.cairo b/src/operators/tensor/math/min.cairo index a4ae655eb..2e7acadab 100644 --- a/src/operators/tensor/math/min.cairo +++ b/src/operators/tensor/math/min.cairo @@ -1,5 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -30,12 +28,8 @@ fn min< let mut tensor_counter: usize = 1; - loop { - if tensor_counter > tensors.len() - 1 { - break; - } - - let mut new_min_data = ArrayTrait::::new(); + while tensor_counter != tensors.len() { + let mut new_min_data: Array = array![]; let mut current_tensor = *tensors.at(tensor_counter); @@ -43,7 +37,7 @@ fn min< let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let mut indices_broadcasted = unravel_index(n, broadcasted_shape); let mut indices_self = broadcast_index_mapping(min_shape, indices_broadcasted); @@ -57,9 +51,6 @@ fn min< new_min_data.append(min_value); n += 1; - if n == num_elements { - break (); - }; }; min_shape = broadcasted_shape; @@ -67,5 +58,5 @@ fn min< tensor_counter += 1; }; - return TensorTrait::::new(min_shape, min_data); + TensorTrait::::new(min_shape, min_data) } diff --git a/src/operators/tensor/math/min_in_tensor.cairo b/src/operators/tensor/math/min_in_tensor.cairo index efa4356e5..854ac9a7f 100644 --- a/src/operators/tensor/math/min_in_tensor.cairo +++ b/src/operators/tensor/math/min_in_tensor.cairo @@ -1,6 +1,3 @@ -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; /// Cf: TensorTrait::min_in_tensor docstring @@ -28,5 +25,5 @@ fn min_in_tensor< }; }; - return min_value; + min_value } diff --git a/src/operators/tensor/math/neg.cairo b/src/operators/tensor/math/neg.cairo index 0eaa8b3da..71f358be0 100644 --- a/src/operators/tensor/math/neg.cairo +++ b/src/operators/tensor/math/neg.cairo @@ -1,9 +1,5 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: TensorTrait::neg docstring fn neg< @@ -16,7 +12,7 @@ fn neg< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).neg()); }, @@ -24,5 +20,5 @@ fn neg< }; }; - return TensorTrait::::new(z.shape, data_result.span()); + TensorTrait::::new(z.shape, 
data_result.span())
 }
diff --git a/src/operators/tensor/math/not.cairo b/src/operators/tensor/math/not.cairo
index 93e25c525..37f9561ec 100644
--- a/src/operators/tensor/math/not.cairo
+++ b/src/operators/tensor/math/not.cairo
@@ -1,15 +1,10 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::operators::tensor::implementations::{tensor_bool::BoolTensor};
-
 // Cf TensorTrait::not docstring
 fn not(mut z: Tensor<bool>) -> Tensor<bool> {
-    let mut data_result = ArrayTrait::<bool>::new();
+    let mut data_result: Array<bool> = array![];

     loop {
         match z.data.pop_front() {
@@ -18,5 +13,5 @@ fn not(mut z: Tensor<bool>) -> Tensor<bool> {
         };
     };

-    return TensorTrait::new(z.shape, data_result.span());
+    TensorTrait::new(z.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/onehot.cairo b/src/operators/tensor/math/onehot.cairo
index bad9c9ef0..f5c48a9ef 100644
--- a/src/operators/tensor/math/onehot.cairo
+++ b/src/operators/tensor/math/onehot.cairo
@@ -1,18 +1,7 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
-use core::option::OptionTrait;
-
 use orion::numbers::NumberTrait;
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::{TensorTrait, Tensor};
-
 /// Cf: TensorTrait::onehot docstring
 fn onehot_encode<
     T,
@@ -40,8 +29,8 @@

     assert(((axis == 999) | (axis.into() <= rank)), 'axis out of dimensions');

-    let mut output_data = ArrayTrait::new();
-    let mut output_size = ArrayTrait::<usize>::new();
+    let mut output_data = array![];
+    let mut output_size: Array<usize> = array![];

     // New shape for output data
     loop {
@@ -65,10 +54,7 @@
         }

         let mut inner_index = 0;
-        loop {
-            if inner_index == depth {
-                break ();
-            };
+        while inner_index != depth {
             let ind = FixedTrait::<
                 T, MAG
             >::new_unscaled(inner_index.try_into().unwrap(), false);
@@ -87,7 +73,7 @@
     };

     let mut output_tensor = TensorTrait::new(output_size.span(), output_data.span());
-    let mut tranpose_axes = ArrayTrait::new();
+    let mut tranpose_axes = array![];
     // Get new shape if axis is not the last dimension
     if (axis != 999) & (axis.into() != rank) {
         let mut index: usize = 0;
@@ -104,11 +90,10 @@
             index += 1;
         };

-
         output_tensor = output_tensor.transpose(tranpose_axes.span());
     }

-    return output_tensor;
+    output_tensor
 }

 fn onehot<
@@ -127,16 +112,17 @@
 ) -> Tensor<T> {
     assert(values.len() == 2, 'Wrong values dimensions');

-    let mut sizes = ArrayTrait::new();
+    let mut sizes = array![];
     sizes.append(2);

     let mut first = *values.pop_front().unwrap();
     let mut second = *values.pop_front().unwrap();

-    let mut data = ArrayTrait::new();
+    let mut data = array![];
     data.append(FixedTrait::<T, MAG>::new_unscaled(first.try_into().unwrap(), false));
     data.append(FixedTrait::<T, MAG>::new_unscaled(second.try_into().unwrap(), false));

     let values = TensorTrait::new(sizes.span(), data.span());
+
     onehot_encode(self, depth, axis, values)
 }
diff --git a/src/operators/tensor/math/optional_get_element.cairo b/src/operators/tensor/math/optional_get_element.cairo
index 3af112f85..eef73bc17 100644
--- a/src/operators/tensor/math/optional_get_element.cairo
+++ b/src/operators/tensor/math/optional_get_element.cairo
@@ -1,9 +1,5 @@
-use core::array::ArrayTrait;
-use option::OptionTrait;
-use core::array::SpanTrait;
-
-use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::numbers::NumberTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait};

 /// Cf: TensorTrait::optional_get_element docstring
 fn optional_get_element<
@@ -16,7 +12,7 @@
 >(
     mut z: Tensor<T>, index: usize
 ) -> Tensor<T> {
-    let mut data_result = ArrayTrait::<T>::new();
+    let mut data_result: Array<T> = array![];

     // use match to fetch the element, handling both in-bounds and out-of-bounds indices
     match z.data.get(index) {
@@ -24,5 +20,5 @@
         Option::None => {}
     };

-    return TensorTrait::<T>::new(z.shape, data_result.span());
+    TensorTrait::<T>::new(z.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/or.cairo b/src/operators/tensor/math/or.cairo
index 60d10c95a..13b4697a3 100644
--- a/src/operators/tensor/math/or.cairo
+++ b/src/operators/tensor/math/or.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
@@ -20,12 +16,12 @@ fn or<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];

     let num_elements = len_from_shape(broadcasted_shape);

     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);

         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -38,10 +34,7 @@ fn or<
         }

         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };

-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/pow.cairo b/src/operators/tensor/math/pow.cairo
index a5b15579b..b02097551 100644
--- a/src/operators/tensor/math/pow.cairo
+++ b/src/operators/tensor/math/pow.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape};
@@ -18,12 +14,12 @@ fn pow<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<T> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<T> = ArrayTrait::new();
+    let mut result: Array<T> = array![];

     let num_elements = len_from_shape(broadcasted_shape);

     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);

         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -32,10 +28,7 @@ fn pow<
         result.append(NumberTrait::pow(*(*y.data)[indices_self], *(*z.data)[indices_other]));

         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };

-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/random_uniform_like.cairo b/src/operators/tensor/math/random_uniform_like.cairo
index 0b9c06cda..61d993dda 100644
--- a/src/operators/tensor/math/random_uniform_like.cairo
+++ b/src/operators/tensor/math/random_uniform_like.cairo
@@ -1,17 +1,12 @@
-use core::traits::Into;
-use core::traits::TryInto;
-use orion::operators::tensor::core::{Tensor, TensorTrait};
-use core::option::OptionTrait;
+use core::integer;
+
+use
alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; -use core::traits::PartialEq; -use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; -use core::integer::{u128s_from_felt252, U128sFromFelt252Result}; -use core::traits; /// Cf: TensorTrait::random_uniform_like docstring fn random_uniform_like< @@ -30,8 +25,9 @@ fn random_uniform_like< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(tensor: Tensor, high: Option, low: Option, seed:Option) -> Tensor { - +>( + tensor: Tensor, high: Option, low: Option, seed: Option +) -> Tensor { let mut seed: usize = match seed { Option::Some(seed) => seed, Option::None => NumberTrait::max_value(), @@ -45,11 +41,10 @@ fn random_uniform_like< Option::None => NumberTrait::zero(), }; assert!(high > low, "high must be larger than low"); - let res = tensor_get_state(tensor,seed,high,low); - - return res; -} + let res = tensor_get_state(tensor, seed, high, low); + res +} fn tensor_get_state< T, @@ -67,15 +62,14 @@ fn tensor_get_state< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(tensor: Tensor, mut seed: usize, high: T, low: T) -> Tensor { - let mut data = ArrayTrait::new(); +>( + tensor: Tensor, mut seed: usize, high: T, low: T +) -> Tensor { + let mut data = array![]; let mut count = (tensor.data).len(); let mut i = 0; - loop { - if count == i { - break; - } + while count != i { let mut v = NumberTrait::one(); v = hash_random_range(seed, low, high); let a: u64 = 1664525; @@ -86,7 +80,8 @@ fn tensor_get_state< data.append(v); i += 1; }; - return TensorTrait::new(tensor.shape, data.span()); + + TensorTrait::new(tensor.shape, data.span()) } // High level random in a range @@ -105,16 +100,19 @@ fn hash_random_range< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(seed: usize, min: T, max: T) -> T { +>( + seed: usize, min: T, max: T +) -> T { let mut key = PedersenHasherImpl::new(); let hash: felt252 = key.hash(seed.into(), 1); let a: u128 = 4294967295; - let b: u128 = match u128s_from_felt252(hash) { - U128sFromFelt252Result::Narrow(x) => x, - U128sFromFelt252Result::Wide((x, _)) => x, + let b: u128 = match integer::u128s_from_felt252(hash) { + integer::U128sFromFelt252Result::Narrow(x) => x, + integer::U128sFromFelt252Result::Wide((x, _)) => x, } % a; let c: felt252 = b.into(); let rnd: T = NumberTrait::from_felt(c); let range = max - min + NumberTrait::one(); // + 1 to include max + min + rnd % range } diff --git a/src/operators/tensor/math/range.cairo b/src/operators/tensor/math/range.cairo index a21f7f2b0..1edc0f628 100644 --- a/src/operators/tensor/math/range.cairo +++ b/src/operators/tensor/math/range.cairo @@ -1,15 +1,5 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - -use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; -use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; fn range< T, @@ 
-23,17 +13,18 @@ fn range< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(mut start: T, end: T, step: T) -> Tensor { - let mut result: Array = ArrayTrait::::new(); +>( + mut start: T, end: T, step: T +) -> Tensor { + let mut result: Array = array![]; let zero: T = NumberTrait::zero(); - loop { - if (step >= zero && start >= end) || (step <= zero && start <= end) { - break (); - }; + while !(step >= zero && start >= end) && !(step <= zero && start <= end) { let v = start; result.append(v); start += step; }; + let shape = array![result.len()]; - return TensorTrait::::new(shape.span(), result.span()); + + TensorTrait::::new(shape.span(), result.span()) } diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo index 813101eb5..ba2be9215 100644 --- a/src/operators/tensor/math/reduce_l1.cairo +++ b/src/operators/tensor/math/reduce_l1.cairo @@ -1,11 +1,7 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; + /// Cf: TensorTrait::reduce_sum docstring fn reduce_l1< T, @@ -19,5 +15,6 @@ fn reduce_l1< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let data_abs = self.abs(); - return data_abs.reduce_sum(axis: axis, keepdims: keepdims); + + data_abs.reduce_sum(axis: axis, keepdims: keepdims) } diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index 8bb5bc888..96f4b7245 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -1,11 +1,8 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; use core::debug::PrintTrait; use orion::numbers::NumberTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; fn square< T, @@ -19,7 +16,7 @@ fn square< self: @Tensor ) -> Tensor { let mut data = *self.data; - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; loop { match data.pop_front() { @@ -32,8 +29,10 @@ fn square< }; let tensor_square = TensorTrait::new(*self.shape, output_data.span()); - return tensor_square; + + tensor_square } + /// Cf: TensorTrait::reduce_l2 docstring fn reduce_l2< T, @@ -48,7 +47,8 @@ fn reduce_l2< ) -> Tensor { let tensor_square = square(self); let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); - return tensor_square_sum.sqrt(); + + tensor_square_sum.sqrt() } fn reduce_l2_complex< @@ -64,9 +64,8 @@ fn reduce_l2_complex< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let mut tensor_square = square(@self.abs()); - let mut tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); - return tensor_square_sum.sqrt(); + tensor_square_sum.sqrt() } diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo index 91019ebc0..60a5225cb 100644 --- a/src/operators/tensor/math/reduce_log_sum.cairo +++ b/src/operators/tensor/math/reduce_log_sum.cairo @@ -1,11 +1,6 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use 
core::debug::PrintTrait;
-
 use orion::numbers::NumberTrait;
-use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
 use orion::numbers::fixed_point::core::FixedTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};

 /// Cf: TensorTrait::reduce_log_sum docstring
 fn reduce_log_sum<
@@ -23,5 +18,5 @@
     let tensor_square_sum = self.reduce_sum(axis: axis, keepdims: keepdims);
     let tensor_square_sum_log = tensor_square_sum.log();

-    return tensor_square_sum_log;
+    tensor_square_sum_log
 }
diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo
new file mode 100644
index 000000000..5fd57ab76
--- /dev/null
+++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo
@@ -0,0 +1,52 @@
+use core::option::OptionTrait;
+use core::array::ArrayTrait;
+use core::array::SpanTrait;
+use core::debug::PrintTrait;
+
+use orion::numbers::NumberTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
+use orion::numbers::fixed_point::core::FixedTrait;
+use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
+
+/// Cf: TensorTrait::reduce_log_sum_exp docstring
+// fn reduce_log_sum_exp_wide<
+//     T,
+//     TMAG,
+//     W,
+//     WMAG,
+//     impl TIntoW: Into<T, W>,
+//     impl WTryIntoT: TryInto<W, T>,
+//     impl WCopy: Copy<W>,
+//     impl WDrop: Drop<W>,
+//     impl TCopy: Copy<T>,
+//     impl TDrop: Drop<T>,
+//     impl TDiv: Div<T>,
+//     impl TTensor: TensorTrait<T>,
+//     impl WTensor: TensorTrait<W>,
+//     impl TFixed: FixedTrait<T, TMAG>,
+//     impl WFixed: FixedTrait<W, WMAG>
+// >(
+//     self: @Tensor<T>, axis: usize, keepdims: bool
+// ) -> Tensor<W> {
+//     let tensor_exp: Tensor<W> = exp_upcast(*self);
+//     let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims);
+
+//     return tensor_exp_log_sum;
+// }
+
+fn reduce_log_sum_exp<
+    T,
+    MAG,
+    impl Tensor: TensorTrait<T>,
+    impl TNumber: NumberTrait<T, MAG>,
+    impl TMul: Mul<T>,
+    impl TAddEq: AddEq<T>,
+    impl TCopy: Copy<T>,
+    impl TDrop: Drop<T>,
+>(
+    self: @Tensor<T>, axis: usize, keepdims: bool
+) -> Tensor<T> {
+    let tensor_exp = self.exp();
+    let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis: axis, keepdims: keepdims);
+    tensor_exp_log_sum
+}
diff --git a/src/operators/tensor/math/reduce_mean.cairo b/src/operators/tensor/math/reduce_mean.cairo
index a692fdb91..44839e19e 100644
--- a/src/operators/tensor/math/reduce_mean.cairo
+++ b/src/operators/tensor/math/reduce_mean.cairo
@@ -1,10 +1,5 @@
-use core::option::OptionTrait;
-use core::traits::Div;
-use core::traits::TryInto;
-use core::traits::Into;
-
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
+use alexandria_sorting::bubble_sort;
+use alexandria_data_structures::array_ext::{SpanTraitExt};

 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
@@ -13,10 +8,6 @@ use orion::operators::tensor::helpers::{
     reduce_output_shape, len_from_shape, combine_indices, get_all_axes
 };

-use alexandria_sorting::bubble_sort;
-use alexandria_data_structures::array_ext::{SpanTraitExt};
-
-
 /// Cf: TensorTrait::reduce_mean docstring
 fn reduce_mean<
     T,
@@ -35,7 +26,7 @@
 ) -> Tensor<T> {
     let noop_with_empty_axes = match noop_with_empty_axes {
         Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
-        Option::None => { false },
+        Option::None => false,
     };
     let axes = match axes {
         Option::Some(axes) => {
@@ -43,7 +34,7 @@
                 get_all_axes(*self.shape)
             } else {
                 assert(axes.len() == axes.unique().len(), 'duplicated axis.');
-                let mut axes_arr =
ArrayTrait::new(); + let mut axes_arr = array![]; let mut copy_axes = axes; loop { match copy_axes.pop_front() { @@ -56,7 +47,7 @@ fn reduce_mean< } }, Option::None => { - if (noop_with_empty_axes == true) { + if noop_with_empty_axes { return *self; } get_all_axes(*self.shape) @@ -64,7 +55,7 @@ fn reduce_mean< }; let keepdims = match keepdims { Option::Some(keepdims) => keepdims, - Option::None => { true }, + Option::None => true, }; let mut axis_c = 0; @@ -80,21 +71,19 @@ fn reduce_mean< data = array![current_mean].span(); break (); } - let mut temp_data = ArrayTrait::new(); + let mut temp_data = array![]; let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false); let data_len = len_from_shape(temp_shape); let mut index: usize = 0; - loop { + while index != data_len { let indices = unravel_index(index, temp_shape); let current_mean = accumulate_mean::(data, shape, indices, *axis - axis_c); temp_data.append(current_mean); index += 1; - if index == data_len { - break (); - }; }; + shape = temp_shape; data = temp_data.span(); axis_c += 1; @@ -104,7 +93,7 @@ fn reduce_mean< }; let mut axes_copy = axes; - if keepdims == true { + if keepdims { shape = *self.shape; loop { match axes_copy.pop_front() { @@ -112,9 +101,10 @@ fn reduce_mean< Option::None => { break; } }; }; - return TensorTrait::::new(shape, data); + + TensorTrait::::new(shape, data) } else { - return TensorTrait::::new(shape, data); + TensorTrait::::new(shape, data) } } @@ -149,11 +139,7 @@ fn accumulate_mean< let mut axis_indexu32 = 0; if (input_shape).len() > 1 { - loop { - if axis_indexu32 == axis_len { - break (); - } - + while axis_indexu32 != axis_len { let input_indices = combine_indices(output_indices, axis_indexu32, axis); let input_index = ravel_index(input_shape, input_indices); let ele = *(input_data)[input_index]; @@ -174,5 +160,5 @@ fn accumulate_mean< }; } // let axis_index: T = NumberTrait::::new(axis_index.try_into().unwrap(), false); - return acc / axis_index; + acc / axis_index } diff --git a/src/operators/tensor/math/reduce_min.cairo b/src/operators/tensor/math/reduce_min.cairo index eb268c1f2..9fb0d1117 100644 --- a/src/operators/tensor/math/reduce_min.cairo +++ b/src/operators/tensor/math/reduce_min.cairo @@ -1,9 +1,5 @@ -use core::option::OptionTrait; -use core::traits::TryInto; -use core::traits::Into; - -use core::array::ArrayTrait; -use core::array::SpanTrait; +use alexandria_sorting::bubble_sort; +use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; @@ -12,10 +8,6 @@ use orion::operators::tensor::helpers::{ reduce_output_shape, len_from_shape, combine_indices, get_all_axes }; -use alexandria_sorting::bubble_sort; -use alexandria_data_structures::array_ext::{SpanTraitExt}; - - /// Cf: TensorTrait::reduce_min docstring fn reduce_min< T, @@ -33,7 +25,7 @@ fn reduce_min< ) -> Tensor { let noop_with_empty_axes = match noop_with_empty_axes { Option::Some(noop_with_empty_axes) => noop_with_empty_axes, - Option::None => { false }, + Option::None => false, }; let axes = match axes { Option::Some(axes) => { @@ -41,7 +33,7 @@ fn reduce_min< get_all_axes(*self.shape) } else { assert(axes.len() == axes.unique().len(), 'duplicated axis.'); - let mut axes_arr = ArrayTrait::new(); + let mut axes_arr: Array = array![]; let mut copy_axes = axes; loop { match copy_axes.pop_front() { @@ -54,7 +46,7 @@ fn reduce_min< } }, Option::None => { - if (noop_with_empty_axes == true) { + if noop_with_empty_axes { return *self; } 
get_all_axes(*self.shape) @@ -62,7 +54,7 @@ fn reduce_min< }; let keepdims = match keepdims { Option::Some(keepdims) => keepdims, - Option::None => { true }, + Option::None => true, }; let mut axis_c = 0; @@ -78,21 +70,19 @@ fn reduce_min< data = array![current_min].span(); break (); } - let mut temp_data = ArrayTrait::new(); + let mut temp_data = array![]; let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false); let data_len = len_from_shape(temp_shape); let mut index: usize = 0; - loop { + while index != data_len { let indices = unravel_index(index, temp_shape); let current_min = accumulate_min::(data, shape, indices, *axis - axis_c); temp_data.append(current_min); index += 1; - if index == data_len { - break (); - }; }; + shape = temp_shape; data = temp_data.span(); axis_c += 1; @@ -102,7 +92,7 @@ fn reduce_min< }; let mut axes_copy = axes; - if keepdims == true { + if keepdims { shape = *self.shape; loop { match axes_copy.pop_front() { @@ -110,9 +100,10 @@ fn reduce_min< Option::None => { break; } }; }; - return TensorTrait::::new(shape, data); + + TensorTrait::::new(shape, data) } else { - return TensorTrait::::new(shape, data); + TensorTrait::::new(shape, data) } } @@ -145,11 +136,7 @@ fn accumulate_min< let mut axis_index = 0; if (input_shape).len() > 1 { - loop { - if axis_index == axis_len { - break (); - } - + while axis_index != axis_len { let input_indices = combine_indices(output_indices, axis_index, axis); let input_index = ravel_index(input_shape, input_indices); let ele = *(input_data)[input_index]; @@ -169,5 +156,6 @@ fn accumulate_min< }; }; } - return min; + + min } diff --git a/src/operators/tensor/math/reduce_prod.cairo b/src/operators/tensor/math/reduce_prod.cairo index cf66dec97..f5df6179d 100644 --- a/src/operators/tensor/math/reduce_prod.cairo +++ b/src/operators/tensor/math/reduce_prod.cairo @@ -1,8 +1,3 @@ -use core::option::OptionTrait; -use core::traits::MulEq; -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; @@ -64,14 +59,14 @@ fn reduce_prod< /// - ONNX: Open Neural Network Exchange: https://onnx.ai/ /// /// ``` - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; if (*self.shape).len() == 1 { assert(axis == 0, 'axis out of dimensions'); let current_prod = accumulate_production::(*self.data, *self.shape, *self.shape, axis); output_data.append(current_prod); - let mut output_shape = ArrayTrait::new(); + let mut output_shape = array![]; output_shape.append(1); return TensorTrait::new(output_shape.span(), output_data.span()); @@ -80,7 +75,7 @@ fn reduce_prod< let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_sum = accumulate_production::< T @@ -89,16 +84,14 @@ fn reduce_prod< output_data.append(current_sum); index += 1; - if index == output_data_len { - break (); - }; }; if keepdims { let output_shape = reduce_output_shape(*self.shape, axis, true); - return TensorTrait::::new(output_shape, output_data.span()); + + TensorTrait::::new(output_shape, output_data.span()) } else { - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } } } 
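Editor's note: the mechanical refactor repeated across every file in this patch follows one pattern. The sketch below shows it in isolation on a hypothetical `double_all` helper (not a function from this diff, and assuming only the standard Cairo prelude): a counted `loop { if cond { break (); } ... }` becomes a `while` loop, `ArrayTrait::new()` becomes the `array![]` macro, and a trailing `return x;` becomes a tail expression.

```cairo
// Hypothetical helper, for illustration only — not part of this patch.

// Before: counted `loop`, `ArrayTrait::new()`, explicit `return`.
fn double_all_before(xs: Span<u32>) -> Array<u32> {
    let mut out = ArrayTrait::new();
    let mut i = 0;
    loop {
        if i == xs.len() {
            break ();
        }
        out.append(*xs.at(i) * 2);
        i += 1;
    };
    return out;
}

// After: `while` loop, `array![]`, implicit tail-expression return.
fn double_all_after(xs: Span<u32>) -> Array<u32> {
    let mut out = array![];
    let mut i = 0;
    while i != xs.len() {
        out.append(*xs.at(i) * 2);
        i += 1;
    };
    out
}
```

The guard flips from `if i == len { break; }` to `while i != len`, so both versions execute exactly the same iterations and behavior is unchanged; `while` loops require a Cairo 2.5+ toolchain, which this project already targets.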
diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index ab834136f..078345f4a 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,12 +1,7 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; - /// Cf: TensorTrait::reduce_sum docstring fn reduce_sum< T, @@ -19,14 +14,14 @@ fn reduce_sum< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; if (*self.shape).len() == 1 { assert(axis == 0, 'axis out of dimensions'); let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); output_data.append(current_sum); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; output_shape.append(1); return TensorTrait::new(output_shape.span(), output_data.span()); @@ -35,23 +30,21 @@ fn reduce_sum< let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); output_data.append(current_sum); index += 1; - if index == output_data_len { - break (); - }; }; if keepdims { let output_shape = reduce_output_shape(*self.shape, axis, true); - return TensorTrait::::new(output_shape, output_data.span()); + + TensorTrait::::new(output_shape, output_data.span()) } else { - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } } } diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index 329b8fb4e..b8ad7df99 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -1,13 +1,7 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; - fn square< T, MAG, @@ -20,7 +14,7 @@ fn square< self: @Tensor ) -> Tensor { let mut data = *self.data; - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; loop { match data.pop_front() { @@ -33,7 +27,8 @@ fn square< }; let tensor_square = TensorTrait::new(*self.shape, output_data.span()); - return tensor_square; + + tensor_square } /// Cf: TensorTrait::reduce_sum_square docstring @@ -51,5 +46,6 @@ fn reduce_sum_square< ) -> Tensor { let tensor_square = square(self); let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); - return tensor_square_sum; + + tensor_square_sum } diff --git a/src/operators/tensor/math/resize.cairo b/src/operators/tensor/math/resize.cairo index 961fc3853..ab0ef86f7 100644 --- a/src/operators/tensor/math/resize.cairo +++ b/src/operators/tensor/math/resize.cairo @@ -1,15 +1,10 @@ -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; -use 
orion::numbers::NumberTrait; use alexandria_sorting::bubble_sort; + +use orion::numbers::NumberTrait; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use core::debug::PrintTrait; #[derive(Copy, Drop)] enum MODE { @@ -43,7 +38,6 @@ enum TRANSFORMATION_MODE { HALF_PIXEL_SYMMETRIC } - /// Cf: TensorTrait::resize docstring fn resize< T, @@ -91,7 +85,8 @@ fn resize< axes, cubic_coeff_a ); - return output; + + output } fn interpolate_nd< @@ -157,18 +152,17 @@ fn interpolate_nd< Option::Some(scale_factors) => { let mut new_scale_factors = ArrayTrait::::new(); let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break *scale_factors.at(i); } + i += 1; }; new_scale_factors.append(item); @@ -182,25 +176,25 @@ fn interpolate_nd< let mut output_size = match output_size { Option::Some(output_size) => { - let mut new_output_size = ArrayTrait::new(); + let mut new_output_size = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break *(*data).shape.at(d); } + if *axes.at(i) == d { break *output_size.at(i); } + i += 1; }; new_output_size.append(item); d += 1; }; + Option::Some(new_output_size.span()) }, Option::None => { Option::None }, @@ -208,75 +202,71 @@ fn interpolate_nd< let mut roi = match roi { Option::Some(roi) => { - let mut new_roi_data = ArrayTrait::new(); + let mut new_roi_data = array![]; let naxes = axes.len(); let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::zero(); } + if *axes.at(i) == d { break *roi.data.at(i); } + i += 1; }; + new_roi_data.append(item); d += 1; }; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break *roi.data.at(i + naxes); } + i += 1; }; + new_roi_data.append(item); d += 1; }; + let mut shape = ArrayTrait::new(); shape.append(r * 2); Option::Some(TensorTrait::new(shape.span(), new_roi_data.span())) }, Option::None => { Option::None }, }; + (axes, scale_factors, output_size, roi) }, Option::None => { - let mut axes = ArrayTrait::new(); + let mut axes = array![]; let mut i = 0; - loop { - if i == r { - break; - } + while i != r { axes.append(i); i += 1; }; + (axes.span(), scale_factors, output_size, roi) } }; let (mut output_size, mut scale_factors) = match output_size { Option::Some(output_size) => { - let mut scale_factors = ArrayTrait::::new(); + let mut scale_factors: Array = array![]; let mut i = 0; - loop { - if i == r { - break; - } - + while i != r { let output_size_i: T = NumberTrait::new_unscaled( (*output_size.at(i)).into(), false ); @@ -293,47 +283,42 @@ fn interpolate_nd< KEEP_ASPECT_RATIO_POLICY::NOT_LARGER => { let mut scale = *scale_factors.at(*axes.at(0)); let mut i = 1; - loop { - if i == axes.len() { - break; - } + while i != axes.len() { if scale > *scale_factors.at(*axes.at(i)) { scale = *scale_factors.at(*axes.at(i)); } + i += 1; }; - let mut scale_factors = ArrayTrait::::new(); + let mut scale_factors: Array = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { 
break scale; } + i += 1; }; scale_factors.append(item); d += 1; }; - let mut output_size = ArrayTrait::new(); + let mut output_size = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break *(*data).shape.at(d); } + if *axes.at(i) == d { break NumberTrait::round( scale @@ -344,56 +329,54 @@ fn interpolate_nd< .try_into() .unwrap(); } + i += 1; }; output_size.append(item); d += 1; }; + (output_size.span(), scale_factors.span()) }, KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER => { let mut scale = *scale_factors.at(*axes.at(0)); let mut i = 1; - loop { - if i == axes.len() { - break; - } + while i != axes.len() { if scale < *scale_factors.at(*axes.at(i)) { scale = *scale_factors.at(*axes.at(i)); } + i += 1; }; - let mut scale_factors = ArrayTrait::::new(); + + let mut scale_factors: Array = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break scale; } + i += 1; }; scale_factors.append(item); d += 1; }; - let mut output_size = ArrayTrait::new(); + let mut output_size = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break *(*data).shape.at(d); } + if *axes.at(i) == d { break NumberTrait::round( scale @@ -404,11 +387,13 @@ fn interpolate_nd< .try_into() .unwrap(); } + i += 1; }; output_size.append(item); d += 1; }; + (output_size.span(), scale_factors.span()) }, }; @@ -416,7 +401,7 @@ fn interpolate_nd< (output_size, scale_factors) }, Option::None => { - let mut output_size = ArrayTrait::::new(); + let mut output_size: Array = array![]; let scale_factors = match scale_factors { Option::Some(scale_factors) => scale_factors, @@ -424,54 +409,44 @@ fn interpolate_nd< }; let mut i = 0; - loop { - if i == scale_factors.len() { - break; - } - + while i != scale_factors.len() { let item = *scale_factors.at(i) * NumberTrait::new_unscaled((*(*(data).shape).at(i)).into(), false); output_size.append(item.try_into().unwrap()); i += 1; }; + (output_size.span(), scale_factors) }, }; - let mut ret = ArrayTrait::>::new(); + let mut ret: Array> = array![]; let mut i = 0; - loop { + while i != output_size.len() { let mut temp = ArrayTrait::::new(); - if i == output_size.len() { - break; - } let mut j = 0; - loop { - if j == *output_size.at(i) { - break; - } + while j != *output_size.at(i) { temp.append(j); j += 1; }; + ret.append(temp.span()); i += 1; }; let mut ret = cartesian(ret.span()); - let mut ret_data = ArrayTrait::new(); + let mut ret_data = array![]; loop { match ret.pop_front() { Option::Some(X) => { - let mut x = ArrayTrait::::new(); + let mut x: Array = array![]; let mut i = 0; - loop { - if i == X.len() { - break; - } + while i != X.len() { x.append(NumberTrait::new_unscaled((*X.at(i)).into(), false)); i += 1; }; + let mut x = x.span(); let item = interpolate_nd_with_x( data, @@ -495,9 +470,10 @@ fn interpolate_nd< } }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(ret_data.len()); - return TensorTrait::new(output_size, ret_data.span()); + + TensorTrait::new(output_size, ret_data.span()) } fn cartesian(mut arrays: Span>,) -> Array> { @@ -512,24 +488,18 @@ fn cartesian(mut arrays: Span>,) -> Array> { }; let mut i = 0; - let mut size_arrays = ArrayTrait::new(); - loop { - if i == arrays.len() { - break; - } + let mut size_arrays = array![]; + while i != arrays.len() { 
size_arrays.append((*(arrays.at(i))).len()); - i += 1; }; + let size_arrays = size_arrays.span(); - let mut output_arrays = ArrayTrait::>::new(); + let mut output_arrays = array![]; let mut m = n; let mut i = 0; - loop { - if i == arrays.len() { - break; - } + while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); @@ -537,75 +507,58 @@ fn cartesian(mut arrays: Span>,) -> Array> { output_arrays.append(out); i += 1; }; + let output_arrays = output_arrays.span(); let mut i = 0; - let mut ret = ArrayTrait::>::new(); - loop { - if i == n { - break; - } + let mut ret = array![]; + while i != n { let mut j = 0; - let mut x = ArrayTrait::new(); - loop { - if j == arrays.len() { - break; - } - + let mut x = array![]; + while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; + ret.append(x); i += 1; }; - return ret; + ret } - fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { let mut size = array.len(); let mut i = 0; - loop { - if i == index { - break; - } + while i != index { let mut j = 1; - loop { - if j == *size_array.at(index - 1 - i) { - break; - } + while j != *size_array.at(index - 1 - i) { let mut k = 0; - loop { - if k == size { - break; - } + while k != size { array.append(*array.at(k)); k += 1; }; + j += 1; }; + size = size * *size_array.at(index - 1 - i); i += 1; }; + array } fn repeat(array: Span, m: usize,) -> Array { - let mut out = ArrayTrait::new(); + let mut out = array![]; let mut j = 0; - loop { - if j == array.len() { - break; - } + while j != array.len() { let mut k = 0; - loop { - if k == m { - break; - } + while k != m { out.append(*array.at(j)); k += 1; }; + j += 1; }; @@ -659,7 +612,8 @@ fn interpolate_nd_with_x< cubic_coeff_a ); } - let mut res1d = ArrayTrait::new(); + + let mut res1d = array![]; let scale_factor_zero = match scale_factor.pop_front() { Option::Some(item) => { *item }, @@ -681,13 +635,11 @@ fn interpolate_nd_with_x< reduced_roi_shape.append(roi.data.len() - 2); let mut i = 1; - loop { - if i == 2 * n { - break; - } + while i != 2 * n { if i != n { reduced_roi.append(*roi.data.at(i)); } + i += 1; }; Option::Some(TensorTrait::new(reduced_roi_shape.span(), reduced_roi.span())) @@ -696,10 +648,7 @@ fn interpolate_nd_with_x< }; let mut i = 0; - loop { - if i == *(*data).shape.at(0) { - break; - } + while i != *(*data).shape.at(0) { let data = get_row_n(data, i); let mut r = interpolate_nd_with_x( @@ -717,24 +666,26 @@ fn interpolate_nd_with_x< exclude_outside, cubic_coeff_a ); + loop { match r.data.pop_front() { Option::Some(item) => { res1d.append(*item); }, Option::None => { break; } } }; + i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(res1d.len()); let res1d = TensorTrait::new(shape.span(), res1d.span()); let reduced_roi = match roi { Option::Some(roi) => { - let mut reduced_roi = ArrayTrait::new(); - let mut reduced_roi_shape = ArrayTrait::new(); + let mut reduced_roi = array![]; + let mut reduced_roi_shape = array![]; reduced_roi_shape.append(2); reduced_roi.append(*roi.data.at(0)); @@ -760,45 +711,40 @@ fn interpolate_nd_with_x< cubic_coeff_a ); - //let mut ret = ArrayTrait::new(); - //let mut shape = ArrayTrait::new(); + //let mut ret = array![]; + //let mut shape = array![]; //shape.append(2); //ret.append(NumberTrait::zero()); - return a; + + a } fn get_row_n, +Copy, +Drop,>( data: @Tensor, index: usize, ) -> Tensor { - let mut output_data = ArrayTrait::new(); - let mut output_shape = 
ArrayTrait::new(); + let mut output_data = array![]; + let mut output_shape = array![]; let mut stride_output = 1; let mut i = 0; - loop { - if i == (*data).shape.len() { - break; - } + while i != (*data).shape.len() { if i != 0 { output_shape.append(*(*data).shape.at(i)); stride_output = stride_output * *(*data).shape.at(i); } + i += 1; }; let mut i = 0; - loop { - if i == stride_output { - break; - } + while i != stride_output { output_data.append(*(*data).data.at(index * stride_output + i)); i += 1; }; - return TensorTrait::new(output_shape.span(), output_data.span()); + TensorTrait::new(output_shape.span(), output_data.span()) } - fn interpolate_1d_with_x< T, MAG, @@ -915,9 +861,7 @@ fn interpolate_1d_with_x< let mut coeffs = match mode { MODE::NEAREST => { let coeffs = match antialias { - Option::Some => core::panic_with_felt252( - 'antialias not for mode NEAREST' - ), + Option::Some => core::panic_with_felt252('antialias not for mode NEAREST'), Option::None => { nearest_coeffs(ratio, nearest_mode) }, }; coeffs @@ -938,9 +882,7 @@ fn interpolate_1d_with_x< }, MODE::CUBIC => { let coeffs = match antialias { - Option::Some => { - cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) - }, + Option::Some => { cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) }, Option::None => { cubic_coeffs(ratio, cubic_coeff_a) }, }; coeffs @@ -952,13 +894,10 @@ fn interpolate_1d_with_x< let (idxes, points) = get_neighbor(x_ori, n, data); if exclude_outside { - let mut coeffs_exclude_outside = ArrayTrait::::new(); + let mut coeffs_exclude_outside: Array = array![]; let mut sum = NumberTrait::zero(); let mut i = 0; - loop { - if i == idxes.data.len() { - break; - } + while i != idxes.data.len() { if *idxes.data.at(i) { coeffs_exclude_outside.append(NumberTrait::zero()); sum += NumberTrait::zero(); @@ -966,23 +905,22 @@ fn interpolate_1d_with_x< coeffs_exclude_outside.append(*coeffs.data.at(i)); sum += *coeffs.data.at(i); } + i += 1; }; - let mut coeff_div = ArrayTrait::::new(); + let mut coeff_div: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { coeff_div.append(*coeffs_exclude_outside.at(i) / sum); i += 1; }; + coeffs = TensorTrait::new(coeffs.shape, coeff_div.span()); } - return TensorTrait::matmul(@coeffs, @points); -} + TensorTrait::matmul(@coeffs, @points) +} fn get_neighbor< T, @@ -1009,29 +947,22 @@ fn get_neighbor< ) .try_into() .unwrap(); - let mut padded = ArrayTrait::new(); + let mut padded = array![]; let mut i = 0; - loop { - if i == pad_width { - break; - } + while i != pad_width { padded.append(*(*data).data.at(0)); i += 1; }; + let mut i = 0; - loop { - if i == (*data).data.len() { - break; - } + while i != (*data).data.len() { padded.append(*(*data).data.at(i)); i += 1; }; + let mut i = 0; - loop { - if i == pad_width { - break; - } + while i != pad_width { padded.append(*(*data).data.at((*data).data.len() - 1)); i += 1; }; @@ -1040,13 +971,10 @@ fn get_neighbor< let mut idxes = get_neighbor_idxes(x, n, padded.len()); - let mut idxes_centered = ArrayTrait::new(); - let mut ret = ArrayTrait::new(); + let mut idxes_centered = array![]; + let mut ret = array![]; let mut i = 0; - loop { - if i == idxes.data.len() { - break; - } + while i != idxes.data.len() { ret.append(*padded.at(*idxes.data.at(i))); if *idxes.data.at(i) >= pad_width { @@ -1058,16 +986,17 @@ fn get_neighbor< } else { idxes_centered.append(true); } + i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(idxes.data.len()); - return ( + ( 
TensorTrait::new(shape.span(), idxes_centered.span()), TensorTrait::new(shape.span(), ret.span()) - ); + ) } fn get_neighbor_idxes< @@ -1097,7 +1026,7 @@ fn get_neighbor_idxes< ) .try_into() .unwrap(); - let mut idxes = ArrayTrait::new(); + let mut idxes = array![]; if n % 2 == 0 { let (mut i_low, mut i_high) = if x < NumberTrait::zero() { @@ -1120,10 +1049,7 @@ fn get_neighbor_idxes< } let mut i = 0; - loop { - if i == n / 2 { - break; - } + while i != n / 2 { if i_low - i < 0 { idxes.append(i_high + i); i_high += 1; @@ -1136,6 +1062,7 @@ fn get_neighbor_idxes< } else { idxes.append(i_high + i); } + i += 1; } } else { @@ -1144,10 +1071,10 @@ fn get_neighbor_idxes< idxes = bubble_sort::bubble_sort_elements(idxes, true); - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(n); - return TensorTrait::new(shape.span(), idxes.span()); + TensorTrait::new(shape.span(), idxes.span()) } fn linear_coeffs< @@ -1163,14 +1090,14 @@ fn linear_coeffs< >( mut ratio: T ) -> Tensor { - let mut ret = ArrayTrait::new(); - let mut shape = ArrayTrait::new(); + let mut ret = array![]; + let mut shape = array![]; shape.append(2); ret.append(NumberTrait::one() - ratio); ret.append(ratio); - return TensorTrait::new(shape.span(), ret.span()); -} + TensorTrait::new(shape.span(), ret.span()) +} fn linear_coeffs_antialias< T, @@ -1197,15 +1124,12 @@ fn linear_coeffs_antialias< let footprint = (NumberTrait::one() + NumberTrait::one()) - (NumberTrait::one() + NumberTrait::one()) * start; - let mut coeffs = ArrayTrait::::new(); + let mut coeffs: Array = array![]; let mut sum = NumberTrait::zero(); // arange and clip + compute sum let mut i = start; - loop { - if i == start + footprint { - break; - } + while i != start + footprint { let value = NumberTrait::one() - NumberTrait::abs((i - ratio) * scale); if value < NumberTrait::zero() { @@ -1217,25 +1141,23 @@ fn linear_coeffs_antialias< coeffs.append(value); sum += value; } + i += NumberTrait::one(); }; let n = coeffs.len(); - let mut coeff_div = ArrayTrait::::new(); + let mut coeff_div: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { coeff_div.append(*coeffs.at(i) / sum); i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(n); - return TensorTrait::new(shape.span(), coeff_div.span()); + TensorTrait::new(shape.span(), coeff_div.span()) } fn cubic_coeffs< @@ -1267,8 +1189,8 @@ fn cubic_coeffs< Option::None => { NumberTrait::neg(three / four) }, }; - let mut coeffs = ArrayTrait::new(); - let mut shape = ArrayTrait::new(); + let mut coeffs = array![]; + let mut shape = array![]; coeffs .append( @@ -1284,7 +1206,8 @@ fn cubic_coeffs< ); shape.append(4); - return TensorTrait::new(shape.span(), coeffs.span()); + + TensorTrait::new(shape.span(), coeffs.span()) } fn cubic_coeffs_antialias< @@ -1322,14 +1245,11 @@ fn cubic_coeffs_antialias< Option::None => { NumberTrait::neg(three / four) }, }; - let mut coeffs = ArrayTrait::new(); + let mut coeffs = array![]; let mut sum = NumberTrait::zero(); let mut i = i_start; - loop { - if i == i_end { - break; - } + while i != i_end { let value = compute_coeff(scale * (i - ratio), A); coeffs.append(value); sum += value; @@ -1339,20 +1259,17 @@ fn cubic_coeffs_antialias< let n = coeffs.len(); - let mut coeff_div = ArrayTrait::::new(); + let mut coeff_div: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { coeff_div.append(*coeffs.at(i) / sum); i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = 
array![];
     shape.append(n);
 
-    return TensorTrait::new(shape.span(), coeff_div.span());
+    TensorTrait::new(shape.span(), coeff_div.span())
 }
 
 fn compute_coeff<
@@ -1388,9 +1305,9 @@ fn compute_coeff<
     if x < two {
         return A * x_3 - five * A * x_2 + eigth * A * x - four * A;
     }
-    return NumberTrait::zero();
-}
 
+    NumberTrait::zero()
+}
 fn nearest_coeffs<
     T,
@@ -1414,8 +1331,8 @@ fn nearest_coeffs<
         Option::None => { NEAREST_MODE::ROUND_PREFER_FLOOR },
     };
 
-    let mut ret = ArrayTrait::new();
-    let mut shape = ArrayTrait::new();
+    let mut ret = array![];
+    let mut shape = array![];
     shape.append(2);
 
     // CHECK WHETHER THIS CONDITION IS GENERAL ENOUGH
diff --git a/src/operators/tensor/math/round.cairo b/src/operators/tensor/math/round.cairo
index 5515dad9b..fc1680f16 100644
--- a/src/operators/tensor/math/round.cairo
+++ b/src/operators/tensor/math/round.cairo
@@ -1,12 +1,6 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::Into;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 
-
 fn round<
     T,
     MAG,
@@ -17,7 +11,7 @@ fn round<
 >(
     mut self: Tensor<T>
 ) -> Tensor<T> {
-    let mut result = ArrayTrait::new();
+    let mut result: Array<T> = array![];
 
     loop {
         match self.data.pop_front() {
diff --git a/src/operators/tensor/math/scatter.cairo b/src/operators/tensor/math/scatter.cairo
index a108ae4e2..fe3d9ffce 100644
--- a/src/operators/tensor/math/scatter.cairo
+++ b/src/operators/tensor/math/scatter.cairo
@@ -1,18 +1,10 @@
 use alexandria_data_structures::array_ext::SpanTraitExt;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
-use core::option::OptionTrait;
 
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
 use core::dict::Felt252DictTrait;
 use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
+
 /// Cf: TensorTrait::scatter docstring
 fn scatter<
     T,
@@ -57,7 +49,7 @@ fn scatter<
         'shape must be same'
     );
 
-    let mut output_data = ArrayTrait::new();
+    let mut output_data = array![];
     let mut data_indices = indices.data;
     let mut data_updates = updates.data;
     let mut indices_updates: Felt252Dict<usize> = Default::default();
@@ -68,25 +60,20 @@ fn scatter<
     *data_shape_copy.pop_front().unwrap();
     *indices_shape_copy.pop_front().unwrap();
-
     let mut indices_loop: usize = 1;
     let mut data_loop: usize = 1;
 
     if (axis == 0) {
         loop {
             match indices_shape_copy.pop_front() {
-                Option::Some(val) => {
-                    indices_loop *= *val;
-                },
+                Option::Some(val) => { indices_loop *= *val; },
                 Option::None => { break; }
             };
         };
 
         loop {
             match data_shape_copy.pop_front() {
-                Option::Some(val) => {
-                    data_loop *= *val;
-                },
+                Option::Some(val) => { data_loop *= *val; },
                 Option::None => { break; }
             };
         };
@@ -140,7 +127,7 @@ fn scatter<
                     if (reduction == 'none') {
                         indices_updates.insert(result.into(), value.into());
                     } else {
-                        let mut arr = ArrayTrait::new();
+                        let mut arr = array![];
 
                         let val = indices_updates_reduction.get(result.into());
                         let mut a = ArrayTrait::new();
@@ -155,10 +142,12 @@ fn scatter<
                                 Option::None => { break; }
                             };
                         };
+
                         arr.append(total_count);
                         indices_updates_reduction
                             .insert(result.into(), nullable_from_box(BoxTrait::new(arr.span())));
                     }
+
                     total_count += 1;
                 },
                 Option::None => { break; }
@@ -180,7 +169,7 @@ fn scatter<
                 }
             } else {
                 let value = indices_updates_reduction.get(i.into());
-                let mut a = ArrayTrait::new();
+                let mut a = array![];
                 let mut 
span = match match_nullable(value) { FromNullableResult::Null(()) => a.span(), FromNullableResult::NotNull(value) => value.unbox(), @@ -199,6 +188,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } @@ -209,6 +199,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } @@ -224,6 +215,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } @@ -239,6 +231,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } } @@ -252,9 +245,9 @@ fn scatter< let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); - if (transpose == true) { + if transpose { output_tensor = output_tensor.transpose(axes: array![0, 2, 1].span()) } - return output_tensor; + output_tensor } diff --git a/src/operators/tensor/math/scatter_nd.cairo b/src/operators/tensor/math/scatter_nd.cairo index 61535f618..285949ebd 100644 --- a/src/operators/tensor/math/scatter_nd.cairo +++ b/src/operators/tensor/math/scatter_nd.cairo @@ -1,18 +1,10 @@ -use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; +use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; -use core::option::OptionTrait; +use alexandria_data_structures::array_ext::SpanTraitExt; use orion::numbers::NumberTrait; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -use core::dict::Felt252DictTrait; -use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; + /// Cf: TensorTrait::scatter_nd docstring fn scatter_nd< T, @@ -24,12 +16,8 @@ fn scatter_nd< impl TPartialOrd: PartialOrd, impl TPartialEq: PartialEq, >( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { - let reduction = match reduction { Option::Some(val) => val, Option::None => 'none' @@ -44,16 +32,16 @@ fn scatter_nd< assert(*indices_last_axis <= data_rank, 'must be <= data rank'); let ind_max = indices.data.max().unwrap(); - if (data_rank > 1){ + if (data_rank > 1) { assert(ind_max < data_rank, 'index is out of bound'); } - let mut batch_dims_shape = ArrayTrait::new(); + let mut batch_dims_shape = array![]; let mut ind: usize = 0; loop { match indices_shape.pop_front() { - Option::Some(val) => { batch_dims_shape.append(*val);}, + Option::Some(val) => { batch_dims_shape.append(*val); }, Option::None => { break; } }; }; @@ -61,11 +49,11 @@ fn scatter_nd< let mut data_shape_clone = data_shape.clone(); loop { match data_shape_clone.pop_front() { - Option::Some(val) => { + Option::Some(val) => { if (ind >= *indices_last_axis) { batch_dims_shape.append(*val); - } - }, + } + }, Option::None => { break; } }; }; @@ -73,10 +61,8 @@ fn scatter_nd< let mut ind: usize = 0; loop { match batch_dims_shape.pop_front() { - Option::Some(val) => { - assert(val == *updates_shape[ind], 'must be same'); - }, - Option::None => { break; } + Option::Some(val) => { assert(val == *updates_shape[ind], 'must be same'); }, + Option::None => { break; } }; }; @@ -89,7 +75,7 @@ fn scatter_nd< if data_rank >= 1 { loop { match data_shape_clone.pop_front() { - Option::Some(val) => { indexer *= *val;}, + Option::Some(val) => { indexer *= *val; }, Option::None => { break; } }; } @@ -99,7 +85,7 @@ fn scatter_nd< let mut dict_ind: usize = 1; loop { match 
data_indices.pop_front() { - Option::Some(val) => { + Option::Some(val) => { updates_index_dict.insert((*val).into(), dict_ind); dict_ind += 1; }, @@ -107,68 +93,62 @@ fn scatter_nd< }; }; - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let mut data = *self.data; let mut index: usize = 0; let mut inner_index: usize = 0; let num = *data_shape_first.unwrap(); - loop { - if (index == num){ - break; - } + while index != num { let comp_index = updates_index_dict.get(index.into()); - if (comp_index == 0) { + if comp_index == 0 { loop { - if (inner_index == indexer) { + if (inner_index == indexer) { inner_index = 0; - break; + break; } let val = *data.at((index * indexer) + inner_index); output_data.append(val); inner_index += 1; }; - } - else { + } else { loop { - if (inner_index == indexer) { + if (inner_index == indexer) { inner_index = 0; - break; + break; } - if (reduction == 'none'){ - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + if (reduction == 'none') { + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); output_data.append(*val); } if (reduction == 'add') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); output_data.append(*val + data_val); } if (reduction == 'mul') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); output_data.append((*val) * data_val); } if (reduction == 'max') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); if (*val > data_val) { output_data.append(*val); - } - else { + } else { output_data.append(data_val); } - } + } if (reduction == 'min') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); if (*val > data_val) { output_data.append(data_val); - } - else { + } else { output_data.append(*val); } - } + } inner_index += 1; } } @@ -176,6 +156,6 @@ fn scatter_nd< }; let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); - return output_tensor; -} \ No newline at end of file + output_tensor +} diff --git a/src/operators/tensor/math/shrink.cairo b/src/operators/tensor/math/shrink.cairo index 20ed4041f..c6d6f0409 100644 --- a/src/operators/tensor/math/shrink.cairo +++ b/src/operators/tensor/math/shrink.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -29,7 +25,7 @@ fn shrink< NumberTrait::half() }; - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match self.data.pop_front() { @@ -48,5 +44,5 @@ fn shrink< }; }; - return TensorTrait::new(self.shape, data_result.span()); + TensorTrait::new(self.shape, data_result.span()) } diff --git a/src/operators/tensor/math/sign.cairo b/src/operators/tensor/math/sign.cairo index 557a96995..afd97ef41 100644 --- a/src/operators/tensor/math/sign.cairo +++ b/src/operators/tensor/math/sign.cairo @@ -1,12 +1,6 @@ -use 
core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn sign< T, MAG, @@ -17,7 +11,7 @@ fn sign< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -26,5 +20,5 @@ fn sign< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/sin.cairo b/src/operators/tensor/math/sin.cairo index 91e5d9949..46f87d5e6 100644 --- a/src/operators/tensor/math/sin.cairo +++ b/src/operators/tensor/math/sin.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::sin docstring fn sin< T, @@ -19,7 +13,7 @@ fn sin< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -28,6 +22,6 @@ fn sin< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/sinh.cairo b/src/operators/tensor/math/sinh.cairo index 72caffd21..ca09e9f68 100644 --- a/src/operators/tensor/math/sinh.cairo +++ b/src/operators/tensor/math/sinh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::sinh docstring fn sinh< T, @@ -19,8 +13,7 @@ fn sinh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); - + let mut result: Array = array![]; loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).sinh()); }, @@ -28,5 +21,5 @@ fn sinh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/sqrt.cairo b/src/operators/tensor/math/sqrt.cairo index 22ca78d77..c3a0d4e6f 100644 --- a/src/operators/tensor/math/sqrt.cairo +++ b/src/operators/tensor/math/sqrt.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn sqrt< T, MAG, @@ -18,7 +12,7 @@ fn sqrt< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -27,5 +21,5 @@ fn sqrt< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/tanh.cairo b/src/operators/tensor/math/tanh.cairo index 681f4d8f6..9759cd9bd 100644 --- a/src/operators/tensor/math/tanh.cairo +++ b/src/operators/tensor/math/tanh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use 
orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::tanh docstring fn tanh< T, @@ -19,7 +13,7 @@ fn tanh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -28,5 +22,5 @@ fn tanh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/where.cairo b/src/operators/tensor/math/where.cairo index ba71c1279..f09883a41 100644 --- a/src/operators/tensor/math/where.cairo +++ b/src/operators/tensor/math/where.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +17,12 @@ fn where< ) -> Tensor { let xy_shape = broadcast_shape(*x.shape, *y.shape); let broadcasted_shape = broadcast_shape(*self.shape, xy_shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_cond = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -39,10 +35,7 @@ fn where< result.append(res); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/xor.cairo b/src/operators/tensor/math/xor.cairo index 467fd1bfd..7ed06eba5 100644 --- a/src/operators/tensor/math/xor.cairo +++ b/src/operators/tensor/math/xor.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -20,12 +16,12 @@ fn xor< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -38,10 +34,7 @@ fn xor< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/ml.cairo b/src/operators/tensor/ml.cairo index f47deeffd..54cb2aaa8 100644 --- a/src/operators/tensor/ml.cairo +++ b/src/operators/tensor/ml.cairo @@ -1 +1,2 @@ mod array_feature_extractor; +mod label_encoder; diff --git a/src/operators/tensor/ml/array_feature_extractor.cairo b/src/operators/tensor/ml/array_feature_extractor.cairo index 8605c00ab..efe8f099e 100644 --- a/src/operators/tensor/ml/array_feature_extractor.cairo +++ b/src/operators/tensor/ml/array_feature_extractor.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -21,14 +17,14 @@ fn array_feature_extractor< let output_data = calculate_output_data::(self, indices, 
total_elements); - return TensorTrait::new(output_shape.span(), output_data.span()); + TensorTrait::new(output_shape.span(), output_data.span()) } fn process_1D_tensor, impl TCopy: Copy, impl TDrop: Drop>( self: Tensor, indices: Tensor ) -> Tensor { - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let mut indices_values: Span = indices.data; let self_len = *self.shape.at(0); @@ -43,7 +39,7 @@ fn process_1D_tensor, impl TCopy: Copy, impl }; }; - return TensorTrait::new(indices.shape, output_data.span()); + TensorTrait::new(indices.shape, output_data.span()) } @@ -53,7 +49,7 @@ fn calculate_output_shape< input_shape: Span, indices: Tensor ) -> (Array, usize) { let mut total_elements: usize = 1; - let mut output_shape: Array = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut input_shape_copy = input_shape; let mut input_shape_counter: usize = 0; @@ -75,7 +71,7 @@ fn calculate_output_shape< output_shape.append(indices.data.len()); - return (output_shape, total_elements); + (output_shape, total_elements) } @@ -84,18 +80,14 @@ fn calculate_output_data, impl TCopy: Copy, i ) -> Array { let last_tensor_axis: usize = *self.shape.at(self.shape.len() - 1); - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let strides: Span = TensorTrait::stride(@self); let mut element_counter: usize = 0; let mut stride_l2 = *strides.at(strides.len() - 2); let mut stride_l1 = *strides.at(strides.len() - 1); - loop { - if element_counter > total_elements - 1 { - break; - } - + while element_counter != total_elements { let mut base_index = if strides.len() > 1 { element_counter * stride_l2 } else { @@ -119,5 +111,5 @@ fn calculate_output_data, impl TCopy: Copy, i element_counter += 1; }; - return output_data; + output_data } diff --git a/src/operators/tensor/ml/label_encoder.cairo b/src/operators/tensor/ml/label_encoder.cairo new file mode 100644 index 000000000..277c687fe --- /dev/null +++ b/src/operators/tensor/ml/label_encoder.cairo @@ -0,0 +1,98 @@ +use core::array::ArrayTrait; +use core::option::OptionTrait; +use core::array::SpanTrait; + +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use orion::numbers::NumberTrait; + +use core::dict::Felt252DictTrait; +use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; +use core::debug::PrintTrait; + +use core::traits::Into; +use core::traits::TryInto; +/// Cf: TensorTrait::label_encoder docstring +fn label_encoder< + T, +Drop, +Copy, +AddEq, +TensorTrait, +PartialOrd, +Into, +>( + // self: @Tensor, default: T, keys: Array, values: Array + self: @Tensor, + default_list: Option>, + default_tensor: Option>, + keys: Option>, + keys_tensor: Option>, + values: Option>, + values_tensor: Option>, +) -> Tensor { + let mut default = match default_list { + Option::Some(value) => value, + Option::None => { + match default_tensor { + Option::Some(value) => value.data, + Option::None => { core::panic_with_felt252('None') }, + } + } + }; + + let default = match default.pop_front() { + Option::Some(value) => *value, + Option::None => { core::panic_with_felt252('None') } + }; + + let mut keys = match keys { + Option::Some(value) => { value }, + Option::None => { + match keys_tensor { + Option::Some(value) => { value.data }, + Option::None => { core::panic_with_felt252('None') }, + } + } + }; + + let mut values = match values { + Option::Some(value) => { value }, + Option::None => { + match values_tensor { + Option::Some(value) => { value.data }, + Option::None => { 
core::panic_with_felt252('None') }, + } + } + }; + + assert(keys.len() == values.len(), 'keys must be eq to values'); + let mut key_value_dict: Felt252Dict> = Default::default(); + let mut output_data = ArrayTrait::::new(); + + loop { + let key = match keys.pop_front() { + Option::Some(key) => key, + Option::None => { break; } + }; + let value = match values.pop_front() { + Option::Some(value) => value, + Option::None => { break; } + }; + + key_value_dict.insert((*key).into(), nullable_from_box(BoxTrait::new(*value))); + }; + + let mut data = *self.data; + loop { + match data.pop_front() { + Option::Some(val) => { + let value = *val; + let res = key_value_dict.get(value.into()); + + let mut span = match match_nullable(res) { + FromNullableResult::Null => default, + FromNullableResult::NotNull(res) => res.unbox(), + }; + output_data.append(span); + }, + Option::None => { break; } + }; + }; + + let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); + return output_tensor; +} diff --git a/src/operators/tensor/quantization/dequantize_linear.cairo b/src/operators/tensor/quantization/dequantize_linear.cairo index b17c4a2d3..d9fef16a7 100644 --- a/src/operators/tensor/quantization/dequantize_linear.cairo +++ b/src/operators/tensor/quantization/dequantize_linear.cairo @@ -1,8 +1,3 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::check_compatibility; use orion::utils::saturate; @@ -31,6 +26,7 @@ fn dequantize_linear< check_compatibility(*x.shape, *x_scale.shape); check_compatibility(*x.shape, *x_zero_point.shape); check_compatibility(*x_scale.shape, *x_zero_point.shape); + dequantize_per_axis(@(*x).into(), x_scale, x_zero_point) } } @@ -45,7 +41,6 @@ fn dequantize_per_axis< >( x: @Tensor, x_scale: @Tensor, x_zero_point: @Tensor ) -> Tensor:: { - (*x - *x_zero_point) * *x_scale } @@ -63,7 +58,7 @@ fn dequantize_element_wise< >( mut x: Tensor::, x_scale: T, x_zero_point: T ) -> Tensor:: { - let mut result_data = ArrayTrait::::new(); + let mut result_data: Array = array![]; loop { match x.data.pop_front() { @@ -75,7 +70,7 @@ fn dequantize_element_wise< }; }; - return TensorTrait::new(x.shape, result_data.span()); + TensorTrait::new(x.shape, result_data.span()) } fn dequantize< diff --git a/src/operators/tensor/quantization/dynamic_quantize_linear.cairo b/src/operators/tensor/quantization/dynamic_quantize_linear.cairo index 085132e92..db8be469d 100644 --- a/src/operators/tensor/quantization/dynamic_quantize_linear.cairo +++ b/src/operators/tensor/quantization/dynamic_quantize_linear.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -42,37 +38,36 @@ fn dynamic_quantize_linear< let y_scale_value: T = (x_max - x_min) / (max - min); if x_max == x_min { y_scale_values.append(one); - }else{ + } else { y_scale_values.append(y_scale_value); } - - - let mut y_scale_tensor_shape = ArrayTrait::new(); + + let mut y_scale_tensor_shape: Array = array![]; y_scale_tensor_shape.append(y_scale_values.len()); - let y_scale = TensorTrait::::new( - shape: y_scale_tensor_shape.span(), data: y_scale_values.span(), - ); + let y_scale = TensorTrait::< + T 
+    >::new(shape: y_scale_tensor_shape.span(), data: y_scale_values.span(),);
 
     // intermediate_zero_point = qmin - min(x)/y_scale
     let intermediate_zero_point: T = min - x_min / y_scale_value;
 
     // y_zero_point = cast(round(saturate(intermediate_zero_point)))
     let mut y_zero_point_value: T = saturate(min, max, intermediate_zero_point);
-    let mut y_zero_point_values = ArrayTrait::new();
+    let mut y_zero_point_values: Array<T> = array![];
     y_zero_point_values.append(y_zero_point_value);
-    let mut y_zero_point_tensor_shape = ArrayTrait::new();
+    let mut y_zero_point_tensor_shape: Array<usize> = array![];
     y_zero_point_tensor_shape.append(y_zero_point_values.len());
-    let mut y_zero_point_values = ArrayTrait::new();
+    let mut y_zero_point_values: Array<T> = array![];
     y_zero_point_values.append(y_zero_point_value);
-    let mut y_zero_point = TensorTrait::<T>::new(
-        shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(),
-    );
+    let mut y_zero_point = TensorTrait::<
+        T
+    >::new(shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(),);
 
     // y_zero_point = y_zero_point.round(); // tensor only supported!
     // y = saturate (round (x / y_scale) + y_zero_point)
-    return (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point);
-}
\ No newline at end of file
+    (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point)
+}
diff --git a/src/operators/tensor/quantization/qlinear_add.cairo b/src/operators/tensor/quantization/qlinear_add.cairo
index 3c69564c2..20673c1c7 100644
--- a/src/operators/tensor/quantization/qlinear_add.cairo
+++ b/src/operators/tensor/quantization/qlinear_add.cairo
@@ -1,13 +1,8 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::{NumberTrait};
 use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
 use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
 use orion::operators::tensor::{TensorTrait, Tensor};
 
-
 fn qlinear_add<
     T,
     MAG,
@@ -49,5 +44,5 @@ fn qlinear_add<
 
     let mut x = (dequantized_a + dequantized_b).into();
 
-    return quantize_linear(@x, y_scale, y_zero_point, min, max);
+    quantize_linear(@x, y_scale, y_zero_point, min, max)
 }
diff --git a/src/operators/tensor/quantization/qlinear_concat.cairo b/src/operators/tensor/quantization/qlinear_concat.cairo
index 7d6280202..43e11bbdb 100644
--- a/src/operators/tensor/quantization/qlinear_concat.cairo
+++ b/src/operators/tensor/quantization/qlinear_concat.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::{NumberTrait};
 use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
 use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
@@ -50,7 +46,7 @@ fn qlinear_concat<
     //let mut x = TensorTrait::concat(tensors: array![dequantized_a, dequantized_b].span(), axis: axis);
     let mut x = concat_dequantize(tensors, scales, zero_points, axis, min, max);
 
-    return quantize_linear(@x, y_scale, y_zero_point, min, max);
+    quantize_linear(@x, y_scale, y_zero_point, min, max)
 }
 
 
@@ -125,7 +121,7 @@ fn dequantize_tensors<
     min: T,
     max: T
 ) -> Span<Tensor<T>> {
-    let mut array = ArrayTrait::<Tensor<T>>::new();
+    let mut array: Array<Tensor<T>> = array![];
     let mut i = 0;
     loop {
         match tensors.pop_front() {
@@ -135,9 +131,11 @@ fn dequantize_tensors<
             },
             Option::None => { break; }
         };
+
         i += 1;
     };
-    return array.span();
+
+    array.span()
 }
 
 /// # tensor.concat
 ///
diff --git 
a/src/operators/tensor/quantization/qlinear_leakyrelu.cairo b/src/operators/tensor/quantization/qlinear_leakyrelu.cairo index 4fc0db823..bfa9346ff 100644 --- a/src/operators/tensor/quantization/qlinear_leakyrelu.cairo +++ b/src/operators/tensor/quantization/qlinear_leakyrelu.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -36,7 +32,7 @@ fn qlinear_leakyrelu< ) -> Tensor { let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point); - let mut result_data = ArrayTrait::::new(); + let mut result_data: Array = array![]; loop { match dequantized_a.data.pop_front() { Option::Some(elem) => { @@ -50,7 +46,7 @@ fn qlinear_leakyrelu< }; }; - return quantize_linear( + quantize_linear( @TensorTrait::new(dequantized_a.shape, result_data.span()), a_scale, a_zero_point, min, max - ); + ) } diff --git a/src/operators/tensor/quantization/qlinear_matmul.cairo b/src/operators/tensor/quantization/qlinear_matmul.cairo index 03e542945..325f4fd30 100644 --- a/src/operators/tensor/quantization/qlinear_matmul.cairo +++ b/src/operators/tensor/quantization/qlinear_matmul.cairo @@ -1,13 +1,8 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: TensorTrait::qlinear_matmul docstring fn qlinear_matmul< T, @@ -65,8 +60,8 @@ fn qlinear_matmul< assert(a_ndim == b_ndim, 'dim missmatch'); let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point); let mut dequantized_b = dequantize_linear(@(*b), b_scale, b_zero_point); - let mut x_shape = ArrayTrait::::new(); - let mut x_data = ArrayTrait::::new(); + let mut x_shape: Array = array![]; + let mut x_data: Array = array![]; assert(a_shape[a_ndim - 1] == b_shape[b_ndim - 2], 'incompatible dim for matmul'); @@ -74,20 +69,16 @@ fn qlinear_matmul< let k = *a_shape[a_ndim - 1]; let n = *b_shape[b_ndim - 1]; - let mut a_shape_reduced = ArrayTrait::::new(); + let mut a_shape_reduced: Array = array![]; a_shape_reduced.append(m); a_shape_reduced.append(k); - let mut b_shape_reduced = ArrayTrait::::new(); + let mut b_shape_reduced: Array = array![]; b_shape_reduced.append(k); b_shape_reduced.append(n); let mut i = 0; - loop { - if i == stride(a_shape) / (m * k) { - break; - }; - + while i != stride(a_shape) / (m * k) { result_updates( @subtensor(@dequantized_a, i * (m * k), a_shape_reduced.span()), @subtensor(@dequantized_b, i * (k * n), b_shape_reduced.span()), @@ -95,21 +86,21 @@ fn qlinear_matmul< ); i += 1; }; + x_shape(ref x_shape, a_shape, m, n); let x = TensorTrait::new(x_shape.span(), x_data.span()); - return quantize_linear(@x, y_scale, y_zero_point, min, max); + + quantize_linear(@x, y_scale, y_zero_point, min, max) } fn x_shape(ref x_data: Array, mut shape: Span, m: usize, n: usize) { - loop { - if shape.len() == 2 { - break; - } + while shape.len() != 2 { match shape.pop_front() { Option::Some(elem) => { x_data.append(*elem); }, Option::None => { break; } }; }; + x_data.append(m); x_data.append(n); } @@ -125,7 +116,8 @@ fn stride(mut shape: Span) -> usize { Option::None => { break; } }; }; - 
return accumulated; + + accumulated } fn subtensor, impl TCopy: Copy, impl TDrop: Drop>( @@ -135,14 +127,12 @@ fn subtensor, impl TCopy: Copy, impl TDrop: D let mut stride = stride(shape); let mut i = 0; - loop { - if i == stride { - break; - } + while i != stride { data.append(*x.data[start + i]); i += 1; }; - return TensorTrait::new(shape, data.span()); + + TensorTrait::new(shape, data.span()) } @@ -165,29 +155,17 @@ fn result_updates< let mat1 = *mat1.data; let mat2 = *mat2.data; - let mut result_shape = ArrayTrait::new(); + let mut result_shape: Array = array![]; result_shape.append(m); result_shape.append(p); let mut i = 0_usize; - loop { - if i == m { - break (); - } - + while i != m { let mut j = 0_usize; - loop { - if j == p { - break (); - } - + while j != p { let mut sum: T = NumberTrait::zero(); let mut k = 0_usize; - loop { - if k == n { - break (); - } - + while k != n { let mat1_index = i * n + k; let mat2_index = k * p + j; sum += *mat1[mat1_index] * *mat2[mat2_index]; diff --git a/src/operators/tensor/quantization/qlinear_mul.cairo b/src/operators/tensor/quantization/qlinear_mul.cairo index 4c952c3f9..4a243b1d5 100644 --- a/src/operators/tensor/quantization/qlinear_mul.cairo +++ b/src/operators/tensor/quantization/qlinear_mul.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -49,6 +45,6 @@ fn qlinear_mul< let mut x = (dequantized_a * dequantized_b).into(); - return quantize_linear(@x, y_scale, y_zero_point, min, max); + quantize_linear(@x, y_scale, y_zero_point, min, max) } diff --git a/src/operators/tensor/quantization/quantize_linear.cairo b/src/operators/tensor/quantization/quantize_linear.cairo index 90633516a..58466e563 100644 --- a/src/operators/tensor/quantization/quantize_linear.cairo +++ b/src/operators/tensor/quantization/quantize_linear.cairo @@ -1,9 +1,3 @@ -use core::debug::PrintTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::TryInto; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::check_compatibility; use orion::operators::tensor::math::arithmetic::saturated_add; @@ -33,6 +27,7 @@ fn quantize_linear< check_compatibility(*x.shape, *y_scale.shape); check_compatibility(*x.shape, *y_zero_point.shape); check_compatibility(*y_scale.shape, *y_zero_point.shape); + quantize_per_axis(x, y_scale, y_zero_point, min, max) } } @@ -70,7 +65,7 @@ fn quantize_element_wise< >( mut x: Tensor::, y_scale: T, y_zero_point: T, min: T, max: T ) -> Tensor:: { - let mut result_data = ArrayTrait::::new(); + let mut result_data: Array = array![]; loop { match x.data.pop_front() { @@ -82,7 +77,7 @@ fn quantize_element_wise< }; }; - return TensorTrait::new(x.shape, result_data.span()); + TensorTrait::new(x.shape, result_data.span()) } fn quantize< diff --git a/src/test_helper/tensor/fixed_point/fp16x16.cairo b/src/test_helper/tensor/fixed_point/fp16x16.cairo index 096edb4f8..f82f0972a 100644 --- a/src/test_helper/tensor/fixed_point/fp16x16.cairo +++ b/src/test_helper/tensor/fixed_point/fp16x16.cairo @@ -1,5 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; use orion::numbers::fixed_point::core::{FixedTrait}; use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; use 
orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor; @@ -7,381 +5,341 @@ use orion::operators::tensor::{TensorTrait, Tensor}; // 1D fn fp_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 2D - fn fp_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + 
FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_neg_helper() -> Tensor { 
- let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn fp_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - 
data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } + fn 
fp_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); - data.append(FixedTrait::new_unscaled(12, false)); - data.append(FixedTrait::new_unscaled(13, false)); - data.append(FixedTrait::new_unscaled(14, false)); - data.append(FixedTrait::new_unscaled(15, false)); - data.append(FixedTrait::new_unscaled(16, false)); - data.append(FixedTrait::new_unscaled(17, false)); - data.append(FixedTrait::new_unscaled(18, false)); - data.append(FixedTrait::new_unscaled(19, false)); - data.append(FixedTrait::new_unscaled(20, false)); - data.append(FixedTrait::new_unscaled(21, false)); - data.append(FixedTrait::new_unscaled(22, false)); - data.append(FixedTrait::new_unscaled(23, false)); - data.append(FixedTrait::new_unscaled(24, false)); - data.append(FixedTrait::new_unscaled(25, false)); - data.append(FixedTrait::new_unscaled(26, false)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false), + FixedTrait::new_unscaled(12, false), + FixedTrait::new_unscaled(13, false), + FixedTrait::new_unscaled(14, false), + FixedTrait::new_unscaled(15, false), + FixedTrait::new_unscaled(16, false), + FixedTrait::new_unscaled(17, false), + FixedTrait::new_unscaled(18, false), + FixedTrait::new_unscaled(19, false), + FixedTrait::new_unscaled(20, false), + FixedTrait::new_unscaled(21, false), + FixedTrait::new_unscaled(22, false), + FixedTrait::new_unscaled(23, false), + FixedTrait::new_unscaled(24, false), + FixedTrait::new_unscaled(25, false), + FixedTrait::new_unscaled(26, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); - 
data.append(FixedTrait::new_unscaled(12, true)); - data.append(FixedTrait::new_unscaled(13, true)); - data.append(FixedTrait::new_unscaled(14, true)); - data.append(FixedTrait::new_unscaled(15, true)); - data.append(FixedTrait::new_unscaled(16, true)); - data.append(FixedTrait::new_unscaled(17, true)); - data.append(FixedTrait::new_unscaled(18, true)); - data.append(FixedTrait::new_unscaled(19, true)); - data.append(FixedTrait::new_unscaled(20, true)); - data.append(FixedTrait::new_unscaled(21, true)); - data.append(FixedTrait::new_unscaled(22, true)); - data.append(FixedTrait::new_unscaled(23, true)); - data.append(FixedTrait::new_unscaled(24, true)); - data.append(FixedTrait::new_unscaled(25, true)); - data.append(FixedTrait::new_unscaled(26, true)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true), + FixedTrait::new_unscaled(12, true), + FixedTrait::new_unscaled(13, true), + FixedTrait::new_unscaled(14, true), + FixedTrait::new_unscaled(15, true), + FixedTrait::new_unscaled(16, true), + FixedTrait::new_unscaled(17, true), + FixedTrait::new_unscaled(18, true), + FixedTrait::new_unscaled(19, true), + FixedTrait::new_unscaled(20, true), + FixedTrait::new_unscaled(21, true), + FixedTrait::new_unscaled(22, true), + FixedTrait::new_unscaled(23, true), + FixedTrait::new_unscaled(24, true), + FixedTrait::new_unscaled(25, true), + FixedTrait::new_unscaled(26, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } + diff --git a/src/test_helper/tensor/fixed_point/fp8x23.cairo b/src/test_helper/tensor/fixed_point/fp8x23.cairo index eac13a89b..1746859e5 100644 --- a/src/test_helper/tensor/fixed_point/fp8x23.cairo +++ b/src/test_helper/tensor/fixed_point/fp8x23.cairo @@ -1,5 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; use orion::numbers::fixed_point::core::{FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor; @@ -7,383 +5,341 @@ use orion::operators::tensor::{TensorTrait, Tensor}; // 1D fn fp_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, 
true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 2D - fn fp_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true) + ]; let 
tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + 
FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn fp_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - 
data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); - data.append(FixedTrait::new_unscaled(12, false)); - data.append(FixedTrait::new_unscaled(13, false)); - data.append(FixedTrait::new_unscaled(14, false)); - data.append(FixedTrait::new_unscaled(15, false)); - data.append(FixedTrait::new_unscaled(16, false)); - data.append(FixedTrait::new_unscaled(17, false)); - data.append(FixedTrait::new_unscaled(18, false)); - data.append(FixedTrait::new_unscaled(19, 
false)); - data.append(FixedTrait::new_unscaled(20, false)); - data.append(FixedTrait::new_unscaled(21, false)); - data.append(FixedTrait::new_unscaled(22, false)); - data.append(FixedTrait::new_unscaled(23, false)); - data.append(FixedTrait::new_unscaled(24, false)); - data.append(FixedTrait::new_unscaled(25, false)); - data.append(FixedTrait::new_unscaled(26, false)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false), + FixedTrait::new_unscaled(12, false), + FixedTrait::new_unscaled(13, false), + FixedTrait::new_unscaled(14, false), + FixedTrait::new_unscaled(15, false), + FixedTrait::new_unscaled(16, false), + FixedTrait::new_unscaled(17, false), + FixedTrait::new_unscaled(18, false), + FixedTrait::new_unscaled(19, false), + FixedTrait::new_unscaled(20, false), + FixedTrait::new_unscaled(21, false), + FixedTrait::new_unscaled(22, false), + FixedTrait::new_unscaled(23, false), + FixedTrait::new_unscaled(24, false), + FixedTrait::new_unscaled(25, false), + FixedTrait::new_unscaled(26, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); - data.append(FixedTrait::new_unscaled(12, true)); - data.append(FixedTrait::new_unscaled(13, true)); - data.append(FixedTrait::new_unscaled(14, true)); - data.append(FixedTrait::new_unscaled(15, true)); - data.append(FixedTrait::new_unscaled(16, true)); - data.append(FixedTrait::new_unscaled(17, true)); - data.append(FixedTrait::new_unscaled(18, true)); - data.append(FixedTrait::new_unscaled(19, true)); - data.append(FixedTrait::new_unscaled(20, true)); - data.append(FixedTrait::new_unscaled(21, true)); - data.append(FixedTrait::new_unscaled(22, true)); - data.append(FixedTrait::new_unscaled(23, true)); - data.append(FixedTrait::new_unscaled(24, true)); - data.append(FixedTrait::new_unscaled(25, true)); - data.append(FixedTrait::new_unscaled(26, true)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, 
true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true), + FixedTrait::new_unscaled(12, true), + FixedTrait::new_unscaled(13, true), + FixedTrait::new_unscaled(14, true), + FixedTrait::new_unscaled(15, true), + FixedTrait::new_unscaled(16, true), + FixedTrait::new_unscaled(17, true), + FixedTrait::new_unscaled(18, true), + FixedTrait::new_unscaled(19, true), + FixedTrait::new_unscaled(20, true), + FixedTrait::new_unscaled(21, true), + FixedTrait::new_unscaled(22, true), + FixedTrait::new_unscaled(23, true), + FixedTrait::new_unscaled(24, true), + FixedTrait::new_unscaled(25, true), + FixedTrait::new_unscaled(26, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } diff --git a/src/test_helper/tensor/i32.cairo b/src/test_helper/tensor/i32.cairo index 89979eef0..feadb88dc 100644 --- a/src/test_helper/tensor/i32.cairo +++ b/src/test_helper/tensor/i32.cairo @@ -1,385 +1,223 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - - use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32Tensor; - // 1D fn i32_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } fn i32_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } // 2D - fn i32_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, 1, 2, 3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, -1, -2, -3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); - data.append(8_i32); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - 
data.append(-7_i32); - data.append(-8_i32); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data: Array = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; + let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn i32_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - 
sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); - data.append(8_i32); - data.append(9_i32); - data.append(10_i32); - data.append(11_i32); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); - data.append(-8_i32); - data.append(-9_i32); - data.append(-10_i32); - data.append(-11_i32); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); - data.append(8_i32); - data.append(9_i32); - data.append(10_i32); - data.append(11_i32); - data.append(12_i32); - data.append(13_i32); - data.append(14_i32); - data.append(15_i32); - data.append(16_i32); - data.append(17_i32); - data.append(18_i32); - data.append(19_i32); - data.append(20_i32); - data.append(21_i32); - data.append(22_i32); - data.append(23_i32); - data.append(24_i32); - data.append(25_i32); - data.append(26_i32); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } fn i32_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); - data.append(-8_i32); - data.append(-9_i32); - data.append(-10_i32); - data.append(-11_i32); - data.append(-12_i32); - data.append(-13_i32); - data.append(-14_i32); - data.append(-15_i32); - data.append(-16_i32); - data.append(-17_i32); - data.append(-18_i32); - data.append(-19_i32); - 
data.append(-20_i32); - data.append(-21_i32); - data.append(-22_i32); - data.append(-23_i32); - data.append(-24_i32); - data.append(-25_i32); - data.append(-26_i32); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + -1, + -2, + -3, + -4, + -5, + -6, + -7, + -8, + -9, + -10, + -11, + -12, + -13, + -14, + -15, + -16, + -17, + -18, + -19, + -20, + -21, + -22, + -23, + -24, + -25, + -26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } diff --git a/src/test_helper/tensor/i8.cairo b/src/test_helper/tensor/i8.cairo index 6d85e4b3e..a13889c25 100644 --- a/src/test_helper/tensor/i8.cairo +++ b/src/test_helper/tensor/i8.cairo @@ -1,385 +1,224 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - - use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I8Tensor; - // 1D fn i8_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } fn i8_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } // 2D - fn i8_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, 1, 2, 3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, -1, -2, -3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); - data.append(8_i8); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); - data.append(-8_i8); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return
tensor; + tensor } fn i8_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data: Array = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; + let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D fn i8_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); + let mut sizes: Array = array![2, 2, 2]; + let mut
data: Array = array![0, -1, -2, -3, -4, -5, -6, -7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); - data.append(8_i8); - data.append(9_i8); - data.append(10_i8); - data.append(11_i8); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); - data.append(-8_i8); - data.append(-9_i8); - data.append(-10_i8); - data.append(-11_i8); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); - data.append(8_i8); - data.append(9_i8); - data.append(10_i8); - data.append(11_i8); - data.append(12_i8); - data.append(13_i8); - data.append(14_i8); - data.append(15_i8); - data.append(16_i8); - data.append(17_i8); - data.append(18_i8); - data.append(19_i8); - data.append(20_i8); - data.append(21_i8); - data.append(22_i8); - data.append(23_i8); - data.append(24_i8); - data.append(25_i8); - data.append(26_i8); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } fn i8_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); - data.append(-8_i8); - data.append(-9_i8); - data.append(-10_i8); - data.append(-11_i8); - data.append(-12_i8); - data.append(-13_i8); - data.append(-14_i8); - data.append(-15_i8); - data.append(-16_i8); - data.append(-17_i8); - data.append(-18_i8); - data.append(-19_i8); - data.append(-20_i8); - data.append(-21_i8); - data.append(-22_i8); - data.append(-23_i8); - data.append(-24_i8); - data.append(-25_i8); - data.append(-26_i8); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + -1, + -2, + -3, + -4, + -5, + -6, + -7, + -8, + -9, 
+ -10, + -11, + -12, + -13, + -14, + -15, + -16, + -17, + -18, + -19, + -20, + -21, + -22, + -23, + -24, + -25, + -26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } diff --git a/src/test_helper/tensor/u32.cairo b/src/test_helper/tensor/u32.cairo index 553ebecff..09ea289ce 100644 --- a/src/test_helper/tensor/u32.cairo +++ b/src/test_helper/tensor/u32.cairo @@ -1,198 +1,115 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; use orion::operators::tensor::U32Tensor; use orion::operators::tensor::{TensorTrait, Tensor}; // 1D fn u32_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 2D - fn u32_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, 1, 2, 3]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn u32_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7]; let tensor = TensorTrait::::new(sizes.span(), 
data.span()); - return tensor; + tensor } fn u32_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26 + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } diff --git a/src/utils.cairo b/src/utils.cairo index 34946d5c5..e9ec9d31e 100644 --- a/src/utils.cairo +++ b/src/utils.cairo @@ -1,8 +1,3 @@ -use core::traits::TryInto; -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::{Tensor, TensorTrait}; fn u32_max(a: u32, b: u32) -> u32 { @@ -37,10 +32,7 @@ fn assert_seq_eq, impl TCopy: Copy, impl TDr assert(lhs.len() == rhs.len(), 'should be equal'); let mut i = 0; - loop { - if i >= lhs.len() { - break; - } + while i != lhs.len() { assert_eq(lhs[i], rhs[i]); i += 1; } diff --git a/tests/ml.cairo b/tests/ml.cairo index 3f071f13d..4e3e0781e 100644 --- a/tests/ml.cairo +++ b/tests/ml.cairo @@ -4,4 +4,4 @@ mod linear_regressor_test; mod linear_classifier_test; mod svm_regressor_test; mod svm_classifier_test; - +mod normalizer_test; diff --git a/tests/ml/linear_classifier_test.cairo b/tests/ml/linear_classifier_test.cairo index 8dc59afd9..7258533ee 100644 --- a/tests/ml/linear_classifier_test.cairo +++ b/tests/ml/linear_classifier_test.cairo @@ -11,7 +11,7 @@ use core::debug::PrintTrait; fn test_linear_classifier_multi_none() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::NONE); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -37,7 +37,7 @@ fn test_linear_classifier_multi_none() { fn test_linear_classifier_multi_softmax() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -62,7 +62,7 @@ fn test_linear_classifier_multi_softmax() { fn 
test_linear_classifier_multi_softmax_zero() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -88,7 +88,7 @@ fn test_linear_classifier_multi_softmax_zero() { fn test_linear_classifier_multi_logistic() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0] == 0'); @@ -113,7 +113,7 @@ fn test_linear_classifier_multi_logistic() { fn test_linear_classifier_binary_none() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::NONE); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -132,7 +132,7 @@ fn test_linear_classifier_binary_none() { fn test_linear_classifier_binary_logistic() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -151,7 +151,7 @@ fn test_linear_classifier_binary_logistic() { fn test_linear_classifier_binary_softmax() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); assert(*labels[1] == 1, 'labels[1]'); @@ -169,7 +169,7 @@ fn test_linear_classifier_binary_softmax() { fn test_linear_classifier_binary_softmax_zero() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); assert(*labels[1] == 1, 'labels[1]'); @@ -187,7 +187,7 @@ fn test_linear_classifier_binary_softmax_zero() { fn test_linear_classifier_unary_none() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::NONE); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -204,7 +204,7 @@ fn test_linear_classifier_unary_none() { fn test_linear_classifier_unary_logistic() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -221,7 +221,7 @@ fn test_linear_classifier_unary_logistic() { fn test_linear_classifier_unary_softmax() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = 
LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -238,7 +238,7 @@ fn test_linear_classifier_unary_softmax() { fn test_linear_classifier_unary_softmax_zero() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); diff --git a/tests/ml/linear_regressor_test.cairo b/tests/ml/linear_regressor_test.cairo index 1aa7a4211..141ea40ce 100644 --- a/tests/ml/linear_regressor_test.cairo +++ b/tests/ml/linear_regressor_test.cairo @@ -40,7 +40,7 @@ fn test_linear_regressor() { coefficients, intercepts, target, post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27'); assert(*scores.data[1] == FP16x16 { mag: 79299, sign: true }, '*scores[1] == -1.21'); @@ -84,7 +84,7 @@ fn test_linear_regressor_2() { coefficients, intercepts, target, post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27'); assert(*scores.data[1] == FP16x16 { mag: 4588, sign: true }, '*scores[1] == -0.07'); diff --git a/tests/ml/normalizer_test.cairo b/tests/ml/normalizer_test.cairo new file mode 100644 index 000000000..67beaed79 --- /dev/null +++ b/tests/ml/normalizer_test.cairo @@ -0,0 +1,169 @@ +use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM}; +use orion::utils::{assert_eq, assert_seq_eq}; + +use orion::numbers::FP16x16; +use orion::operators::tensor::{ + Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq +}; + +#[test] +#[available_gas(200000000000)] +fn test_normalizer_max() { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 52428, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 26214, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 26214, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + let X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 52428, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 21845, sign: false }); + data.append(FP16x16 { mag: 43690, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + let expected_output = TensorTrait::new(shape.span(), data.span()); + + let actual_output = NormalizerTrait::predict(X, NORM::MAX); + + assert_eq(actual_output, expected_output); +} + +#[test] +#[available_gas(200000000000)] +fn test_normalizer_l1() { + let mut shape = ArrayTrait::::new(); + 
shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 52428, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 26214, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 26214, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + let X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 27306, sign: true }); + data.append(FP16x16 { mag: 21845, sign: true }); + data.append(FP16x16 { mag: 16384, sign: true }); + data.append(FP16x16 { mag: 43690, sign: true }); + data.append(FP16x16 { mag: 21845, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 10922, sign: false }); + data.append(FP16x16 { mag: 21845, sign: false }); + data.append(FP16x16 { mag: 32768, sign: false }); + let expected_output = TensorTrait::new(shape.span(), data.span()); + + let actual_output = NormalizerTrait::predict(X, NORM::L1); + + assert_eq(actual_output, expected_output); +} + +#[test] +#[available_gas(200000000000)] +fn test_normalizer_l2() { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 52428, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 26214, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 26214, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + let X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 46340, sign: true }); + data.append(FP16x16 { mag: 37072, sign: true }); + data.append(FP16x16 { mag: 27804, sign: true }); + data.append(FP16x16 { mag: 58617, sign: true }); + data.append(FP16x16 { mag: 29308, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 17515, sign: false }); + data.append(FP16x16 { mag: 35030, sign: false }); + data.append(FP16x16 { mag: 52545, sign: false }); + let expected_output = TensorTrait::new(shape.span(), data.span()); + + let actual_output = NormalizerTrait::predict(X, NORM::L2); + + assert_eq(actual_output, expected_output); +} + + +#[test] +#[available_gas(200000000000)] +fn test_normalizer_max_avoid_div_zero() { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + let X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + 
shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + let expected_output = TensorTrait::new(shape.span(), data.span()); + + let actual_output = NormalizerTrait::predict(X, NORM::MAX); + + assert_eq(actual_output, expected_output); +} + diff --git a/tests/ml/tree_ensemble_classifier.cairo b/tests/ml/tree_ensemble_classifier.cairo index 441aabb34..eb3c7ef67 100644 --- a/tests/ml/tree_ensemble_classifier.cairo +++ b/tests/ml/tree_ensemble_classifier.cairo @@ -12,7 +12,7 @@ use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; fn test_tree_ensemble_classifier_multi_pt_none() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::NONE); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -64,7 +64,7 @@ fn test_tree_ensemble_classifier_multi_pt_none() { fn test_tree_ensemble_classifier_multi_pt_softmax() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -116,7 +116,7 @@ fn test_tree_ensemble_classifier_multi_pt_softmax() { fn test_tree_ensemble_classifier_multi_pt_softmax_zero() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0] == 0'); @@ -169,7 +169,7 @@ fn test_tree_ensemble_classifier_multi_pt_softmax_zero() { fn test_tree_ensemble_classifier_multi_pt_logistic() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0] == 0'); @@ -221,7 +221,7 @@ fn test_tree_ensemble_classifier_multi_pt_logistic() { fn test_tree_ensemble_classifier_binary_none() { let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::NONE); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -245,7 +245,7 @@ fn test_tree_ensemble_classifier_binary_logistic() { POST_TRANSFORM::LOGISTIC ); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -267,7 +267,7 @@ fn test_tree_ensemble_classifier_binary_logistic() { fn test_tree_ensemble_classifier_binary_softmax() { let (mut 
diff --git a/tests/ml/tree_ensemble_classifier.cairo b/tests/ml/tree_ensemble_classifier.cairo
index 441aabb34..eb3c7ef67 100644
--- a/tests/ml/tree_ensemble_classifier.cairo
+++ b/tests/ml/tree_ensemble_classifier.cairo
@@ -12,7 +12,7 @@ use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
 fn test_tree_ensemble_classifier_multi_pt_none() {
     let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::NONE);
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 0, 'labels[0]');
@@ -64,7 +64,7 @@
 fn test_tree_ensemble_classifier_multi_pt_softmax() {
     let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX);
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 0, 'labels[0]');
@@ -116,7 +116,7 @@
 fn test_tree_ensemble_classifier_multi_pt_softmax_zero() {
     let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAXZERO);
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 0, 'labels[0] == 0');
@@ -169,7 +169,7 @@
 fn test_tree_ensemble_classifier_multi_pt_logistic() {
     let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::LOGISTIC);
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 0, 'labels[0] == 0');
@@ -221,7 +221,7 @@
 fn test_tree_ensemble_classifier_binary_none() {
     let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::NONE);
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 1, 'labels[0]');
@@ -245,7 +245,7 @@ fn test_tree_ensemble_classifier_binary_logistic() {
         POST_TRANSFORM::LOGISTIC
     );
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 1, 'labels[0]');
@@ -267,7 +267,7 @@
 fn test_tree_ensemble_classifier_binary_softmax() {
     let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::SOFTMAX);
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 1, 'labels[0]');
@@ -291,7 +291,7 @@ fn test_tree_ensemble_classifier_binary_softmax_zero() {
         POST_TRANSFORM::SOFTMAXZERO
    );
 
-    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
     // ASSERT LABELS
     assert(*labels[0] == 1, 'labels[0]');
@@ -313,7 +313,7 @@ fn test_tree_ensemble_classifier_binary_softmax_zero() {
 
 // fn test_tree_ensemble_classifier_binary_probit() {
 //     let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::PROBIT);
-//     let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+//     let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
 //     // ASSERT LABELS
 //     assert(*labels[0] == 1, 'labels[0]');
diff --git a/tests/ml/tree_ensemble_regressor.cairo b/tests/ml/tree_ensemble_regressor.cairo
index 5b1aeeb41..70452cb42 100644
--- a/tests/ml/tree_ensemble_regressor.cairo
+++ b/tests/ml/tree_ensemble_regressor.cairo
@@ -13,7 +13,7 @@ use core::debug::PrintTrait;
 fn test_tree_ensemble_regressor_SUM() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
@@ -35,7 +35,7 @@
 fn test_tree_ensemble_regressor_AVERAGE() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::AVERAGE);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
@@ -57,7 +57,7 @@
 fn test_tree_ensemble_regressor_MIN() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::MIN);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
@@ -79,7 +79,7 @@
 fn test_tree_ensemble_regressor_MAX() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::MAX);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
diff --git a/tests/nodes.cairo b/tests/nodes.cairo
index 8814cfb80..29bebb762 100644
--- a/tests/nodes.cairo
+++ b/tests/nodes.cairo
@@ -906,6 +906,9 @@ mod compress_u32_3d_axis1;
 mod compress_u32_3d_axis2;
 mod compress_u32_3d_axis2_2;
 mod compress_u32_3d_axis3;
+mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims;
+mod reduce_log_sum_exp_fp32x32_export_keepdims;
+mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims;
 mod layer_normalization_default_axis;
 mod layer_normalization_4d_axis0;
 mod layer_normalization_4d_axis1;
@@ -1039,3 +1042,8 @@ mod conv_2D_with_autopad_same;
 mod conv_2D_with_strides_asymmetric_padding;
 mod conv_2D_with_strides_with_padding;
 mod conv_4D_with_padding;
+mod label_encoder_fp16x16_3d_default;
+mod label_encoder_fp8x23_default;
+mod label_encoder_i8_default;
+mod label_encoder_i32_default;
+mod label_encoder_u32_default;
diff --git a/tests/nodes/gemm_default_vector_bias.cairo b/tests/nodes/gemm_default_vector_bias.cairo
index 24826f739..fbed99929 100644
--- a/tests/nodes/gemm_default_vector_bias.cairo
+++ b/tests/nodes/gemm_default_vector_bias.cairo
@@ -3,7 +3,6 @@ mod input_1;
 mod input_2;
 mod output_0;
 
-
 use orion::operators::nn::NNTrait;
 use orion::numbers::FixedTrait;
 use orion::utils::{assert_eq, assert_seq_eq};
diff --git a/tests/nodes/gemm_default_vector_bias/input_2.cairo b/tests/nodes/gemm_default_vector_bias/input_2.cairo
index f340a6ea2..e3d351aff 100644
--- a/tests/nodes/gemm_default_vector_bias/input_2.cairo
+++ b/tests/nodes/gemm_default_vector_bias/input_2.cairo
@@ -5,7 +5,6 @@ use orion::numbers::{FixedTrait, FP16x16};
 
 fn input_2() -> Tensor<FP16x16> {
     let mut shape = ArrayTrait::<usize>::new();
-    shape.append(1);
     shape.append(4);
 
     let mut data = ArrayTrait::new();
diff --git a/tests/nodes/label_encoder_fp16x16_3d_default.cairo b/tests/nodes/label_encoder_fp16x16_3d_default.cairo
new file mode 100644
index 000000000..d70b6da48
--- /dev/null
+++ b/tests/nodes/label_encoder_fp16x16_3d_default.cairo
@@ -0,0 +1,34 @@
+mod input_0;
+mod input_1;
+mod input_2;
+mod input_3;
+mod output_0;
+
+
+use orion::operators::tensor::FP16x16TensorPartialEq;
+use orion::operators::tensor::{TensorTrait, Tensor};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::utils::{assert_eq, assert_seq_eq};
+
+#[test]
+#[available_gas(2000000000)]
+fn test_label_encoder_fp16x16_3d_default() {
+    let input_0 = input_0::input_0();
+    let input_1 = input_1::input_1();
+    let input_2 = input_2::input_2();
+    let input_3 = input_3::input_3();
+    let z_0 = output_0::output_0();
+
+    let y_0 = input_0
+        .label_encoder(
+            default_list: Option::None,
+            default_tensor: Option::Some(input_1),
+            keys: Option::None,
+            keys_tensor: Option::Some(input_2),
+            values: Option::None,
+            values_tensor: Option::Some(input_3)
+        );
+
+    assert_eq(y_0, z_0);
+}
diff --git a/tests/nodes/label_encoder_fp16x16_3d_default/input_0.cairo b/tests/nodes/label_encoder_fp16x16_3d_default/input_0.cairo
new file mode 100644
index 000000000..18f484a1b
--- /dev/null
+++ b/tests/nodes/label_encoder_fp16x16_3d_default/input_0.cairo
@@ -0,0 +1,21 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn input_0() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(9);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 131072, sign: false });
+    data.append(FP16x16 { mag: 196608, sign: false });
+    data.append(FP16x16 { mag: 262144, sign: false });
+    data.append(FP16x16 { mag: 327680, sign: false });
+    data.append(FP16x16 { mag: 393216, sign: false });
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 131072, sign: false });
+    data.append(FP16x16 { mag: 196608, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp16x16_3d_default/input_1.cairo b/tests/nodes/label_encoder_fp16x16_3d_default/input_1.cairo
new file mode 100644
index 000000000..7f195d298
--- /dev/null
+++ b/tests/nodes/label_encoder_fp16x16_3d_default/input_1.cairo
@@ -0,0 +1,12 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn input_1() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 6488064, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp16x16_3d_default/input_2.cairo b/tests/nodes/label_encoder_fp16x16_3d_default/input_2.cairo
new file mode 100644
index 000000000..a2e5f4f86
--- /dev/null
+++ b/tests/nodes/label_encoder_fp16x16_3d_default/input_2.cairo
@@ -0,0 +1,16 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn input_2() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(4);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 131072, sign: false });
+    data.append(FP16x16 { mag: 327680, sign: false });
+    data.append(FP16x16 { mag: 393216, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp16x16_3d_default/input_3.cairo b/tests/nodes/label_encoder_fp16x16_3d_default/input_3.cairo
new file mode 100644
index 000000000..d5d76889d
--- /dev/null
+++ b/tests/nodes/label_encoder_fp16x16_3d_default/input_3.cairo
@@ -0,0 +1,16 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn input_3() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(4);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 720896, sign: false });
+    data.append(FP16x16 { mag: 1441792, sign: false });
+    data.append(FP16x16 { mag: 3604480, sign: false });
+    data.append(FP16x16 { mag: 4325376, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp16x16_3d_default/output_0.cairo b/tests/nodes/label_encoder_fp16x16_3d_default/output_0.cairo
new file mode 100644
index 000000000..dc5cf560a
--- /dev/null
+++ b/tests/nodes/label_encoder_fp16x16_3d_default/output_0.cairo
@@ -0,0 +1,21 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn output_0() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(9);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 720896, sign: false });
+    data.append(FP16x16 { mag: 1441792, sign: false });
+    data.append(FP16x16 { mag: 6488064, sign: false });
+    data.append(FP16x16 { mag: 6488064, sign: false });
+    data.append(FP16x16 { mag: 3604480, sign: false });
+    data.append(FP16x16 { mag: 4325376, sign: false });
+    data.append(FP16x16 { mag: 720896, sign: false });
+    data.append(FP16x16 { mag: 1441792, sign: false });
+    data.append(FP16x16 { mag: 6488064, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp8x23_default.cairo b/tests/nodes/label_encoder_fp8x23_default.cairo
new file mode 100644
index 000000000..d673a35df
--- /dev/null
+++ b/tests/nodes/label_encoder_fp8x23_default.cairo
@@ -0,0 +1,34 @@
+mod input_0;
+mod input_1;
+mod input_2;
+mod input_3;
+mod output_0;
+
+
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
+use orion::operators::tensor::FP8x23TensorPartialEq;
+use orion::operators::tensor::{TensorTrait, Tensor};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::utils::{assert_eq, assert_seq_eq};
+
+#[test]
+#[available_gas(2000000000)]
+fn test_label_encoder_fp8x23_default() {
+    let input_0 = input_0::input_0();
+    let input_1 = input_1::input_1();
+    let input_2 = input_2::input_2();
+    let input_3 = input_3::input_3();
+    let z_0 = output_0::output_0();
+
+    let y_0 = input_0
+        .label_encoder(
+            default_list: Option::None,
+            default_tensor: Option::Some(input_1),
+            keys: Option::None,
+            keys_tensor: Option::Some(input_2),
+            values: Option::None,
+            values_tensor: Option::Some(input_3)
+        );
+
+    assert_eq(y_0, z_0);
+}
diff --git a/tests/nodes/label_encoder_fp8x23_default/input_0.cairo b/tests/nodes/label_encoder_fp8x23_default/input_0.cairo
new file mode 100644
index 000000000..a69808fe6
--- /dev/null
+++ b/tests/nodes/label_encoder_fp8x23_default/input_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
+use orion::numbers::{FixedTrait, FP8x23};
+
+fn input_0() -> Tensor<FP8x23> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP8x23 { mag: 8388608, sign: false });
+    data.append(FP8x23 { mag: 16777216, sign: false });
+    data.append(FP8x23 { mag: 25165824, sign: false });
+    data.append(FP8x23 { mag: 33554432, sign: false });
+    data.append(FP8x23 { mag: 41943040, sign: false });
+    data.append(FP8x23 { mag: 50331648, sign: false });
+    data.append(FP8x23 { mag: 8388608, sign: false });
+    data.append(FP8x23 { mag: 16777216, sign: false });
+    data.append(FP8x23 { mag: 25165824, sign: false });
+    data.append(FP8x23 { mag: 58720256, sign: false });
+    data.append(FP8x23 { mag: 67108864, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp8x23_default/input_1.cairo b/tests/nodes/label_encoder_fp8x23_default/input_1.cairo
new file mode 100644
index 000000000..60a4f9f51
--- /dev/null
+++ b/tests/nodes/label_encoder_fp8x23_default/input_1.cairo
@@ -0,0 +1,12 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
+use orion::numbers::{FixedTrait, FP8x23};
+
+fn input_1() -> Tensor<FP8x23> {
+    let mut shape = ArrayTrait::<usize>::new();
+
+    let mut data = ArrayTrait::new();
+    data.append(FP8x23 { mag: 830472192, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp8x23_default/input_2.cairo b/tests/nodes/label_encoder_fp8x23_default/input_2.cairo
new file mode 100644
index 000000000..111238f1c
--- /dev/null
+++ b/tests/nodes/label_encoder_fp8x23_default/input_2.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
+use orion::numbers::{FixedTrait, FP8x23};
+
+fn input_2() -> Tensor<FP8x23> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP8x23 { mag: 8388608, sign: false });
+    data.append(FP8x23 { mag: 16777216, sign: false });
+    data.append(FP8x23 { mag: 41943040, sign: false });
+    data.append(FP8x23 { mag: 50331648, sign: false });
+    data.append(FP8x23 { mag: 58720256, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp8x23_default/input_3.cairo b/tests/nodes/label_encoder_fp8x23_default/input_3.cairo
new file mode 100644
index 000000000..a63818c51
--- /dev/null
+++ b/tests/nodes/label_encoder_fp8x23_default/input_3.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
+use orion::numbers::{FixedTrait, FP8x23};
+
+fn input_3() -> Tensor<FP8x23> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP8x23 { mag: 92274688, sign: false });
+    data.append(FP8x23 { mag: 184549376, sign: false });
+    data.append(FP8x23 { mag: 461373440, sign: false });
+    data.append(FP8x23 { mag: 553648128, sign: false });
+    data.append(FP8x23 { mag: 645922816, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_fp8x23_default/output_0.cairo b/tests/nodes/label_encoder_fp8x23_default/output_0.cairo
new file mode 100644
index 000000000..1fc9d413b
--- /dev/null
+++ b/tests/nodes/label_encoder_fp8x23_default/output_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
+use orion::numbers::{FixedTrait, FP8x23};
+
+fn output_0() -> Tensor<FP8x23> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP8x23 { mag: 92274688, sign: false });
+    data.append(FP8x23 { mag: 184549376, sign: false });
+    data.append(FP8x23 { mag: 830472192, sign: false });
+    data.append(FP8x23 { mag: 830472192, sign: false });
+    data.append(FP8x23 { mag: 461373440, sign: false });
+    data.append(FP8x23 { mag: 553648128, sign: false });
+    data.append(FP8x23 { mag: 92274688, sign: false });
+    data.append(FP8x23 { mag: 184549376, sign: false });
+    data.append(FP8x23 { mag: 830472192, sign: false });
+    data.append(FP8x23 { mag: 645922816, sign: false });
+    data.append(FP8x23 { mag: 830472192, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i32_default.cairo b/tests/nodes/label_encoder_i32_default.cairo
new file mode 100644
index 000000000..d504e2721
--- /dev/null
+++ b/tests/nodes/label_encoder_i32_default.cairo
@@ -0,0 +1,34 @@
+mod input_0;
+mod input_1;
+mod input_2;
+mod input_3;
+mod output_0;
+
+
+use orion::operators::tensor::I32TensorPartialEq;
+use orion::operators::tensor::{TensorTrait, Tensor};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::{I32Tensor, I32TensorAdd};
+
+#[test]
+#[available_gas(2000000000)]
+fn test_label_encoder_i32_default() {
+    let input_0 = input_0::input_0();
+    let input_1 = input_1::input_1();
+    let input_2 = input_2::input_2();
+    let input_3 = input_3::input_3();
+    let z_0 = output_0::output_0();
+
+    let y_0 = input_0
+        .label_encoder(
+            default_list: Option::None,
+            default_tensor: Option::Some(input_1),
+            keys: Option::None,
+            keys_tensor: Option::Some(input_2),
+            values: Option::None,
+            values_tensor: Option::Some(input_3)
+        );
+
+    assert_eq(y_0, z_0);
+}
diff --git a/tests/nodes/label_encoder_i32_default/input_0.cairo b/tests/nodes/label_encoder_i32_default/input_0.cairo
new file mode 100644
index 000000000..3e9f1463e
--- /dev/null
+++ b/tests/nodes/label_encoder_i32_default/input_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I32Tensor, I32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_0() -> Tensor<i32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(1);
+    data.append(2);
+    data.append(3);
+    data.append(4);
+    data.append(5);
+    data.append(6);
+    data.append(1);
+    data.append(2);
+    data.append(3);
+    data.append(7);
+    data.append(8);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i32_default/input_1.cairo b/tests/nodes/label_encoder_i32_default/input_1.cairo
new file mode 100644
index 000000000..63962bf4f
--- /dev/null
+++ b/tests/nodes/label_encoder_i32_default/input_1.cairo
@@ -0,0 +1,12 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I32Tensor, I32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_1() -> Tensor<i32> {
+    let mut shape = ArrayTrait::<usize>::new();
+
+    let mut data = ArrayTrait::new();
+    data.append(99);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i32_default/input_2.cairo b/tests/nodes/label_encoder_i32_default/input_2.cairo
new file mode 100644
index 000000000..2a2d83c67
--- /dev/null
+++ b/tests/nodes/label_encoder_i32_default/input_2.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I32Tensor, I32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_2() -> Tensor<i32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(1);
+    data.append(2);
+    data.append(5);
+    data.append(6);
+    data.append(7);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i32_default/input_3.cairo b/tests/nodes/label_encoder_i32_default/input_3.cairo
new file mode 100644
index 000000000..0c5e1aecc
--- /dev/null
+++ b/tests/nodes/label_encoder_i32_default/input_3.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I32Tensor, I32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_3() -> Tensor<i32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(11);
+    data.append(22);
+    data.append(55);
+    data.append(66);
+    data.append(77);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i32_default/output_0.cairo b/tests/nodes/label_encoder_i32_default/output_0.cairo
new file mode 100644
index 000000000..4149a930f
--- /dev/null
+++ b/tests/nodes/label_encoder_i32_default/output_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I32Tensor, I32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn output_0() -> Tensor<i32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(11);
+    data.append(22);
+    data.append(99);
+    data.append(99);
+    data.append(55);
+    data.append(66);
+    data.append(11);
+    data.append(22);
+    data.append(99);
+    data.append(77);
+    data.append(99);
+    TensorTrait::new(shape.span(), data.span())
+}
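The integer fixtures make the LabelEncoder contract easy to read off: each element of input_0 that equals some keys_tensor[k] is mapped to values_tensor[k], and anything not found falls back to the scalar default_tensor (note that input_1 is deliberately built with an empty shape). A minimal sketch reusing the i32 tensors defined above (the same call as in the generated test, annotated; the annotations are mine):

    // keys [1, 2, 5, 6, 7] map to values [11, 22, 55, 66, 77]; default is 99
    let mapped = input_0
        .label_encoder(
            default_list: Option::None,
            default_tensor: Option::Some(input_1), // scalar 99
            keys: Option::None,
            keys_tensor: Option::Some(input_2),
            values: Option::None,
            values_tensor: Option::Some(input_3)
        );
    // [1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8] maps to
    // [11, 22, 99, 99, 55, 66, 11, 22, 99, 77, 99] -- 3, 4 and 8 hit the default,
    // exactly the output_0 fixture above.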
diff --git a/tests/nodes/label_encoder_i8_default.cairo b/tests/nodes/label_encoder_i8_default.cairo
new file mode 100644
index 000000000..a04266eff
--- /dev/null
+++ b/tests/nodes/label_encoder_i8_default.cairo
@@ -0,0 +1,34 @@
+mod input_0;
+mod input_1;
+mod input_2;
+mod input_3;
+mod output_0;
+
+
+use orion::operators::tensor::{I8Tensor, I8TensorAdd};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::I8TensorPartialEq;
+
+#[test]
+#[available_gas(2000000000)]
+fn test_label_encoder_i8_default() {
+    let input_0 = input_0::input_0();
+    let input_1 = input_1::input_1();
+    let input_2 = input_2::input_2();
+    let input_3 = input_3::input_3();
+    let z_0 = output_0::output_0();
+
+    let y_0 = input_0
+        .label_encoder(
+            default_list: Option::None,
+            default_tensor: Option::Some(input_1),
+            keys: Option::None,
+            keys_tensor: Option::Some(input_2),
+            values: Option::None,
+            values_tensor: Option::Some(input_3)
+        );
+
+    assert_eq(y_0, z_0);
+}
diff --git a/tests/nodes/label_encoder_i8_default/input_0.cairo b/tests/nodes/label_encoder_i8_default/input_0.cairo
new file mode 100644
index 000000000..35d5a6551
--- /dev/null
+++ b/tests/nodes/label_encoder_i8_default/input_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I8Tensor, I8TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_0() -> Tensor<i8> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(1);
+    data.append(2);
+    data.append(3);
+    data.append(4);
+    data.append(5);
+    data.append(6);
+    data.append(1);
+    data.append(2);
+    data.append(3);
+    data.append(7);
+    data.append(8);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i8_default/input_1.cairo b/tests/nodes/label_encoder_i8_default/input_1.cairo
new file mode 100644
index 000000000..89ec47169
--- /dev/null
+++ b/tests/nodes/label_encoder_i8_default/input_1.cairo
@@ -0,0 +1,12 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I8Tensor, I8TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_1() -> Tensor<i8> {
+    let mut shape = ArrayTrait::<usize>::new();
+
+    let mut data = ArrayTrait::new();
+    data.append(99);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i8_default/input_2.cairo b/tests/nodes/label_encoder_i8_default/input_2.cairo
new file mode 100644
index 000000000..b30dc93da
--- /dev/null
+++ b/tests/nodes/label_encoder_i8_default/input_2.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I8Tensor, I8TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_2() -> Tensor<i8> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(1);
+    data.append(2);
+    data.append(5);
+    data.append(6);
+    data.append(7);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i8_default/input_3.cairo b/tests/nodes/label_encoder_i8_default/input_3.cairo
new file mode 100644
index 000000000..939c07245
--- /dev/null
+++ b/tests/nodes/label_encoder_i8_default/input_3.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I8Tensor, I8TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_3() -> Tensor<i8> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(11);
+    data.append(22);
+    data.append(55);
+    data.append(66);
+    data.append(77);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_i8_default/output_0.cairo b/tests/nodes/label_encoder_i8_default/output_0.cairo
new file mode 100644
index 000000000..edfc272a1
--- /dev/null
+++ b/tests/nodes/label_encoder_i8_default/output_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{I8Tensor, I8TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn output_0() -> Tensor<i8> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(11);
+    data.append(22);
+    data.append(99);
+    data.append(99);
+    data.append(55);
+    data.append(66);
+    data.append(11);
+    data.append(22);
+    data.append(99);
+    data.append(77);
+    data.append(99);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_u32_default.cairo b/tests/nodes/label_encoder_u32_default.cairo
new file mode 100644
index 000000000..eea7120b5
--- /dev/null
+++ b/tests/nodes/label_encoder_u32_default.cairo
@@ -0,0 +1,34 @@
+mod input_0;
+mod input_1;
+mod input_2;
+mod input_3;
+mod output_0;
+
+
+use orion::operators::tensor::U32TensorPartialEq;
+use orion::operators::tensor::{TensorTrait, Tensor};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::{U32Tensor, U32TensorAdd};
+
+#[test]
+#[available_gas(2000000000)]
+fn test_label_encoder_u32_default() {
+    let input_0 = input_0::input_0();
+    let input_1 = input_1::input_1();
+    let input_2 = input_2::input_2();
+    let input_3 = input_3::input_3();
+    let z_0 = output_0::output_0();
+
+    let y_0 = input_0
+        .label_encoder(
+            default_list: Option::None,
+            default_tensor: Option::Some(input_1),
+            keys: Option::None,
+            keys_tensor: Option::Some(input_2),
+            values: Option::None,
+            values_tensor: Option::Some(input_3)
+        );
+
+    assert_eq(y_0, z_0);
+}
diff --git a/tests/nodes/label_encoder_u32_default/input_0.cairo b/tests/nodes/label_encoder_u32_default/input_0.cairo
new file mode 100644
index 000000000..ab0f20967
--- /dev/null
+++ b/tests/nodes/label_encoder_u32_default/input_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{U32Tensor, U32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_0() -> Tensor<u32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(1);
+    data.append(2);
+    data.append(3);
+    data.append(4);
+    data.append(5);
+    data.append(6);
+    data.append(1);
+    data.append(2);
+    data.append(3);
+    data.append(7);
+    data.append(8);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_u32_default/input_1.cairo b/tests/nodes/label_encoder_u32_default/input_1.cairo
new file mode 100644
index 000000000..093b42375
--- /dev/null
+++ b/tests/nodes/label_encoder_u32_default/input_1.cairo
@@ -0,0 +1,12 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{U32Tensor, U32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_1() -> Tensor<u32> {
+    let mut shape = ArrayTrait::<usize>::new();
+
+    let mut data = ArrayTrait::new();
+    data.append(99);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_u32_default/input_2.cairo b/tests/nodes/label_encoder_u32_default/input_2.cairo
new file mode 100644
index 000000000..871bd4ee0
--- /dev/null
+++ b/tests/nodes/label_encoder_u32_default/input_2.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{U32Tensor, U32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_2() -> Tensor<u32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(1);
+    data.append(2);
+    data.append(5);
+    data.append(6);
+    data.append(7);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_u32_default/input_3.cairo b/tests/nodes/label_encoder_u32_default/input_3.cairo
new file mode 100644
index 000000000..182c78fc6
--- /dev/null
+++ b/tests/nodes/label_encoder_u32_default/input_3.cairo
@@ -0,0 +1,17 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{U32Tensor, U32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn input_3() -> Tensor<u32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(5);
+
+    let mut data = ArrayTrait::new();
+    data.append(11);
+    data.append(22);
+    data.append(55);
+    data.append(66);
+    data.append(77);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/label_encoder_u32_default/output_0.cairo b/tests/nodes/label_encoder_u32_default/output_0.cairo
new file mode 100644
index 000000000..ac2cc67e3
--- /dev/null
+++ b/tests/nodes/label_encoder_u32_default/output_0.cairo
@@ -0,0 +1,23 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{U32Tensor, U32TensorAdd};
+use orion::numbers::NumberTrait;
+
+fn output_0() -> Tensor<u32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(11);
+
+    let mut data = ArrayTrait::new();
+    data.append(11);
+    data.append(22);
+    data.append(99);
+    data.append(99);
+    data.append(55);
+    data.append(66);
+    data.append(11);
+    data.append(22);
+    data.append(99);
+    data.append(77);
+    data.append(99);
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/random_uniform_like_fp16x16.cairo b/tests/nodes/random_uniform_like_fp16x16.cairo
index 951a567b8..cc8fa60a9 100644
--- a/tests/nodes/random_uniform_like_fp16x16.cairo
+++ b/tests/nodes/random_uniform_like_fp16x16.cairo
@@ -15,7 +15,12 @@ fn test_random_uniform_like_fp16x16() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::random_uniform_like(@input_0, Option::Some(FP16x16 { mag: 655360, sign: false }),Option::Some(FP16x16 { mag: 65536, sign: false }), Option::Some(354145));
+    let y_0 = TensorTrait::random_uniform_like(
+        @input_0,
+        Option::Some(FP16x16 { mag: 655360, sign: false }),
+        Option::Some(FP16x16 { mag: 65536, sign: false }),
+        Option::Some(354145)
+    );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/random_uniform_like_fp8x23.cairo b/tests/nodes/random_uniform_like_fp8x23.cairo
index 06c1ad47d..b4192e536 100644
--- a/tests/nodes/random_uniform_like_fp8x23.cairo
+++ b/tests/nodes/random_uniform_like_fp8x23.cairo
@@ -15,7 +15,12 @@ fn test_random_uniform_like_fp8x23() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::random_uniform_like(@input_0, Option::Some(FP8x23 { mag: 83886080, sign: false }),Option::Some(FP8x23 { mag: 8388608, sign: false }), Option::Some(354145));
+    let y_0 = TensorTrait::random_uniform_like(
+        @input_0,
+        Option::Some(FP8x23 { mag: 83886080, sign: false }),
+        Option::Some(FP8x23 { mag: 8388608, sign: false }),
+        Option::Some(354145)
+    );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/range_fp16x16.cairo b/tests/nodes/range_fp16x16.cairo
index efacd031c..331a4fc61 100644
--- a/tests/nodes/range_fp16x16.cairo
+++ b/tests/nodes/range_fp16x16.cairo
@@ -13,7 +13,11 @@
 fn test_range_fp16x16() {
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::range(FP16x16 { mag: 65536, sign: false },FP16x16 { mag: 1638400, sign: false },FP16x16 { mag: 196608, sign: false });
+    let y_0 = TensorTrait::range(
+        FP16x16 { mag: 65536, sign: false },
+        FP16x16 { mag: 1638400, sign: false },
+        FP16x16 { mag: 196608, sign: false }
+    );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/range_fp8x23.cairo b/tests/nodes/range_fp8x23.cairo
index c299a96ab..ac218e22b 100644
--- a/tests/nodes/range_fp8x23.cairo
+++ b/tests/nodes/range_fp8x23.cairo
@@ -13,7 +13,11 @@
 fn test_range_fp8x23() {
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::range(FP8x23 { mag: 8388608, sign: false },FP8x23 { mag: 41943040, sign: false },FP8x23 { mag: 2516582, sign: false });
+    let y_0 = TensorTrait::range(
+        FP8x23 { mag: 8388608, sign: false },
+        FP8x23 { mag: 41943040, sign: false },
+        FP8x23 { mag: 2516582, sign: false }
+    );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/range_i32.cairo b/tests/nodes/range_i32.cairo
index 786094089..90580f17c 100644
--- a/tests/nodes/range_i32.cairo
+++ b/tests/nodes/range_i32.cairo
@@ -13,7 +13,7 @@
 fn test_range_i32() {
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::range(21,2,-3);
+    let y_0 = TensorTrait::range(21, 2, -3);
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/range_i8.cairo b/tests/nodes/range_i8.cairo
index 90c9917cd..088a2cd7d 100644
--- a/tests/nodes/range_i8.cairo
+++ b/tests/nodes/range_i8.cairo
@@ -13,7 +13,7 @@
 fn test_range_i8() {
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::range(-1,25,3);
+    let y_0 = TensorTrait::range(-1, 25, 3);
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/range_u32.cairo b/tests/nodes/range_u32.cairo
index fcaa30ca0..aacb50f4b 100644
--- a/tests/nodes/range_u32.cairo
+++ b/tests/nodes/range_u32.cairo
@@ -13,7 +13,7 @@
 fn test_range_u32() {
     let z_0 = output_0::output_0();
 
-    let y_0 = TensorTrait::range(1,25,3);
+    let y_0 = TensorTrait::range(1, 25, 3);
 
     assert_eq(y_0, z_0);
 }
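The Range rewrites above are whitespace-only, but the calls are worth a gloss. Under the ONNX Range semantics these tests target (start inclusive, limit exclusive, arithmetic step -- my summary; the output fixtures are not shown in these hunks), the i32 case counts downwards:

    let t = TensorTrait::range(21, 2, -3);
    // expected elements: [21, 18, 15, 12, 9, 6, 3]
    // count = ceil((2 - 21) / -3) = 7; the next value, 0, is not > 2
    // likewise range(1, 25, 3) would yield [1, 4, 7, 10, 13, 16, 19, 22]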
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo
new file mode 100644
index 000000000..ae11dfc55
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo
@@ -0,0 +1,20 @@
+mod input_0;
+mod output_0;
+
+
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::operators::tensor::FP32x32TensorPartialEq;
+
+#[test]
+#[available_gas(2000000000)]
+fn test_reduce_log_sum_exp_fp32x32_export_do_not_keepdims() {
+    let input_0 = input_0::input_0();
+    let z = output_0::output_0();
+
+    let y = input_0.reduce_log_sum_exp(2, false);
+
+    assert_eq(y, z);
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo
new file mode 100644
index 000000000..d6f2475e3
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo
@@ -0,0 +1,26 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn input_0() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(2);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 4294967296, sign: false });
+    data.append(FP32x32 { mag: 8589934592, sign: false });
+    data.append(FP32x32 { mag: 12884901888, sign: false });
+    data.append(FP32x32 { mag: 17179869184, sign: false });
+    data.append(FP32x32 { mag: 21474836480, sign: false });
+    data.append(FP32x32 { mag: 25769803776, sign: false });
+    data.append(FP32x32 { mag: 30064771072, sign: false });
+    data.append(FP32x32 { mag: 34359738368, sign: false });
+    data.append(FP32x32 { mag: 38654705664, sign: false });
+    data.append(FP32x32 { mag: 42949672960, sign: false });
+    data.append(FP32x32 { mag: 47244640256, sign: false });
+    data.append(FP32x32 { mag: 51539607552, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo
new file mode 100644
index 000000000..507762b24
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo
@@ -0,0 +1,19 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn output_0() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 9935383294, sign: false });
+    data.append(FP32x32 { mag: 18525317886, sign: false });
+    data.append(FP32x32 { mag: 27115252478, sign: false });
+    data.append(FP32x32 { mag: 35705187070, sign: false });
+    data.append(FP32x32 { mag: 44295121662, sign: false });
+    data.append(FP32x32 { mag: 52885056254, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo
new file mode 100644
index 000000000..d9aed9c39
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo
@@ -0,0 +1,20 @@
+mod input_0;
+mod output_0;
+
+
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::operators::tensor::FP32x32TensorPartialEq;
+
+#[test]
+#[available_gas(2000000000)]
+fn test_reduce_log_sum_exp_fp32x32_export_keepdims() {
+    let input_0 = input_0::input_0();
+    let z = output_0::output_0();
+
+    let y = input_0.reduce_log_sum_exp(2, true);
+
+    assert_eq(y, z);
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo
new file mode 100644
index 000000000..d6f2475e3
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo
@@ -0,0 +1,26 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn input_0() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(2);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 4294967296, sign: false });
+    data.append(FP32x32 { mag: 8589934592, sign: false });
+    data.append(FP32x32 { mag: 12884901888, sign: false });
+    data.append(FP32x32 { mag: 17179869184, sign: false });
+    data.append(FP32x32 { mag: 21474836480, sign: false });
+    data.append(FP32x32 { mag: 25769803776, sign: false });
+    data.append(FP32x32 { mag: 30064771072, sign: false });
+    data.append(FP32x32 { mag: 34359738368, sign: false });
+    data.append(FP32x32 { mag: 38654705664, sign: false });
+    data.append(FP32x32 { mag: 42949672960, sign: false });
+    data.append(FP32x32 { mag: 47244640256, sign: false });
+    data.append(FP32x32 { mag: 51539607552, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo
new file mode 100644
index 000000000..04adcc345
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo
@@ -0,0 +1,20 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn output_0() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(2);
+    shape.append(1);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 9935383294, sign: false });
+    data.append(FP32x32 { mag: 18525317886, sign: false });
+    data.append(FP32x32 { mag: 27115252478, sign: false });
+    data.append(FP32x32 { mag: 35705187070, sign: false });
+    data.append(FP32x32 { mag: 44295121662, sign: false });
+    data.append(FP32x32 { mag: 52885056254, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo
new file mode 100644
index 000000000..1b75f4815
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo
@@ -0,0 +1,20 @@
+mod input_0;
+mod output_0;
+
+
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::operators::tensor::FP32x32TensorPartialEq;
+
+#[test]
+#[available_gas(2000000000)]
+fn test_reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims() {
+    let input_0 = input_0::input_0();
+    let z = output_0::output_0();
+
+    let y = input_0.reduce_log_sum_exp(0, true);
+
+    assert_eq(y, z);
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo
new file mode 100644
index 000000000..d6f2475e3
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo
@@ -0,0 +1,26 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn input_0() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(3);
+    shape.append(2);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 4294967296, sign: false });
+    data.append(FP32x32 { mag: 8589934592, sign: false });
+    data.append(FP32x32 { mag: 12884901888, sign: false });
+    data.append(FP32x32 { mag: 17179869184, sign: false });
+    data.append(FP32x32 { mag: 21474836480, sign: false });
+    data.append(FP32x32 { mag: 25769803776, sign: false });
+    data.append(FP32x32 { mag: 30064771072, sign: false });
+    data.append(FP32x32 { mag: 34359738368, sign: false });
+    data.append(FP32x32 { mag: 38654705664, sign: false });
+    data.append(FP32x32 { mag: 42949672960, sign: false });
+    data.append(FP32x32 { mag: 47244640256, sign: false });
+    data.append(FP32x32 { mag: 51539607552, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo
new file mode 100644
index 000000000..75f47ee0f
--- /dev/null
+++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo
@@ -0,0 +1,18 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn output_0() -> Tensor<FP32x32> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(1);
+    shape.append(2);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP32x32 { mag: 38734073664, sign: false });
+    data.append(FP32x32 { mag: 43029040960, sign: false });
+    data.append(FP32x32 { mag: 47324008256, sign: false });
+    data.append(FP32x32 { mag: 51618975552, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
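For the reduce_log_sum_exp fixtures above, the operator reduces the given axis by

    y = \log \sum_i e^{x_i}

A quick consistency check (my arithmetic, not part of the patch): the input enumerates 1..12 in FP32x32 (mag = k · 2^32), so reducing axis 2 pairs (1, 2) first, and

    \log\big(e^{1} + e^{2}\big) = 1 + \log(1 + e) \approx 2.3132617, \qquad 2.3132617 \times 2^{32} \approx 9935383454,

which matches the first expected magnitude 9935383294 to within fixed-point rounding.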
diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo
index 108ef328f..74bf3636e 100644
--- a/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo
+++ b/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo
@@ -2,11 +2,11 @@ mod input_0;
 mod output_0;
 
 
-use core::array::{ArrayTrait, SpanTrait};
-use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
 use orion::operators::tensor::FP8x23Tensor;
 use orion::operators::tensor::FP8x23TensorPartialEq;
-use orion::utils::{assert_eq, assert_seq_eq};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
 
 #[test]
 #[available_gas(2000000000)]
diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo
index 5ee464e1c..19a7f1fac 100644
--- a/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo
+++ b/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo
@@ -2,11 +2,11 @@ mod input_0;
 mod output_0;
 
 
-use core::array::{ArrayTrait, SpanTrait};
-use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
 use orion::operators::tensor::FP8x23Tensor;
 use orion::operators::tensor::FP8x23TensorPartialEq;
-use orion::utils::{assert_eq, assert_seq_eq};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
 
 #[test]
 #[available_gas(2000000000)]
diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo
index 7f7fc7f98..f1764b328 100644
--- a/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo
+++ b/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo
@@ -2,11 +2,11 @@ mod input_0;
 mod output_0;
 
 
-use core::array::{ArrayTrait, SpanTrait};
-use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
 use orion::operators::tensor::FP8x23Tensor;
 use orion::operators::tensor::FP8x23TensorPartialEq;
-use orion::utils::{assert_eq, assert_seq_eq};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
 
 #[test]
 #[available_gas(2000000000)]
diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo
index 3f0adf3eb..4b6fde3fe 100644
--- a/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo
+++ b/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo
@@ -2,11 +2,11 @@ mod input_0;
 mod output_0;
 
 
-use core::array::{ArrayTrait, SpanTrait};
-use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
 use orion::operators::tensor::FP8x23Tensor;
 use orion::operators::tensor::FP8x23TensorPartialEq;
-use orion::utils::{assert_eq, assert_seq_eq};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
 
 #[test]
 #[available_gas(2000000000)]
diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo
index 5662f1510..f6fb67955 100644
--- a/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo
+++ b/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo
@@ -2,11 +2,11 @@ mod input_0;
 mod output_0;
 
 
-use core::array::{ArrayTrait, SpanTrait};
-use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
 use orion::operators::tensor::FP8x23Tensor;
 use orion::operators::tensor::FP8x23TensorPartialEq;
-use orion::utils::{assert_eq, assert_seq_eq};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
 
 #[test]
 #[available_gas(2000000000)]
diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo
index ec295a396..e4c4345b8 100644
--- a/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo
+++ b/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo
@@ -2,11 +2,11 @@ mod input_0;
 mod output_0;
 
 
-use core::array::{ArrayTrait, SpanTrait};
-use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::utils::{assert_eq, assert_seq_eq};
 use orion::operators::tensor::FP8x23Tensor;
 use orion::operators::tensor::FP8x23TensorPartialEq;
-use orion::utils::{assert_eq, assert_seq_eq};
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
 
 #[test]
 #[available_gas(2000000000)]
diff --git a/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo b/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo
index 1ae01e30b..e98d95345 100644
--- a/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo
+++ b/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_1_6() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![1].span(), array![5].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![1].span(), array![5].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo b/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo
index 3fc2a4d28..96ddce2b2 100644
--- a/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo
+++ b/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_2_4() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![2,2,2,2].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![2, 2, 2, 2].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo
index 254ade4de..b22a61862 100644
--- a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo
+++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_3x9_batch() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![3].span(), array![7,8,9].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![3].span(), array![7, 8, 9].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo
index aa8667bdd..f1e961f08 100644
--- a/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo
+++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_3x9_time() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![9].span(), array![3,2,3,2,3,2,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![9].span(), array![3, 2, 3, 2, 3, 2, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo b/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo
index 053f187e1..e1986a7b7 100644
--- a/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo
+++ b/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_4_5() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![5,4,3,2].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![5, 4, 3, 2].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo
index 9dcfd511c..697231f17 100644
--- a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_2d_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo
index 518db31ca..af77e1a55 100644
--- a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_2d_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo
index c1bcdbc6a..858c62e43 100644
--- a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo
index c2f27748e..4b576add6 100644
--- a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo
index 350607eae..85a21abf1 100644
--- a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_i32_2d_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo
index dd47c062e..489ebf8da 100644
--- a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_i32_2d_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo
index 86f1855e4..4ab9f9559 100644
--- a/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo
@@ -16,7 +16,12 @@ fn test_reverse_sequence_i32_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo
index 28c9d0a84..2280f61c3 100644
--- a/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo
@@ -16,7 +16,12 @@ fn test_reverse_sequence_i32_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo
index 7a9ebc438..a2f3e974c 100644
--- a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_i8_2d_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo
index e99616d89..bbb7556ce 100644
--- a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_i8_2d_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo
index 9b5afdecf..63434d7df 100644
--- a/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo
@@ -15,7 +15,12 @@ fn test_reverse_sequence_i8_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo
index a803ef02e..0f10bac72 100644
--- a/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo
@@ -16,7 +16,12 @@ fn test_reverse_sequence_i8_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_time_equal_parts.cairo b/tests/nodes/reverse_sequence_time_equal_parts.cairo
index a79efe4af..8e102a0ab 100644
--- a/tests/nodes/reverse_sequence_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_time_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo
index 9dcfc9735..ce8e125b9 100644
--- a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_2d_batch_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo
index d89e73242..d2d9606b0 100644
--- a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo
+++ b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_2d_time_equal_parts() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_3x3_batch.cairo b/tests/nodes/reverse_sequence_u32_3x3_batch.cairo
index e8ff9ca9d..a95260c33 100644
--- a/tests/nodes/reverse_sequence_u32_3x3_batch.cairo
+++ b/tests/nodes/reverse_sequence_u32_3x3_batch.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_3x3_batch() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![3].span(), array![3,1,2].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![3].span(), array![3, 1, 2].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_3x3_time.cairo b/tests/nodes/reverse_sequence_u32_3x3_time.cairo
index ac2b62361..12978fe32 100644
--- a/tests/nodes/reverse_sequence_u32_3x3_time.cairo
+++ b/tests/nodes/reverse_sequence_u32_3x3_time.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_3x3_time() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![3].span(), array![1,3,3].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![3].span(), array![1, 3, 3].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_4x4_batch.cairo b/tests/nodes/reverse_sequence_u32_4x4_batch.cairo
index ce124a89e..dafd62951 100644
--- a/tests/nodes/reverse_sequence_u32_4x4_batch.cairo
+++ b/tests/nodes/reverse_sequence_u32_4x4_batch.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_4x4_batch() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![1, 2, 3, 4].span()),
+            Option::Some(0),
+            Option::Some(1)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_4x4_time.cairo b/tests/nodes/reverse_sequence_u32_4x4_time.cairo
index a0d9ca889..d7e42272f 100644
--- a/tests/nodes/reverse_sequence_u32_4x4_time.cairo
+++ b/tests/nodes/reverse_sequence_u32_4x4_time.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_4x4_time() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<usize>::new(array![4].span(), array![4, 3, 2, 1].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );
 
     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/reverse_sequence_u32_zero_size.cairo b/tests/nodes/reverse_sequence_u32_zero_size.cairo
index cabda40b5..34f18742d 100644
--- a/tests/nodes/reverse_sequence_u32_zero_size.cairo
+++ b/tests/nodes/reverse_sequence_u32_zero_size.cairo
@@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_zero_size() {
     let input_0 = input_0::input_0();
     let z_0 = output_0::output_0();
 
-    let y_0 = input_0.reverse_sequence(TensorTrait::<usize>::new(array![0].span(), array![].span()), Option::Some(1), Option::Some(0));
+    let y_0 = input_0
+        .reverse_sequence(
+            TensorTrait::<u32>::new(array![0].span(), array![].span()),
+            Option::Some(1),
+            Option::Some(0)
+        );

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add.cairo
index 95b09a56d..d182c89d1 100644
--- a/tests/nodes/scatter_nd_fp16x16_3d_add.cairo
+++ b/tests/nodes/scatter_nd_fp16x16_3d_add.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_add() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('add'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp16x16_3d_default.cairo b/tests/nodes/scatter_nd_fp16x16_3d_default.cairo
index 80bb892de..5cec477a9 100644
--- a/tests/nodes/scatter_nd_fp16x16_3d_default.cairo
+++ b/tests/nodes/scatter_nd_fp16x16_3d_default.cairo
@@ -20,7 +20,7 @@ fn test_scatter_nd_fp16x16_3d_default() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()));
+    let y_0 = input_0.scatter_nd(updates: input_1, indices: input_2, reduction: Option::None(()));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max.cairo
index 84e99545c..5399cffa4 100644
--- a/tests/nodes/scatter_nd_fp16x16_3d_max.cairo
+++ b/tests/nodes/scatter_nd_fp16x16_3d_max.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_max() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('max'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min.cairo
index 9ee1c89b6..51f848437 100644
--- a/tests/nodes/scatter_nd_fp16x16_3d_min.cairo
+++ b/tests/nodes/scatter_nd_fp16x16_3d_min.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_min() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('min'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo
index 2d5716d98..64fcc74f4 100644
--- a/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo
+++ b/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_mul() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('mul'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add.cairo
index 3b748994a..f24e9bcd5 100644
--- a/tests/nodes/scatter_nd_fp8x23_3d_add.cairo
+++ b/tests/nodes/scatter_nd_fp8x23_3d_add.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_add() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('add'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default.cairo
index 75dc57f69..36e7ab220 100644
--- a/tests/nodes/scatter_nd_fp8x23_3d_default.cairo
+++ b/tests/nodes/scatter_nd_fp8x23_3d_default.cairo
@@ -20,7 +20,7 @@ fn test_scatter_nd_fp8x23_3d_default() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()));
+    let y_0 = input_0.scatter_nd(updates: input_1, indices: input_2, reduction: Option::None(()));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max.cairo b/tests/nodes/scatter_nd_fp8x23_3d_max.cairo
index d09351807..360dc10a3 100644
--- a/tests/nodes/scatter_nd_fp8x23_3d_max.cairo
+++ b/tests/nodes/scatter_nd_fp8x23_3d_max.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_max() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('max'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min.cairo b/tests/nodes/scatter_nd_fp8x23_3d_min.cairo
index dadc8d27d..39ba6a903 100644
--- a/tests/nodes/scatter_nd_fp8x23_3d_min.cairo
+++ b/tests/nodes/scatter_nd_fp8x23_3d_min.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_min() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('min'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo
index 853780f6c..bb1756dc2 100644
--- a/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo
+++ b/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo
@@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_mul() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('mul'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_u32_add.cairo b/tests/nodes/scatter_nd_u32_add.cairo
index cf3c4018b..c1112d27d 100644
--- a/tests/nodes/scatter_nd_u32_add.cairo
+++ b/tests/nodes/scatter_nd_u32_add.cairo
@@ -18,7 +18,8 @@ fn test_scatter_nd_u32_add() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('add'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_u32_default.cairo b/tests/nodes/scatter_nd_u32_default.cairo
index 076d44277..3d832f243 100644
--- a/tests/nodes/scatter_nd_u32_default.cairo
+++ b/tests/nodes/scatter_nd_u32_default.cairo
@@ -18,7 +18,7 @@ fn test_scatter_nd_u32_default() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()));
+    let y_0 = input_0.scatter_nd(updates: input_1, indices: input_2, reduction: Option::None(()));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_u32_max.cairo b/tests/nodes/scatter_nd_u32_max.cairo
index 5d3a4940d..e64e2b84a 100644
--- a/tests/nodes/scatter_nd_u32_max.cairo
+++ b/tests/nodes/scatter_nd_u32_max.cairo
@@ -18,7 +18,8 @@ fn test_scatter_nd_u32_max() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('max'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_u32_min.cairo b/tests/nodes/scatter_nd_u32_min.cairo
index 63033d6b7..51b7cb9af 100644
--- a/tests/nodes/scatter_nd_u32_min.cairo
+++ b/tests/nodes/scatter_nd_u32_min.cairo
@@ -18,7 +18,8 @@ fn test_scatter_nd_u32_min() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('min'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/scatter_nd_u32_mul.cairo b/tests/nodes/scatter_nd_u32_mul.cairo
index b5367e914..8d19773d9 100644
--- a/tests/nodes/scatter_nd_u32_mul.cairo
+++ b/tests/nodes/scatter_nd_u32_mul.cairo
@@ -18,7 +18,8 @@ fn test_scatter_nd_u32_mul() {
     let input_2 = input_2::input_2();
     let z_0 = output_0::output_0();

-    let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'));
+    let y_0 = input_0
+        .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('mul'));

     assert_eq(y_0, z_0);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo
index a1ac7a9ec..c585612c3 100644
--- a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_1d_equal_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo
index 42f0dc900..b10b573e3 100644
--- a/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_1d_uneven() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span())));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span()))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo
index 1c3faf614..010736d45 100644
--- a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo
@@ -14,7 +14,14 @@ fn test_split_to_sequence_fp16x16_1d_variable_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(
+                TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)
+            )
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo
index 96e743399..7e740f9d7 100644
--- a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_2d_equal_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            1,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo
index bfd1f8cec..2ca6dd030 100644
--- a/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_2d_uneven() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            1,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo
index 5cd4e1845..9cdd54aa0 100644
--- a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo
@@ -14,7 +14,14 @@ fn test_split_to_sequence_fp16x16_2d_variable_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            1,
+            1,
+            Option::Some(
+                TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)
+            )
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo b/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo
index e8ecfba30..a9f7bed3e 100644
--- a/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo
+++ b/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo
@@ -14,7 +14,14 @@ fn test_split_to_sequence_fp16x16_zero_size() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(
+                TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)
+            )
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo b/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo
index 9c14470b5..74995267f 100644
--- a/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo
+++ b/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_1d_equal_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_1d_uneven.cairo b/tests/nodes/split_to_sequence_u32_1d_uneven.cairo
index 0dfb5547f..f72378a1e 100644
--- a/tests/nodes/split_to_sequence_u32_1d_uneven.cairo
+++ b/tests/nodes/split_to_sequence_u32_1d_uneven.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_1d_uneven() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo b/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo
index 4df4fbee7..62a16a2e3 100644
--- a/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo
+++ b/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo
@@ -14,7 +14,14 @@ fn test_split_to_sequence_u32_1d_variable_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(
+                TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)
+            )
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo b/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo
index 24c06c857..8cd2fa373 100644
--- a/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo
+++ b/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_2d_equal_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            1,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_2d_uneven.cairo b/tests/nodes/split_to_sequence_u32_2d_uneven.cairo
index 7ab6604be..839576e6d 100644
--- a/tests/nodes/split_to_sequence_u32_2d_uneven.cairo
+++ b/tests/nodes/split_to_sequence_u32_2d_uneven.cairo
@@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_2d_uneven() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            1,
+            1,
+            Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),))
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo b/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo
index dc81b4325..1bb3b017b 100644
--- a/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo
+++ b/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo
@@ -14,7 +14,14 @@ fn test_split_to_sequence_u32_2d_variable_parts() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            1,
+            1,
+            Option::Some(
+                TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)
+            )
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/split_to_sequence_u32_zero_size.cairo b/tests/nodes/split_to_sequence_u32_zero_size.cairo
index 815ba7d4e..9f24d154d 100644
--- a/tests/nodes/split_to_sequence_u32_zero_size.cairo
+++ b/tests/nodes/split_to_sequence_u32_zero_size.cairo
@@ -14,7 +14,14 @@ fn test_split_to_sequence_u32_zero_size() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)));
+    let y = input_0
+        .split_to_sequence(
+            0,
+            1,
+            Option::Some(
+                TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)
+            )
+        );

     assert_seq_eq(y, z);
 }
diff --git a/tests/nodes/squeeze_fP16x16.cairo b/tests/nodes/squeeze_fP16x16.cairo
index b595e5100..83748ca1f 100644
--- a/tests/nodes/squeeze_fP16x16.cairo
+++ b/tests/nodes/squeeze_fP16x16.cairo
@@ -14,7 +14,7 @@ fn test_squeeze_fP16x16() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()));
+    let y = input_0.squeeze(Option::Some(array![0, 2].span()));

     assert(y.shape == z.shape, 'shapes do not match');
 }
diff --git a/tests/nodes/squeeze_fP8x23.cairo b/tests/nodes/squeeze_fP8x23.cairo
index 0ee6f8a15..57d418686 100644
--- a/tests/nodes/squeeze_fP8x23.cairo
+++ b/tests/nodes/squeeze_fP8x23.cairo
@@ -14,7 +14,7 @@ fn test_squeeze_fP8x23() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()));
+    let y = input_0.squeeze(Option::Some(array![0, 2].span()));

     assert(y.shape == z.shape, 'shapes do not match');
 }
diff --git a/tests/nodes/squeeze_i32.cairo b/tests/nodes/squeeze_i32.cairo
index ce880a31c..b4b09d25d 100644
--- a/tests/nodes/squeeze_i32.cairo
+++ b/tests/nodes/squeeze_i32.cairo
@@ -14,7 +14,7 @@ fn test_squeeze_i32() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()));
+    let y = input_0.squeeze(Option::Some(array![0, 2].span()));

     assert(y.shape == z.shape, 'shapes do not match');
 }
diff --git a/tests/nodes/squeeze_i8.cairo b/tests/nodes/squeeze_i8.cairo
index 1a5f9147f..0634f1213 100644
--- a/tests/nodes/squeeze_i8.cairo
+++ b/tests/nodes/squeeze_i8.cairo
@@ -14,7 +14,7 @@ fn test_squeeze_i8() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()));
+    let y = input_0.squeeze(Option::Some(array![0, 2].span()));

     assert(y.shape == z.shape, 'shapes do not match');
 }
diff --git a/tests/nodes/squeeze_u32.cairo b/tests/nodes/squeeze_u32.cairo
index 15939db4a..bf90027b6 100644
--- a/tests/nodes/squeeze_u32.cairo
+++ b/tests/nodes/squeeze_u32.cairo
@@ -14,7 +14,7 @@ fn test_squeeze_u32() {
     let input_0 = input_0::input_0();
     let z = output_0::output_0();

-    let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()));
+    let y = input_0.squeeze(Option::Some(array![0, 2].span()));

     assert(y.shape == z.shape, 'shapes do not match');
 }
diff --git a/tests/operators/optional/optional_get_element_test.cairo b/tests/operators/optional/optional_get_element_test.cairo
index 576e12cd6..ef6c68097 100644
--- a/tests/operators/optional/optional_get_element_test.cairo
+++ b/tests/operators/optional/optional_get_element_test.cairo
@@ -14,17 +14,7 @@ fn optional_get_element_i8_test() {
         i8
     >::new(
         shape: array![4, 2].span(),
-        data: array![
-            1_i8,
-            2_i8,
-            3_i8,
-            4_i8,
-            5_i8,
-            6_i8,
-            7_i8,
-            8_i8
-        ]
-            .span(),
+        data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(),
     );
     let ele = optional_get_element(a.optional());

@@ -67,4 +57,4 @@
     assert(*(ele.data).at(5) == *(a.data).at(5), 'ele[5] == a[5]');
     assert(*(ele.data).at(6) == *(a.data).at(6), 'ele[6] == a[6]');
     assert(*(ele.data).at(7) == *(a.data).at(7), 'ele[7] == a[7]');
-}
\ No newline at end of file
+}
diff --git a/tests/operators/optional/optional_has_element_test.cairo b/tests/operators/optional/optional_has_element_test.cairo
index f08bdcc73..7aedbb9bd 100644
--- a/tests/operators/optional/optional_has_element_test.cairo
+++ b/tests/operators/optional/optional_has_element_test.cairo
@@ -14,17 +14,7 @@ fn optional_has_element_i8_test() {
         i8
     >::new(
         shape: array![4, 2].span(),
-        data: array![
-            1_i8,
-            2_i8,
-            3_i8,
-            4_i8,
-            5_i8,
-            6_i8,
-            7_i8,
-            8_i8
-        ]
-            .span(),
+        data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(),
     );
     let a_optional = a.optional();
     let has_ele = optional_has_element(a_optional);

@@ -64,4 +54,4 @@
     let has_ele = optional_has_element(a);

     assert(*(has_ele.data).at(0) == false, 'has_ele[0] == false');
-}
\ No newline at end of file
+}
diff --git a/tests/operators/optional/optional_test.cairo b/tests/operators/optional/optional_test.cairo
index 3632e173a..06c31e3b4 100644
--- a/tests/operators/optional/optional_test.cairo
+++ b/tests/operators/optional/optional_test.cairo
@@ -14,28 +14,42 @@ fn optional_i8_test() {
         i8
     >::new(
         shape: array![4, 2].span(),
-        data: array![
-            1_i8,
-            2_i8,
-            3_i8,
-            4_i8,
-            5_i8,
-            6_i8,
-            7_i8,
-            8_i8
-        ]
-            .span(),
+        data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(),
     );
     let a_optional = a.optional();

-    assert(*(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), 'a_optional[0] == Option(a)[0]');
-    assert(*(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), 'a_optional[1] == Option(a)[1]');
-    assert(*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), 'a_optional[2] == Option(a)[2]');
-    assert(*(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), 'a_optional[3] == Option(a)[3]');
-    assert(*(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), 'a_optional[4] == Option(a)[4]');
-    assert(*(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), 'a_optional[5] == Option(a)[5]');
-    assert(*(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), 'a_optional[6] == Option(a)[6]');
-    assert(*(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), 'a_optional[7] == Option(a)[7]');
+    assert(
+        *(optional_get_element(a_optional).data).at(0) == *(a.data).at(0),
+        'a_optional[0] == Option(a)[0]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(1) == *(a.data).at(1),
+        'a_optional[1] == Option(a)[1]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(2) == *(a.data).at(2),
+        'a_optional[2] == Option(a)[2]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(3) == *(a.data).at(3),
+        'a_optional[3] == Option(a)[3]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(4) == *(a.data).at(4),
+        'a_optional[4] == Option(a)[4]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(5) == *(a.data).at(5),
+        'a_optional[5] == Option(a)[5]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(6) == *(a.data).at(6),
+        'a_optional[6] == Option(a)[6]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(7) == *(a.data).at(7),
+        'a_optional[7] == Option(a)[7]'
+    );
 }

 #[test]
@@ -59,12 +73,36 @@ fn optional_fp16x16_test() {
     );
     let a_optional = a.optional();

-    assert(*(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), 'a_optional[0] == Option(a)[0]');
-    assert(*(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), 'a_optional[1] == Option(a)[1]');
-    assert(*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), 'a_optional[2] == Option(a)[2]');
-    assert(*(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), 'a_optional[3] == Option(a)[3]');
-    assert(*(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), 'a_optional[4] == Option(a)[4]');
-    assert(*(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), 'a_optional[5] == Option(a)[5]');
-    assert(*(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), 'a_optional[6] == Option(a)[6]');
-    assert(*(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), 'a_optional[7] == Option(a)[7]');
-}
\ No newline at end of file
+    assert(
+        *(optional_get_element(a_optional).data).at(0) == *(a.data).at(0),
+        'a_optional[0] == Option(a)[0]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(1) == *(a.data).at(1),
+        'a_optional[1] == Option(a)[1]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(2) == *(a.data).at(2),
+        'a_optional[2] == Option(a)[2]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(3) == *(a.data).at(3),
+        'a_optional[3] == Option(a)[3]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(4) == *(a.data).at(4),
+        'a_optional[4] == Option(a)[4]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(5) == *(a.data).at(5),
+        'a_optional[5] == Option(a)[5]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(6) == *(a.data).at(6),
+        'a_optional[6] == Option(a)[6]'
+    );
+    assert(
+        *(optional_get_element(a_optional).data).at(7) == *(a.data).at(7),
+        'a_optional[7] == Option(a)[7]'
+    );
+}
diff --git a/tests/performance.cairo b/tests/performance.cairo
index da71869ed..14012d1cb 100644
--- a/tests/performance.cairo
+++ b/tests/performance.cairo
@@ -1,3 +1,3 @@
 mod quantize_linear_test;
 mod dequantize_linear_test;
-mod dynamic_quantize_linear_test;
\ No newline at end of file
+mod dynamic_quantize_linear_test;
diff --git a/tests/performance/dynamic_quantize_linear_test.cairo b/tests/performance/dynamic_quantize_linear_test.cairo
index bbd43eb29..dc6c5315b 100644
--- a/tests/performance/dynamic_quantize_linear_test.cairo
+++ b/tests/performance/dynamic_quantize_linear_test.cairo
@@ -1 +1 @@
-mod dynamic_quantize_linear_fp_test;
\ No newline at end of file
+mod dynamic_quantize_linear_fp_test;
diff --git a/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo b/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo
index e1817dff9..c16277b85 100644
--- a/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo
+++ b/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo
@@ -19,11 +19,11 @@ mod fp8x23 {
         shape.append(6);
         let mut data = ArrayTrait::<FP8x23>::new();
         data.append(FixedTrait::new(0, false));
-        data.append(FixedTrait::new(587203, false));  // 0.07
-        data.append(FixedTrait::new(838861, false));  // 0.1
-        data.append(FixedTrait::new(1677722, false));  // 0.2
-        data.append(FixedTrait::new(4194304, false));  // 0.5
-        data.append(FixedTrait::new(7549747, false));  // 0.9
+        data.append(FixedTrait::new(587203, false)); // 0.07
+        data.append(FixedTrait::new(838861, false)); // 0.1
+        data.append(FixedTrait::new(1677722, false)); // 0.2
+        data.append(FixedTrait::new(4194304, false)); // 0.5
+        data.append(FixedTrait::new(7549747, false)); // 0.9

         let x = TensorTrait::new(shape.span(), data.span());

@@ -61,12 +61,12 @@
         let mut shape = ArrayTrait::<u32>::new();
         shape.append(6);
         let mut data = ArrayTrait::<FP16x16>::new();
-        data.append(FixedTrait::new(10945, false));  // 0.167
-        data.append(FixedTrait::new(190054, false));  // 2.9
-        data.append(FixedTrait::new_unscaled(3, false));  // 3.0
-        data.append(FixedTrait::new(229376, false));  // 3.5
-        data.append(FixedTrait::new_unscaled(3, true));  // -3.0
-        data.append(FixedTrait::new(229376, true));  // -3.5
+        data.append(FixedTrait::new(10945, false)); // 0.167
+        data.append(FixedTrait::new(190054, false)); // 2.9
+        data.append(FixedTrait::new_unscaled(3, false)); // 3.0
+        data.append(FixedTrait::new(229376, false)); // 3.5
+        data.append(FixedTrait::new_unscaled(3, true)); // -3.0
+        data.append(FixedTrait::new(229376, true)); // -3.5

         let x = TensorTrait::new(shape.span(), data.span());