diff --git a/.all-contributorsrc b/.all-contributorsrc
index 8d4b73289..acd1673a4 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -287,6 +287,33 @@
"contributions": [
"code"
]
+ },
+ {
+ "login": "TAdev0",
+ "name": "Tristan",
+ "avatar_url": "https://avatars.githubusercontent.com/u/122918260?v=4",
+ "profile": "https://nodeguardians.io/character/98995858fd55 ",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "Gakunt",
+ "name": "Kugo",
+ "avatar_url": "https://avatars.githubusercontent.com/u/153402253?v=4",
+ "profile": "https://github.com/Gakunt",
+ "contributions": [
+ "doc"
+ ]
+ },
+ {
+ "login": "FriendlyLifeguard",
+ "name": "Beeyoung",
+ "avatar_url": "https://avatars.githubusercontent.com/u/55970530?v=4",
+ "profile": "http://alankang.xyz",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
@@ -296,4 +323,4 @@
"projectName": "orion",
"projectOwner": "gizatechxyz",
"commitType": "docs"
-}
\ No newline at end of file
+}
diff --git a/README.md b/README.md
index cc2cf1ef2..19888ec6f 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
# Orion: An Open-source Framework for Validity and ZK ML ✨
-[](#contributors-)
+[](#contributors-)
Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/).
@@ -57,7 +57,7 @@ For a full list of all authors and contributors, see [the contributors page](htt
This project is licensed under the **MIT license**.
-See [LICENSE](https://github.com/franalgaba/onnx-cairo/blob/main/LICENSE/README.md) for more information.
+See [LICENSE](https://github.com/franalgaba/onnx-cairo/blob/main/LICENSE) for more information.
## Contributors ✨
@@ -108,6 +108,9 @@ Thanks goes to these wonderful people:
 Vid Kersic 💻 |
 Trunks @ Carbonable 📖 |
 canacechan 💻 |
+  Tristan 💻 |
+  Kugo 📖 |
+  Beeyoung 💻 |
diff --git a/Scarb.toml b/Scarb.toml
index 463e4ac62..f05fa6649 100644
--- a/Scarb.toml
+++ b/Scarb.toml
@@ -1,6 +1,6 @@
[package]
name = "orion"
-version = "0.2.3"
+version = "0.2.4"
cairo-version = "2.5.3"
edition = "2023_10"
description = "ONNX Runtime in Cairo for verifiable ML inference using STARK"
diff --git a/docgen/src/main.rs b/docgen/src/main.rs
index fc780ad03..8d1f90f4b 100644
--- a/docgen/src/main.rs
+++ b/docgen/src/main.rs
@@ -90,6 +90,14 @@ fn main() {
let trait_name: &str = "SVMClassifierTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
+
+ // NORMALIZER DOC
+ let trait_path = "src/operators/ml/normalizer/normalizer.cairo";
+ let doc_path = "docs/framework/operators/machine-learning/normalizer";
+ let label = "normalizer";
+ let trait_name: &str = "NormalizerTrait";
+ doc_trait(trait_path, doc_path, label);
+ doc_functions(trait_path, doc_path, trait_name, label);
}
fn doc_trait(trait_path: &str, doc_path: &str, label: &str) {
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 16a00107b..1608069ae 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -3,6 +3,11 @@
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [Unreleased] - 2024-02-21
+
+## Added
+- Label Encoder.
## [Unreleased] - 2024-01-17
## Added
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index d867a96ba..477601b37 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -144,6 +144,7 @@
* [tensor.not](framework/operators/tensor/tensor.not.md)
* [tensor.erf](framework/operators/tensor/tensor.erf.md)
* [tensor.reduce\_log\_sum](framework/operators/tensor/tensor.reduce\_log\_sum.md)
+ * [tensor.reduce\_log\_sum\_exp](framework/operators/tensor/tensor.reduce\_log\_sum\_exp.md)
* [tensor.unique](framework/operators/tensor/tensor.unique.md)
* [tensor.compress](framework/operators/tensor/tensor.compress.md)
* [tensor.layer_normalization](framework/operators/tensor/tensor.layer_normalization.md)
@@ -157,6 +158,7 @@
* [tensor.hamming_window](framework/operators/tensor/tensor.hamming_window.md)
* [tensor.blackman_window](framework/operators/tensor/tensor.blackman_window.md)
* [tensor.random_uniform_like](framework/operators/tensor/tensor.random_uniform_like.md)
+ * [tensor.label_encoder](framework/operators/tensor/tensor.label_encoder.md)
* [Neural Network](framework/operators/neural-network/README.md)
* [nn.relu](framework/operators/neural-network/nn.relu.md)
* [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md)
@@ -198,6 +200,8 @@
* [sequence.sequence\_erase](framework/operators/sequence/sequence.sequence\_erase.md)
* [sequence.sequence\_insert](framework/operators/sequence/sequence.sequence\_insert.md)
* [sequence.concat\_from\_sequence](framework/operators/sequence/sequence.concat\_from\_sequence.md)
+ * [Normalizer](framework/operators/machine-learning/normalizer/README.md)
+ * [normalizer.predict](framework/operators/machine-learning/normalizer/normalizer.predict.md)
## π Hub
diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md
index a05d3bbce..f3f84ac3f 100644
--- a/docs/framework/compatibility.md
+++ b/docs/framework/compatibility.md
@@ -88,7 +88,7 @@ You can see below the list of current supported ONNX Operators:
| [Max](operators/tensor/tensor.max.md) | :white\_check\_mark: |
| [ReduceSumSquare](operators/tensor/tensor.reduce\_sum\_square.md) | :white\_check\_mark: |
| [Trilu](operators/tensor/tensor.trilu.md) | :white\_check\_mark: |
-| [Scatter](operators/tensor/scatter.max.md) | :white\_check\_mark: |
+| [Scatter](operators/tensor/tensor.scatter.md) | :white\_check\_mark: |
| [ArrayFeatureExtractor](operators/tensor/tensor.array\_feature\_extractor.md) | :white\_check\_mark: |
| [Binarizer](operators/tensor/tensor.binarizer.md) | :white\_check\_mark: |
| [ConstantOfShape](operators/tensor/tensor.constant_of_shape.md) | :white\_check\_mark: |
@@ -111,6 +111,7 @@ You can see below the list of current supported ONNX Operators:
| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: |
| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: |
| [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: |
+| [ReduceLogSumExp](operators/tensor/tensor.reduce\_log\_sum\_exp.md) | :white\_check\_mark: |
| [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: |
| [ScatterND](operators/tensor/tensor.scatter\_nd.md) | :white\_check\_mark: |
| [DequantizeLinear](operators/tensor/tensor.dequantize_linear.md) | :white\_check\_mark: |
@@ -124,5 +125,6 @@ You can see below the list of current supported ONNX Operators:
| [HammingWindow](operators/tensor/tensor.hamming_window.md) | :white\_check\_mark: |
| [BlackmanWindow](operators/tensor/tensor.blackman_window.md) | :white\_check\_mark: |
| [RandomUniformLike](operators/tensor/tensor.random_uniform_like.md) | :white\_check\_mark: |
+| [LabelEncoder](operators/tensor/tensor.label_encoder.md) | :white\_check\_mark: |
-Current Operators support: **117/156 (75%)**
+Current Operators support: **118/156 (76%)**
diff --git a/docs/framework/numbers/fixed-point/README.md b/docs/framework/numbers/fixed-point/README.md
index f30122676..4ecc2246f 100644
--- a/docs/framework/numbers/fixed-point/README.md
+++ b/docs/framework/numbers/fixed-point/README.md
@@ -69,6 +69,7 @@ use orion::numbers::fixed_point::core::FixedTrait;
| [`fp.sinh`](fp.sinh.md) | Returns the value of the hyperbolic sine of the fixed point number. |
| [`fp.tanh`](fp.tanh.md) | Returns the value of the hyperbolic tangent of the fixed point number. |
| [`fp.sign`](fp.sign.md) | Returns the element-wise indication of the sign of the input fixed point number. |
+| [`fp.erf`](fp.erf.md) | Returns the error function of the input fixed point number computed element-wise. |
### Arithmetic & Comparison operators
diff --git a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
index aec154f68..7ed30f236 100644
--- a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
+++ b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
@@ -1,7 +1,7 @@
# LinearClassifierTrait::predict
```rust
- fn predict(ref self: LinearClassifier<T>, X: Tensor<T>) -> Tensor<T>;
+ fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> Tensor<T>;
```
Linear Classifier. Performs the linear classification.
@@ -85,7 +85,7 @@ fn linear_classifier_helper(
fn linear_classifier_multi_softmax() -> (Span, Tensor) {
let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);
- let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X);
+ let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
(labels, scores)
}
diff --git a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md
index f1bd38831..6c40ac930 100644
--- a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md
+++ b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md
@@ -1,14 +1,14 @@
# LinearRegressorTrait::predict
```rust
- fn predict(ref self: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
+ fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
```
Linear Regressor. Performs the generalized linear regression evaluation.
## Args
-* `self`: LinearRegressor - A LinearRegressor object.
+* `regressor`: LinearRegressor - A LinearRegressor object.
* `X`: Input 2D tensor.
## Returns
@@ -68,7 +68,7 @@ fn example_linear_regressor() -> Tensor {
post_transform
};
- let scores = LinearRegressorTrait::predict(ref regressor, X);
+ let scores = LinearRegressorTrait::predict(regressor, X);
scores
}
@@ -120,7 +120,7 @@ fn example_linear_regressor_2() -> Tensor {
post_transform
};
- let scores = LinearRegressorTrait::predict(ref regressor, X);
+ let scores = LinearRegressorTrait::predict(regressor, X);
scores
}
diff --git a/docs/framework/operators/machine-learning/normalizer/README.md b/docs/framework/operators/machine-learning/normalizer/README.md
new file mode 100644
index 000000000..5b31584eb
--- /dev/null
+++ b/docs/framework/operators/machine-learning/normalizer/README.md
@@ -0,0 +1,23 @@
+# Normalizer
+
+`NormalizerTrait` computes the normalization of the input; each row of the input is normalized independently.
+
+```rust
+use orion::operators::ml::NormalizerTrait;
+```
+
+### Data types
+
+Orion currently supports only fixed point data types for `NormalizerTrait`.
+
+| Data type | dtype |
+| -------------------- | ------------------------------------------------------------- |
+| Fixed point (signed) | `NormalizerTrait` |
+
+
+***
+
+| function | description |
+| --- | --- |
+| [`normalizer.predict`](normalizer.predict.md) | Returns the normalization of the input; each row of the input is normalized independently. |
+
diff --git a/docs/framework/operators/machine-learning/normalizer/normalizer.predict.md b/docs/framework/operators/machine-learning/normalizer/normalizer.predict.md
new file mode 100644
index 000000000..93a603e4b
--- /dev/null
+++ b/docs/framework/operators/machine-learning/normalizer/normalizer.predict.md
@@ -0,0 +1,61 @@
+# Normalizer::predict
+
+```rust
+ fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
+```
+
+Returns the normalized input.
+Three different types of normalization can be performed, defined as follows:
+MAX: $Y = \frac{X}{max(X)}$
+L1: $Y = \frac{X}{sum(X)}$
+L2: $Y = \frac{X}{\sqrt{sum(X^2)}}$
+For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.
+
+## Args
+
+* `X`(`@Tensor<T>`) - Input 2D tensor.
+* `norm`(`NORM`) - NORM::MAX, NORM::L1 or NORM::L2.
+
+
+## Returns
+
+* `Tensor<T>` - Output tensor.
+
+## Examples
+
+```rust
+use orion::numbers::FP16x16;
+use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq};
+
+use orion::operators::ml::normalizer::normalizer::{
+ NormalizerTrait, NORM
+};
+
+
+
+fn normalizer_max() -> Tensor<FP16x16> {
+ let mut shape = ArrayTrait::<usize>::new();
+ shape.append(3);
+ shape.append(3);
+
+ let mut data = ArrayTrait::new();
+ data.append(FP16x16 { mag: 65536, sign: true });
+ data.append(FP16x16 { mag: 52428, sign: true });
+ data.append(FP16x16 { mag: 39321, sign: true });
+ data.append(FP16x16 { mag: 26214, sign: true });
+ data.append(FP16x16 { mag: 13107, sign: true });
+ data.append(FP16x16 { mag: 0, sign: false });
+ data.append(FP16x16 { mag: 13107, sign: false });
+ data.append(FP16x16 { mag: 26214, sign: false });
+ data.append(FP16x16 { mag: 39321, sign: false });
+
+ let X = TensorTrait::new(shape.span(), data.span());
+
+ return NormalizerTrait::predict(X, NORM::MAX);
+}
+>>> [[-1. -0.8 -0.6 ]
+ [-1. -0.5 0. ]
+ [ 0.3333333 0.6666666 1. ]]
+
+```
+
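+Off-chain, the three norms are easy to sanity-check with numpy. A hedged
+sketch reproducing the MAX example above (plain Python, not part of the
+Orion API):
+
+```python
+import numpy as np
+
+x = np.array([[-1.0, -0.8, -0.6],
+              [-0.4, -0.2,  0.0],
+              [ 0.2,  0.4,  0.6]])
+
+# MAX: each row divided by its largest absolute value
+# (this is what reproduces the Cairo output above).
+max_norm = x / np.abs(x).max(axis=1, keepdims=True)
+# L1: each row divided by the sum of absolute values.
+l1_norm = x / np.abs(x).sum(axis=1, keepdims=True)
+# L2: each row divided by its Euclidean norm.
+l2_norm = x / np.sqrt((x ** 2).sum(axis=1, keepdims=True))
+
+print(max_norm)
+# [[-1.         -0.8        -0.6       ]
+#  [-1.         -0.5         0.        ]
+#  [ 0.33333333  0.66666667  1.        ]]
+```
+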
diff --git a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md
index 6d839e873..c38f3e46d 100644
--- a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md
+++ b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md
@@ -1,7 +1,7 @@
# TreeEnsembleClassifier::predict
```rust
- fn predict(ref self: TreeEnsembleClassifier<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
+ fn predict(classifier: TreeEnsembleClassifier<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
```
Tree Ensemble classifier. Returns the top class for each of N inputs.
@@ -185,7 +185,7 @@ fn tree_ensemble_classifier_helper(
fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span, MutMatrix::) {
let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX);
- let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+ let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
(labels, scores)
}
diff --git a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md
index 812115971..243bda558 100644
--- a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md
+++ b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md
@@ -1,7 +1,7 @@
# TreeEnsembleRegressor::predict
```rust
- fn predict(ref self: TreeEnsembleRegressor<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
+ fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
```
Tree Ensemble regressor. Returns the regressed values for each input in N.
@@ -160,7 +160,7 @@ fn tree_ensemble_regressor_helper(
fn test_tree_ensemble_regressor_SUM() -> MutMatrix:: {
let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM);
- let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+ let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
res
}
>>>
diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md
index b24ad9e40..fc3bfb612 100644
--- a/docs/framework/operators/neural-network/README.md
+++ b/docs/framework/operators/neural-network/README.md
@@ -37,6 +37,6 @@ Orion supports currently these `NN` types.
| [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. |
| [`nn.grid_sample`](nn.grid\_sample.md) | Computes the grid sample of the input tensor and input grid. |
| [`nn.col2im`](nn.col2im.md) | Rearranges column blocks back into a multidimensional image |
-| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weigth tensor. |
-| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weigth tensor. |
+| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weight tensor. |
+| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weight tensor. |
diff --git a/docs/framework/operators/neural-network/nn.col2im.md b/docs/framework/operators/neural-network/nn.col2im.md
index fd5e82ffa..6c7b1af05 100644
--- a/docs/framework/operators/neural-network/nn.col2im.md
+++ b/docs/framework/operators/neural-network/nn.col2im.md
@@ -1,4 +1,3 @@
-
# NNTrait::col2im
```rust
diff --git a/docs/framework/operators/neural-network/nn.conv.md b/docs/framework/operators/neural-network/nn.conv.md
index 086737f0b..fd7d53010 100644
--- a/docs/framework/operators/neural-network/nn.conv.md
+++ b/docs/framework/operators/neural-network/nn.conv.md
@@ -15,7 +15,7 @@
) -> Tensor
```
-The convolution operator consumes an input tensor and a filter (input weigth tensor), and computes the output.
+The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output.
## Args
diff --git a/docs/framework/operators/neural-network/nn.conv_transpose.md b/docs/framework/operators/neural-network/nn.conv_transpose.md
index 29b2af6d2..83082fd94 100644
--- a/docs/framework/operators/neural-network/nn.conv_transpose.md
+++ b/docs/framework/operators/neural-network/nn.conv_transpose.md
@@ -16,7 +16,7 @@
) -> Tensor
```
-The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output.
+The convolution transpose operator consumes an input tensor and a input weight tensor, and computes the output.
## Args
diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md
index 46de5f3ad..fe2995096 100644
--- a/docs/framework/operators/tensor/README.md
+++ b/docs/framework/operators/tensor/README.md
@@ -118,6 +118,7 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.gather_nd`](tensor.gather\_nd.md) | Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b. |
| [`tensor.reduce_log_sum`](tensor.reduce\_log\_sum.md) | Computes the log sum of the input tensor's elements along the provided axes. |
| [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. |
+| [`tensor.reduce_log_sum_exp`](tensor.reduce\_log\_sum\_exp.md) | Computes the log sum of the exponentials of the input tensor's elements along the provided axes. |
| [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. |
| [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified 'axis'. |
| [`tensor.random_uniform_like`](tensor.random\_uniform\_like.md) | RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor. |
@@ -130,6 +131,7 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.optional`](tensor.optional.md) | Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, or a non-empty value containing the input element. |
| [`tensor.dynamic_quantize_linear`](tensor.dynamic\_quantize\_linear.md) | Computes the Scale, Zero Point and FP32->8Bit conversion of FP32 Input data. |
| [`tensor.scatter_nd`](tensor.scatter\_nd.md) | The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data |
+| [`tensor.label_encoder`](tensor.label\_encoder.md) | Maps each element in the input tensor to another value. |
## Arithmetic Operations
diff --git a/docs/framework/operators/tensor/scatter.max.md b/docs/framework/operators/tensor/scatter.max.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/framework/operators/tensor/tensor.label_encoder.md b/docs/framework/operators/tensor/tensor.label_encoder.md
new file mode 100644
index 000000000..20bfd212e
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.label_encoder.md
@@ -0,0 +1,110 @@
+# tensor.label_encoder
+
+```rust
+fn label_encoder(self: @Tensor<T>, default_list: Option<Span<T>>, default_tensor: Option<Tensor<T>>, keys: Option<Span<T>>, keys_tensor: Option<Tensor<T>>, values: Option<Span<T>>, values_tensor: Option<Tensor<T>>) -> Tensor<T>;
+```
+
+Maps each element in the input tensor to another value.
+
+The mapping is determined by the two parallel attributes, 'keys_*' and 'values_*'.
+The i-th value in the specified 'keys_*' attribute is mapped to the i-th value in the specified 'values_*' attribute.
+This implies that the input's element type and the element type of the specified 'keys_*' must be identical, while the output type is identical to that of the specified 'values_*' attribute.
+
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `default_list`(`Option<Span<T>>`) - The default span.
+* `default_tensor`(`Option<Tensor<T>>`) - The default tensor.
+* `keys`(`Option<Span<T>>`) - The keys span.
+* `keys_tensor`(`Option<Tensor<T>>`) - The keys tensor.
+* `values`(`Option<Span<T>>`) - The values span.
+* `values_tensor`(`Option<Tensor<T>>`) - The values tensor.
+
+One and only one of 'default_*'s should be set.
+One and only one of 'keys_*'s should be set.
+One and only one of 'values_*'s should be set.
+
+## Panics
+
+* Panics if the len/shape of keys and values are not the same.
+
+## Returns
+
+A new `Tensor<T>` which maps each element in the input tensor to another value.
+
+## Type Constraints
+
+* `T` in (`Tensor<FP8x23>`, `Tensor<FP16x16>`, `Tensor<i8>`, `Tensor<i32>`, `Tensor<u32>`)
+
+## Examples
+
+```rust
+use array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
+
+fn label_encoder_example() -> Tensor<u32> {
+ fn data() -> Tensor<u32> {
+ let mut sizes = ArrayTrait::new();
+ sizes.append(2);
+ sizes.append(3);
+ let mut data = ArrayTrait::new();
+ data.append(1);
+ data.append(2);
+ data.append(3);
+ data.append(1);
+ data.append(4);
+ data.append(5);
+
+ let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+ return tensor;
+ }
+
+ fn keys() -> Tensor<u32> {
+ let mut sizes = ArrayTrait::new();
+ sizes.append(3);
+ sizes.append(1);
+
+ let mut data = ArrayTrait::new();
+ data.append(1);
+ data.append(2);
+ data.append(1);
+
+ let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+ return tensor;
+ }
+
+ fn values() -> Tensor<u32> {
+ let mut sizes = ArrayTrait::new();
+ sizes.append(3);
+ sizes.append(1);
+
+ let mut data = ArrayTrait::new();
+ data.append(8);
+ data.append(9);
+ data.append(7);
+
+ let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+ return tensor;
+ }
+
+ fn default() -> Tensor<u32> {
+ let mut sizes = ArrayTrait::new();
+ sizes.append(1);
+
+ let mut data = ArrayTrait::new();
+ data.append(999);
+
+ let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
+ return tensor;
+ }
+
+ let data = data();
+ let keys = keys();
+ let values = values();
+ let default = default();
+ return data.label_encoder(default_list: Option::None, default_tensor: Option::Some(default),
+ keys: Option::None, keys_tensor: Option::Some(keys),
+ values: Option::None, values_tensor: Option::Some(values));
+}
+>>> [7, 9, 999, 7, 999, 999]
+```
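+
+The mapping semantics can be reproduced off-chain with a plain dictionary.
+A hedged numpy sketch of the example above (not part of the Orion API):
+
+```python
+import numpy as np
+
+x = np.array([1, 2, 3, 1, 4, 5])
+keys = np.array([1, 2, 1])
+values = np.array([8, 9, 7])
+default = 999
+
+# Later duplicate keys overwrite earlier ones, so key 1 maps to 7 here.
+table = dict(zip(keys, values))
+mapped = np.array([table.get(v, default) for v in x])
+
+print(mapped)  # [  7   9 999   7 999 999]
+```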
diff --git a/docs/framework/operators/tensor/tensor.min.md b/docs/framework/operators/tensor/tensor.min.md
index 92bc2d150..12deae2e3 100644
--- a/docs/framework/operators/tensor/tensor.min.md
+++ b/docs/framework/operators/tensor/tensor.min.md
@@ -4,7 +4,7 @@
fn min(tensors: Span<Tensor<T>>) -> Tensor<T>;
```
-Returns the element-wise minumum values from a list of input tensors
+Returns the element-wise minimum values from a list of input tensors
The input tensors must have either:
* Exactly the same shape
* The same number of dimensions and the length of each dimension is either a common length or 1.
diff --git a/docs/framework/operators/tensor/tensor.qlinear_add.md b/docs/framework/operators/tensor/tensor.qlinear_add.md
index b89987c21..bb997d9c3 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_add.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_add.md
@@ -8,7 +8,7 @@ Performs the sum of quantized Tensors
It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output.
The quantization formula is y = saturate((x / y_scale) + y_zero_point).
-It perfoms the addition of the two vectors once dequantized, then return the quantization of the result of the addition.
+It performs the addition of the two vectors once dequantized, then returns the quantization of the result of the addition.
The broadcasting is supported
Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b').
Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization.
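+
+As a hedged off-chain sketch of the dequantize-add-requantize pipeline the
+paragraph above describes (u8 quantization with per-tensor scales; a
+hypothetical helper, not the Orion API itself):
+
+```python
+import numpy as np
+
+def qlinear_add(a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp):
+    # Dequantize both operands, add in real space, then requantize.
+    real = (a.astype(np.int32) - a_zp) * a_scale \
+         + (b.astype(np.int32) - b_zp) * b_scale
+    y = np.rint(real / y_scale) + y_zp
+    return np.clip(y, 0, 255).astype(np.uint8)  # saturate
+
+a = np.array([10, 20, 30], dtype=np.uint8)
+b = np.array([5, 5, 5], dtype=np.uint8)
+print(qlinear_add(a, 0.1, 0, b, 0.1, 0, 0.1, 0))  # [15 25 35]
+```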
diff --git a/docs/framework/operators/tensor/tensor.qlinear_matmul.md b/docs/framework/operators/tensor/tensor.qlinear_matmul.md
index b5928a8bb..eb52f4c03 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_matmul.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_matmul.md
@@ -8,7 +8,7 @@ Multiplies quantized Tensors
It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output.
The quantization formula is y = saturate((x / y_scale) + y_zero_point).
-It perfoms the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes.
+It performs the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes.
Then return the quantization of the result of the multiplication.
Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b').
Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization.
diff --git a/docs/framework/operators/tensor/tensor.qlinear_mul.md b/docs/framework/operators/tensor/tensor.qlinear_mul.md
index e1877a137..aeedd3365 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_mul.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_mul.md
@@ -8,7 +8,7 @@ Performs the element-wise multiplication of quantized Tensors
It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output.
The quantization formula is y = saturate((x / y_scale) + y_zero_point).
-It perfoms the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication.
+It performs the element-wise multiplication of the two vectors once dequantized, then returns the quantization of the result of the multiplication.
The broadcasting is supported
Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b').
Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization.
diff --git a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md
new file mode 100644
index 000000000..8befd8c43
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md
@@ -0,0 +1,60 @@
+# tensor.reduce_log_sum_exp
+
+```rust
+ fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
+```
+
+Computes the log sum of the exponentials of the input tensor's elements along the provided axes.
+
+## Args
+* `self`(`@Tensor<T>`) - The input tensor.
+* `axis`(`usize`) - The dimension to reduce.
+* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
+
+## Panics
+
+* Panics if axis is not in the range of the input tensor's dimensions.
+
+## Returns
+
+Returns a new `Tensor<T>` instance with the specified axis reduced by taking the log of the sum of the exponentials of its elements.
+
+
+## Example
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP32x32Tensor;
+use orion::numbers::{FixedTrait, FP32x32};
+
+fn reduce_log_sum_exp() -> Tensor<FP32x32> {
+ let mut shape = ArrayTrait::<usize>::new();
+ shape.append(3);
+ shape.append(2);
+ shape.append(2);
+
+ let mut data = ArrayTrait::new();
+ data.append(FP32x32 { mag: 4294967296, sign: false });
+ data.append(FP32x32 { mag: 8589934592, sign: false });
+ data.append(FP32x32 { mag: 12884901888, sign: false });
+ data.append(FP32x32 { mag: 17179869184, sign: false });
+ data.append(FP32x32 { mag: 21474836480, sign: false });
+ data.append(FP32x32 { mag: 25769803776, sign: false });
+ data.append(FP32x32 { mag: 30064771072, sign: false });
+ data.append(FP32x32 { mag: 34359738368, sign: false });
+ data.append(FP32x32 { mag: 38654705664, sign: false });
+ data.append(FP32x32 { mag: 42949672960, sign: false });
+ data.append(FP32x32 { mag: 47244640256, sign: false });
+ data.append(FP32x32 { mag: 51539607552, sign: false });
+ let tensor = TensorTrait::<FP32x32>::new(shape.span(), data.span());
+
+ return tensor.reduce_log_sum_exp(axis: 2, keepdims: false);
+}
+
+>>> [[9215828, 16323477, 20115004], [22716772, 24699744, 26302432]]
+```
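+
+The same reduction is easy to check off-chain with numpy (a hedged sketch,
+not the Orion API):
+
+```python
+import numpy as np
+
+x = np.arange(1, 13, dtype=np.float64).reshape(3, 2, 2)
+
+# log-sum-exp over the last axis, dropping the reduced dimension
+y = np.log(np.sum(np.exp(x), axis=2, keepdims=False))
+
+print(y.shape)  # (3, 2)
+print(y[0])     # [2.31326169 4.31326169]
+```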
diff --git a/nodegen/helpers.py b/nodegen/helpers.py
index 9983f62e7..3508ac305 100644
--- a/nodegen/helpers.py
+++ b/nodegen/helpers.py
@@ -10,7 +10,7 @@
class FixedImpl(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
- FP64x64 = 'FP64x64'
+ FP32x32 = 'FP32x32'
@@ -20,14 +20,15 @@ def to_fp(x: np.ndarray, fp_impl: FixedImpl):
return (x * 2**23).astype(np.int64)
case FixedImpl.FP16x16:
return (x * 2**16).astype(np.int64)
- case FixedImpl.FP64x64:
- return (x * 2**64)
+ case FixedImpl.FP32x32:
+ return (x * 2**32).astype(np.int64)
+
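+# Illustrative sanity check (comment only): to_fp(np.array([1.5]), FixedImpl.FP32x32)
+# yields array([6442450944]), i.e. 1.5 * 2**32, matching the FP32x32 magnitude.
+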
class Dtype(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
- FP64x64 = 'FP64x64'
+ FP32x32 = 'FP32x32'
I8 = 'i8'
I32 = 'i32'
U32 = 'u32'
@@ -173,8 +174,8 @@ def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
return ["FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.FP16x16:
return ["FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
- case Dtype.FP64x64:
- return ["FP64x64 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
+ case Dtype.FP32x32:
+ return ["FP32x32 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.BOOL:
return [str(x).lower() for x in data.flatten()]
case Dtype.COMPLEX64:
@@ -253,6 +254,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
Dtype.FP16x16: ["orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}",],
Dtype.BOOL: ["orion::operators::tensor::BoolTensor",],
Dtype.COMPLEX64: ["orion::operators::tensor::Complex64Tensor",],
+ Dtype.FP32x32: ["orion::operators::tensor::FP32x32Tensor",],
}
@@ -280,6 +282,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
Dtype.I8: ["orion::operators::tensor::I8TensorPartialEq",],
Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq",],
Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq",],
+ Dtype.FP32x32: ["orion::operators::tensor::FP32x32TensorPartialEq",],
Dtype.BOOL: ["orion::operators::tensor::BoolTensorPartialEq",],
Dtype.COMPLEX64: ["orion::operators::tensor::Complex64TensorPartialEq",],
}
@@ -291,6 +294,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
Dtype.I8: ["orion::numbers::NumberTrait"],
Dtype.FP8x23: ["orion::numbers::{FixedTrait, FP8x23}",],
Dtype.FP16x16: ["orion::numbers::{FixedTrait, FP16x16}",],
+ Dtype.FP32x32: ["orion::numbers::{FixedTrait, FP32x32}",],
Dtype.BOOL: [],
Dtype.COMPLEX64: ["orion::numbers::{NumberTrait, complex64}",],
}
\ No newline at end of file
diff --git a/nodegen/node/label_encoder.py b/nodegen/node/label_encoder.py
new file mode 100644
index 000000000..d5f4407f0
--- /dev/null
+++ b/nodegen/node/label_encoder.py
@@ -0,0 +1,203 @@
+import numpy as np
+from nodegen.node import RunAll
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=R0913,R0914,W0221
+def labelEncoder( # type: ignore
+ x,
+ default_float=None,
+ default_int64=None,
+ default_string=None,
+ keys_floats=None,
+ keys_int64s=None,
+ keys_strings=None,
+ values_floats=None,
+ values_int64s=None,
+ values_strings=None,
+):
+ keys = keys_floats if keys_floats is not None else (keys_int64s if keys_int64s is not None else keys_strings)
+ values = values_floats if values_floats is not None else (values_int64s if values_int64s is not None else values_strings)
+
+ classes = dict(zip(keys, values))
+ if id(keys) == id(keys_floats):
+ cast = float
+ elif id(keys) == id(keys_int64s):
+ cast = int # type: ignore
+ else:
+ cast = str # type: ignore
+ if id(values) == id(values_floats):
+ defval = default_float
+ dtype = np.float32
+ elif id(values) == id(values_int64s):
+ defval = default_int64
+ dtype = np.int64 # type: ignore
+ else:
+ defval = default_string
+ if not isinstance(defval, str):
+ defval = ""
+ dtype = np.str_ # type: ignore
+ shape = x.shape
+ if len(x.shape) > 1:
+ x = x.flatten()
+ res = []
+ for i in range(0, x.shape[0]):
+ v = classes.get(cast(x[i]), defval)
+ res.append(v)
+ return np.array(res, dtype=dtype).reshape(shape)
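+
+# Quick sanity check (hypothetical call, mirroring the tests below):
+# labelEncoder(x=np.array([1, 2, 3]), keys_int64s=np.array([1, 2]),
+#              values_int64s=np.array([11, 22]), default_int64=99)
+# -> array([11, 22, 99])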
+
+class Label_encoder(RunAll):
+
+ @staticmethod
+ def label_encoder_fp16x16():
+
+ def labelencoder():
+ def default():
+ x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3]).astype(np.int64)
+ keys = np.array([1, 2, 5, 6, ]).astype(np.int64)
+ values = np.array([11, 22, 55, 66]).astype(np.int64)
+ default = np.array(99).astype(np.int64)
+
+ y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
+
+ x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
+ default = Tensor(Dtype.FP16x16, default.shape, to_fp(default.flatten(), FixedImpl.FP16x16))
+ keys = Tensor(Dtype.FP16x16, keys.shape, to_fp(keys.flatten(), FixedImpl.FP16x16))
+ values = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "label_encoder_fp16x16_3d_default"
+ make_test(
+ inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
+ keys:Option::None, keys_tensor: Option::Some(input_2),
+ values: Option::None, values_tensor: Option::Some(input_3))""",
+ name= name)
+
+ default()
+ labelencoder()
+
+ @staticmethod
+ def label_encoder_fp8x23():
+
+ def label_encoder():
+ def default():
+
+ x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int64)
+ keys = np.array([1, 2, 5, 6, 7]).astype(np.int64)
+ values = np.array([11, 22, 55, 66, 77]).astype(np.int64)
+ default = np.array(99).astype(np.int64)
+
+ y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
+
+ x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
+ default = Tensor(Dtype.FP8x23, default.shape, to_fp(default.flatten(), FixedImpl.FP8x23))
+ keys = Tensor(Dtype.FP8x23, keys.shape, to_fp(keys.flatten(), FixedImpl.FP8x23))
+ values = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
+ y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
+
+ name = "label_encoder_fp8x23_default"
+
+ make_test(
+ inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
+ keys:Option::None, keys_tensor: Option::Some(input_2),
+ values: Option::None, values_tensor: Option::Some(input_3))""",
+ name= name)
+
+
+
+ default()
+ label_encoder()
+
+ @staticmethod
+ def label_encoder_i8():
+
+ def label_encoder_3D():
+ def default():
+
+ x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int8)
+ keys = np.array([1, 2, 5, 6, 7]).astype(np.int8)
+ values = np.array([11, 22, 55, 66, 77]).astype(np.int8)
+ default = np.array(99).astype(np.int8)
+
+ y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
+
+ x = Tensor(Dtype.I8, x.shape, x.flatten())
+ default = Tensor(Dtype.I8, default.shape, default.flatten())
+ keys = Tensor(Dtype.I8, keys.shape, keys.flatten())
+ values = Tensor(Dtype.I8, values.shape, values.flatten())
+ y = Tensor(Dtype.I8, y.shape, y.flatten())
+
+ name = "label_encoder_i8_default"
+ make_test(
+ inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
+ keys:Option::None, keys_tensor: Option::Some(input_2),
+ values: Option::None, values_tensor: Option::Some(input_3))""",
+ name= name)
+
+
+ default()
+ label_encoder_3D()
+
+
+ @staticmethod
+ def label_encoder_i32():
+ def label_encoder_3D():
+ def default():
+ x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int32)
+ keys = np.array([1, 2, 5, 6, 7]).astype(np.int32)
+ values = np.array([11, 22, 55, 66, 77]).astype(np.int32)
+ default = np.array(99).astype(np.int32)
+
+ y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
+
+ x = Tensor(Dtype.I32, x.shape, x.flatten())
+ default = Tensor(Dtype.I32, default.shape, default.flatten())
+ keys = Tensor(Dtype.I32, keys.shape, keys.flatten())
+ values = Tensor(Dtype.I32, values.shape, values.flatten())
+ y = Tensor(Dtype.I32, y.shape, y.flatten())
+
+ name = "label_encoder_i32_default"
+ make_test(
+ inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
+ keys:Option::None, keys_tensor: Option::Some(input_2),
+ values: Option::None, values_tensor: Option::Some(input_3))""",
+ name= name)
+
+
+
+
+ default()
+ label_encoder_3D()
+
+
+ @staticmethod
+ def label_encoder_u32():
+
+ def label_encoder_3D():
+ def default():
+
+ x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.uint32)
+ keys = np.array([1, 2, 5, 6, 7]).astype(np.uint32)
+ values = np.array([11, 22, 55, 66, 77]).astype(np.uint32)
+ default = np.array(99).astype(np.uint32)
+
+ y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
+
+ x = Tensor(Dtype.U32, x.shape, x.flatten())
+ default = Tensor(Dtype.U32, default.shape, default.flatten())
+ keys = Tensor(Dtype.U32, keys.shape, keys.flatten())
+ values = Tensor(Dtype.U32, values.shape, values.flatten())
+ y = Tensor(Dtype.U32, y.shape, y.flatten())
+
+ name = "label_encoder_u32_default"
+
+ make_test(
+ inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
+ keys:Option::None, keys_tensor: Option::Some(input_2),
+ values: Option::None, values_tensor: Option::Some(input_3))""",
+ name= name)
+
+
+ default()
+ label_encoder_3D()
diff --git a/nodegen/node/reduce_log_sum.py b/nodegen/node/reduce_log_sum.py
index 259081f5a..9dc8ad4df 100644
--- a/nodegen/node/reduce_log_sum.py
+++ b/nodegen/node/reduce_log_sum.py
@@ -1,7 +1,7 @@
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
-import numpy as np
+
class Reduce_log_sum(RunAll):
diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py
new file mode 100644
index 000000000..e4da8b1e0
--- /dev/null
+++ b/nodegen/node/reduce_log_sum_exp.py
@@ -0,0 +1,62 @@
+import numpy as np
+from nodegen.node import RunAll
+from ..helpers import make_test, Tensor, Dtype, FixedImpl, to_fp
+
+class Reduce_log_sum_exp(RunAll):
+ @staticmethod
+ def reduce_log_sum_exp_fp32x32():
+ def reduce_log_sum_exp_export_do_not_keepdims():
+ shape = [3, 2, 2]
+ axes = np.array([2], dtype=np.int64)
+ keepdims = False
+ x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
+ y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.float64)
+
+ x = Tensor(Dtype.FP32x32, x.shape, to_fp(
+ x.flatten(), FixedImpl.FP32x32))
+ y = Tensor(Dtype.FP32x32, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP32x32))
+
+ name = "reduce_log_sum_exp_fp32x32_export_do_not_keepdims"
+ make_test(
+ [x], y, "input_0.reduce_log_sum_exp(2, false)", name)
+
+ def reduce_log_sum_exp_export_keepdims():
+ shape = [3, 2, 2]
+ axes = np.array([2], dtype=np.int64)
+ keepdims = True
+ x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
+ y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64)
+
+ x = Tensor(Dtype.FP32x32, x.shape, to_fp(
+ x.flatten(), FixedImpl.FP32x32))
+ y = Tensor(Dtype.FP32x32, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP32x32))
+
+ name = "reduce_log_sum_exp_fp32x32_export_keepdims"
+ make_test(
+ [x], y, "input_0.reduce_log_sum_exp(2, true)", name)
+
+ def reduce_log_sum_exp_axis_0():
+ shape = [3, 2, 2]
+ axes = np.array([0], dtype=np.int64)
+ keepdims = True
+ x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
+ y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64)
+
+ x = Tensor(Dtype.FP32x32, x.shape, to_fp(
+ x.flatten(), FixedImpl.FP32x32))
+ y = Tensor(Dtype.FP32x32, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP32x32))
+
+ name = "reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims"
+ make_test(
+ [x], y, "input_0.reduce_log_sum_exp(0, true)", name)
+
+
+ reduce_log_sum_exp_export_do_not_keepdims()
+ reduce_log_sum_exp_export_keepdims()
+ reduce_log_sum_exp_axis_0()
+
+
+
diff --git a/nodegen/node/reduce_prod.py b/nodegen/node/reduce_prod.py
deleted file mode 100644
index 7d145bae1..000000000
--- a/nodegen/node/reduce_prod.py
+++ /dev/null
@@ -1,287 +0,0 @@
-import numpy as np
-from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
-
-
-class Reduce_prod(RunAll):
- @staticmethod
- def reduce_prod_u32():
- def reduce_prod_1D():
- x = np.array([0, 1, 2,]).astype(np.uint32)
- y = np.array([0]).astype(np.uint32)
-
- x = Tensor(Dtype.U32, x.shape, x.flatten())
- y = Tensor(Dtype.U32, y.shape, y.flatten())
-
- name = "reduce_prod_u32_1D"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def reduce_prod_2D():
- def default():
- x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
- y = np.array([0, 3]).astype(np.uint32)
-
- x = Tensor(Dtype.U32, x.shape, x.flatten())
- y = Tensor(Dtype.U32, y.shape, y.flatten())
-
- name = "reduce_prod_u32_2D_default"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def keepdims():
- x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
- y = np.array([0, 3]).astype(np.uint32).reshape(1, 2)
-
- x = Tensor(Dtype.U32, x.shape, x.flatten())
- y = Tensor(Dtype.U32, y.shape, y.flatten())
-
- name = "reduce_prod_u32_2D_keepdims"
- make_test(
- [x], y, "input_0.reduce_prod(0, true)", name)
-
- def axis_1():
- x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
- y = np.array([0, 6]).astype(np.uint32)
-
- x = Tensor(Dtype.U32, x.shape, x.flatten())
- y = Tensor(Dtype.U32, y.shape, y.flatten())
-
- name = "reduce_prod_u32_2D_axis_1"
- make_test(
- [x], y, "input_0.reduce_prod(1, false)", name)
-
- default()
- keepdims()
- axis_1()
- reduce_prod_1D()
- reduce_prod_2D()
-
- @staticmethod
- def reduce_prod_i32():
- def reduce_prod_1D():
- x = np.array([0, 1, 2,]).astype(np.int32)
- y = np.array([0]).astype(np.int32)
-
- x = Tensor(Dtype.I32, x.shape, x.flatten())
- y = Tensor(Dtype.I32, y.shape, y.flatten())
-
- name = "reduce_prod_i32_1D"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def reduce_prod_2D():
- def default():
- x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int32)
-
- x = Tensor(Dtype.I32, x.shape, x.flatten())
- y = Tensor(Dtype.I32, y.shape, y.flatten())
-
- name = "reduce_prod_i32_2D_default"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def keepdims():
- x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int32).reshape(1, 2)
-
- x = Tensor(Dtype.I32, x.shape, x.flatten())
- y = Tensor(Dtype.I32, y.shape, y.flatten())
-
- name = "reduce_prod_i32_2D_keepdims"
- make_test(
- [x], y, "input_0.reduce_prod(0, true)", name)
-
- def axis_1():
- x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
- y = np.array([0, 6]).astype(np.int32)
-
- x = Tensor(Dtype.I32, x.shape, x.flatten())
- y = Tensor(Dtype.I32, y.shape, y.flatten())
-
- name = "reduce_prod_i32_2D_axis_1"
- make_test(
- [x], y, "input_0.reduce_prod(1, false)", name)
-
- default()
- keepdims()
- axis_1()
- reduce_prod_1D()
- reduce_prod_2D()
-
- @staticmethod
- def reduce_prod_i8():
- def reduce_prod_1D():
- x = np.array([0, 1, 2,]).astype(np.int8)
- y = np.array([0]).astype(np.int8)
-
- x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
- y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
-
- name = "reduce_prod_i8_1D"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def reduce_prod_2D():
- def default():
- x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int8)
-
- x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
- y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
-
- name = "reduce_prod_i8_2D_default"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def keepdims():
- x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int8).reshape(1, 2)
-
- x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
- y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
-
- name = "reduce_prod_i8_2D_keepdims"
- make_test(
- [x], y, "input_0.reduce_prod(0, true)", name)
-
- def axis_1():
- x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
- y = np.array([0, 6]).astype(np.int8)
- x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
- y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
-
- name = "reduce_prod_i8_2D_axis_1"
- make_test(
- [x], y, "input_0.reduce_prod(1, false)", name)
-
- default()
- keepdims()
- axis_1()
- reduce_prod_1D()
- reduce_prod_2D()
-
- @staticmethod
- def reduce_prod_fp8x23():
- def reduce_prod_1D():
- x = np.array([0, 1, 2,]).astype(np.int64)
- y = np.array([0]).astype(np.int64)
-
- x = Tensor(Dtype.FP8x23, x.shape, to_fp(
- x.flatten(), FixedImpl.FP8x23))
- y = Tensor(Dtype.FP8x23, y.shape, to_fp(
- y.flatten(), FixedImpl.FP8x23))
-
- name = "reduce_prod_fp8x23_1D"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def reduce_prod_2D():
- def default():
- x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int64)
-
- x = Tensor(Dtype.FP8x23, x.shape, to_fp(
- x.flatten(), FixedImpl.FP8x23))
- y = Tensor(Dtype.FP8x23, y.shape, to_fp(
- y.flatten(), FixedImpl.FP8x23))
-
- name = "reduce_prod_fp8x23_2D_default"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def keepdims():
- x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int64).reshape(1, 2)
-
- x = Tensor(Dtype.FP8x23, x.shape, to_fp(
- x.flatten(), FixedImpl.FP8x23))
- y = Tensor(Dtype.FP8x23, y.shape, to_fp(
- y.flatten(), FixedImpl.FP8x23))
-
- name = "reduce_prod_fp8x23_2D_keepdims"
- make_test(
- [x], y, "input_0.reduce_prod(0, true)", name)
-
- def axis_1():
- x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
- y = np.array([0, 6]).astype(np.int64)
-
- x = Tensor(Dtype.FP8x23, x.shape, to_fp(
- x.flatten(), FixedImpl.FP8x23))
- y = Tensor(Dtype.FP8x23, y.shape, to_fp(
- y.flatten(), FixedImpl.FP8x23))
-
- name = "reduce_prod_fp8x23_2D_axis_1"
- make_test(
- [x], y, "input_0.reduce_prod(1, false)", name)
-
- default()
- keepdims()
- axis_1()
-
- reduce_prod_1D()
- reduce_prod_2D()
-
- @staticmethod
- def reduce_prod_fp16x16():
- def reduce_prod_1D():
- x = np.array([0, 1, 2,]).astype(np.int64)
- y = np.array([0]).astype(np.int64)
-
- x = Tensor(Dtype.FP16x16, x.shape, to_fp(
- x.flatten(), FixedImpl.FP16x16))
- y = Tensor(Dtype.FP16x16, y.shape, to_fp(
- y.flatten(), FixedImpl.FP16x16))
-
- name = "reduce_prod_fp16x16_1D"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def reduce_prod_2D():
- def default():
- x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int64)
-
- x = Tensor(Dtype.FP16x16, x.shape, to_fp(
- x.flatten(), FixedImpl.FP16x16))
- y = Tensor(Dtype.FP16x16, y.shape, to_fp(
- y.flatten(), FixedImpl.FP16x16))
-
- name = "reduce_prod_fp16x16_2D_default"
- make_test(
- [x], y, "input_0.reduce_prod(0, false)", name)
-
- def keepdims():
- x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
- y = np.array([0, 3]).astype(np.int64).reshape(1, 2)
-
- x = Tensor(Dtype.FP16x16, x.shape, to_fp(
- x.flatten(), FixedImpl.FP16x16))
- y = Tensor(Dtype.FP16x16, y.shape, to_fp(
- y.flatten(), FixedImpl.FP16x16))
-
- name = "reduce_prod_fp16x16_2D_keepdims"
- make_test(
- [x], y, "input_0.reduce_prod(0, true)", name)
-
- def axis_1():
- x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
- y = np.array([0, 6]).astype(np.int64)
-
- x = Tensor(Dtype.FP16x16, x.shape, to_fp(
- x.flatten(), FixedImpl.FP16x16))
- y = Tensor(Dtype.FP16x16, y.shape, to_fp(
- y.flatten(), FixedImpl.FP16x16))
-
- name = "reduce_prod_fp16x16_2D_axis_1"
- make_test(
- [x], y, "input_0.reduce_prod(1, false)", name)
-
- default()
- keepdims()
- axis_1()
-
- reduce_prod_1D()
- reduce_prod_2D()
diff --git a/nodegen/node/squeeze.py b/nodegen/node/squeeze.py
index 2f598e1ea..44d1d4a22 100644
--- a/nodegen/node/squeeze.py
+++ b/nodegen/node/squeeze.py
@@ -15,11 +15,11 @@ def squeeze():
name = "squeeze_i8"
make_test(
- [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+ [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
- def squeeze_i32():
+ def squeeze():
def squeeze():
x = np.ones((1, 2, 1, 2, 1), dtype=np.int32)
y = np.ones((2, 2, 1), dtype=np.int32)
@@ -27,9 +27,9 @@ def squeeze():
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
- name = "squeeze_i32"
+ name = "squeeze"
make_test(
- [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+ [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
@@ -43,7 +43,7 @@ def squeeze():
name = "squeeze_u32"
make_test(
- [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+ [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
@@ -59,7 +59,7 @@ def squeeze():
name = "squeeze_fP16x16"
make_test(
- [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+ [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
@@ -75,5 +75,5 @@ def squeeze():
name = "squeeze_fP8x23"
make_test(
- [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+ [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
diff --git a/src/numbers.cairo b/src/numbers.cairo
index 156fc5bf5..ddd95fa10 100644
--- a/src/numbers.cairo
+++ b/src/numbers.cairo
@@ -1432,9 +1432,10 @@ impl FP32x32Number of NumberTrait {
impl I8Number of NumberTrait<i8, i8> {
fn new(mag: i8, sign: bool) -> i8 {
- if sign{
+ if sign {
return -mag;
}
+
mag
}
@@ -1559,7 +1560,7 @@ impl I8Number of NumberTrait {
fn abs(self: i8) -> i8 {
if self >= 0 {
- return self;
+ self
} else {
self * -1_i8
}
@@ -1579,7 +1580,7 @@ impl I8Number of NumberTrait {
fn min(self: i8, other: i8) -> i8 {
if self < other {
- return self;
+ self
} else {
other
}
@@ -1587,7 +1588,7 @@ impl I8Number of NumberTrait {
fn max(self: i8, other: i8) -> i8 {
if self > other {
- return self;
+ self
} else {
other
}
@@ -1603,43 +1604,43 @@ impl I8Number of NumberTrait {
fn xor(lhs: i8, rhs: i8) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(lhs: i8, rhs: i8) -> bool {
- if (lhs == 0 && rhs == 0) {
- return false;
+ if lhs == 0 && rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn sign(self: i8) -> i8 {
if self == 0 {
- return 0_i8;
+ 0_i8
} else if self > 0 {
- return 1_i8;
+ 1_i8
} else {
-1_i8
}
}
fn and(lhs: i8, rhs: i8) -> bool {
- if (lhs == 0 || rhs == 0) {
- return false;
+ if lhs == 0 || rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn where(self: i8, x: i8, y: i8) -> i8 {
if self == 0 {
- return y;
+ y
} else {
- return x;
+ x
}
}
@@ -1656,7 +1657,7 @@ impl I8Number of NumberTrait {
}
fn is_inf(self: i8) -> bool {
- (self == 127 || self == -127)
+ self == 127 || self == -127
}
fn is_pos_inf(self: i8) -> bool {
@@ -1691,8 +1692,10 @@ impl I8Number of NumberTrait {
impl I8Div of Div<i8> {
fn div(lhs: i8, rhs: i8) -> i8 {
assert(rhs != 0, 'divisor cannot be 0');
+
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
+
// making sure everything is positive
if lhs < 0 {
lhs_positive = lhs * -1;
@@ -1700,6 +1703,7 @@ impl I8Div of Div {
if rhs < 0 {
rhs_positive = rhs * -1;
}
+
//felt252 plays role of a bridge for type casting
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
@@ -1708,6 +1712,7 @@ impl I8Div of Div {
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i8 = felt_result.try_into().unwrap();
+
if lhs * rhs < 0 {
signed_int_result * -1
} else {
@@ -1727,11 +1732,14 @@ impl I8IntoFP8x23 of Into {
fn into(self: i8) -> FP8x23 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
+
if number_sign {
self_positive = self_positive * -1_i8
}
+
let number_felt: felt252 = self_positive.into();
let number_u32: u32 = number_felt.try_into().unwrap();
+
FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign }
}
}
@@ -1740,11 +1748,14 @@ impl I8IntoFP16x16 of Into {
fn into(self: i8) -> FP16x16 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
+
if number_sign {
self_positive = self_positive * -1_i8
}
+
let number_felt: felt252 = self_positive.into();
let number_u32: u32 = number_felt.try_into().unwrap();
+
FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign }
}
}
@@ -1753,11 +1764,14 @@ impl I8IntoFP64x64 of Into {
fn into(self: i8) -> FP64x64 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
+
if number_sign {
self_positive = self_positive * -1_i8
}
+
let number_felt: felt252 = self_positive.into();
let number_u128: u128 = number_felt.try_into().unwrap();
+
FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign }
}
}
@@ -1766,20 +1780,24 @@ impl I8IntoFP32x32 of Into {
fn into(self: i8) -> FP32x32 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
+
if number_sign {
self_positive = self_positive * -1_i8
}
+
let number_felt: felt252 = self_positive.into();
let number_u128: u64 = number_felt.try_into().unwrap();
+
FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign }
}
}
impl I16Number of NumberTrait<i16, i16> {
fn new(mag: i16, sign: bool) -> i16 {
- if sign{
+ if sign {
return -mag;
}
+
mag
}
@@ -1904,7 +1922,7 @@ impl I16Number of NumberTrait {
fn abs(self: i16) -> i16 {
if self >= 0 {
- return self;
+ self
} else {
self * -1_i16
}
@@ -1924,7 +1942,7 @@ impl I16Number of NumberTrait {
fn min(self: i16, other: i16) -> i16 {
if self < other {
- return self;
+ self
} else {
other
}
@@ -1932,7 +1950,7 @@ impl I16Number of NumberTrait {
fn max(self: i16, other: i16) -> i16 {
if self > other {
- return self;
+ self
} else {
other
}
@@ -1948,43 +1966,43 @@ impl I16Number of NumberTrait {
fn xor(lhs: i16, rhs: i16) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(lhs: i16, rhs: i16) -> bool {
- if (lhs == 0 && rhs == 0) {
- return false;
+ if lhs == 0 && rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn sign(self: i16) -> i16 {
if self == 0 {
- return 0_i16;
+ 0_i16
} else if self > 0 {
- return 1_i16;
+ 1_i16
} else {
-1_i16
}
}
fn and(lhs: i16, rhs: i16) -> bool {
- if (lhs == 0 || rhs == 0) {
- return false;
+ if lhs == 0 || rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn where(self: i16, x: i16, y: i16) -> i16 {
if self == 0 {
- return y;
+ y
} else {
- return x;
+ x
}
}
@@ -2001,7 +2019,7 @@ impl I16Number of NumberTrait {
}
fn is_inf(self: i16) -> bool {
- (self == 32767 || self == -32767)
+ self == 32767 || self == -32767
}
fn is_pos_inf(self: i16) -> bool {
@@ -2036,8 +2054,10 @@ impl I16Number of NumberTrait {
impl I16Div of Div {
fn div(lhs: i16, rhs: i16) -> i16 {
assert(rhs != 0, 'divisor cannot be 0');
+
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
+
// making sure everything is positive
if lhs < 0 {
lhs_positive = lhs * -1;
@@ -2045,6 +2065,7 @@ impl I16Div of Div {
if rhs < 0 {
rhs_positive = rhs * -1;
}
+
// felt252 plays the role of a bridge for type casting
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
@@ -2053,6 +2074,7 @@ impl I16Div of Div {
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i16 = felt_result.try_into().unwrap();
+
if lhs * rhs < 0 {
signed_int_result * -1
} else {
@@ -2070,9 +2092,10 @@ impl I16DivEq of DivEq {
impl I32Number of NumberTrait {
fn new(mag: i32, sign: bool) -> i32 {
- if sign{
+ if sign {
return -mag;
}
+
mag
}
@@ -2197,7 +2220,7 @@ impl I32Number of NumberTrait {
fn abs(self: i32) -> i32 {
if self >= 0 {
- return self;
+ self
} else {
self * -1_i32
}
@@ -2217,7 +2240,7 @@ impl I32Number of NumberTrait {
fn min(self: i32, other: i32) -> i32 {
if self < other {
- return self;
+ self
} else {
other
}
@@ -2225,7 +2248,7 @@ impl I32Number of NumberTrait {
fn max(self: i32, other: i32) -> i32 {
if self > other {
- return self;
+ self
} else {
other
}
@@ -2241,43 +2264,43 @@ impl I32Number of NumberTrait {
fn xor(lhs: i32, rhs: i32) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(lhs: i32, rhs: i32) -> bool {
- if (lhs == 0 && rhs == 0) {
- return false;
+ if lhs == 0 && rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn sign(self: i32) -> i32 {
if self == 0 {
- return 0_i32;
+ 0_i32
} else if self > 0 {
- return 1_i32;
+ 1_i32
} else {
-1_i32
}
}
fn and(lhs: i32, rhs: i32) -> bool {
- if (lhs == 0 || rhs == 0) {
- return false;
+ if lhs == 0 || rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn where(self: i32, x: i32, y: i32) -> i32 {
if self == 0 {
- return y;
+ y
} else {
- return x;
+ x
}
}
@@ -2294,7 +2317,7 @@ impl I32Number of NumberTrait {
}
fn is_inf(self: i32) -> bool {
- (self == 2147483647 || self == -2147483647)
+ self == 2147483647 || self == -2147483647
}
fn is_pos_inf(self: i32) -> bool {
@@ -2329,8 +2352,10 @@ impl I32Number of NumberTrait {
impl I32Div of Div {
fn div(lhs: i32, rhs: i32) -> i32 {
assert(rhs != 0, 'divisor cannot be 0');
+
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
+
// making sure everything is positive
if lhs < 0 {
lhs_positive = lhs * -1;
@@ -2338,6 +2363,7 @@ impl I32Div of Div {
if rhs < 0 {
rhs_positive = rhs * -1;
}
+
// felt252 plays the role of a bridge for type casting
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
@@ -2346,6 +2372,7 @@ impl I32Div of Div {
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i32 = felt_result.try_into().unwrap();
+
if lhs * rhs < 0 {
signed_int_result * -1
} else {
@@ -2365,20 +2392,24 @@ impl I32IntoU32 of Into {
fn into(self: i32) -> u32 {
let number_sign: bool = self < 0;
let mut self_positive: i32 = self;
+
if number_sign {
self_positive = self_positive * -1_i32
}
+
let number_felt: felt252 = self_positive.into();
let number_u32: u32 = number_felt.try_into().unwrap();
+
number_u32
}
}
impl I64Number of NumberTrait {
fn new(mag: i64, sign: bool) -> i64 {
- if sign{
+ if sign {
return -mag;
}
+
mag
}
@@ -2503,7 +2534,7 @@ impl I64Number of NumberTrait {
fn abs(self: i64) -> i64 {
if self >= 0 {
- return self;
+ self
} else {
self * -1_i64
}
@@ -2523,7 +2554,7 @@ impl I64Number of NumberTrait {
fn min(self: i64, other: i64) -> i64 {
if self < other {
- return self;
+ self
} else {
other
}
@@ -2531,7 +2562,7 @@ impl I64Number of NumberTrait {
fn max(self: i64, other: i64) -> i64 {
if self > other {
- return self;
+ self
} else {
other
}
@@ -2547,43 +2578,43 @@ impl I64Number of NumberTrait {
fn xor(lhs: i64, rhs: i64) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(lhs: i64, rhs: i64) -> bool {
- if (lhs == 0 && rhs == 0) {
- return false;
+ if lhs == 0 && rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn sign(self: i64) -> i64 {
if self == 0 {
- return 0_i64;
+ 0_i64
} else if self > 0 {
- return 1_i64;
+ 1_i64
} else {
-1_i64
}
}
fn and(lhs: i64, rhs: i64) -> bool {
- if (lhs == 0 || rhs == 0) {
- return false;
+ if lhs == 0 || rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn where(self: i64, x: i64, y: i64) -> i64 {
if self == 0 {
- return y;
+ y
} else {
- return x;
+ x
}
}
@@ -2600,7 +2631,7 @@ impl I64Number of NumberTrait {
}
fn is_inf(self: i64) -> bool {
- (self == 9223372036854775807 || self == -9223372036854775807)
+ self == 9223372036854775807 || self == -9223372036854775807
}
fn is_pos_inf(self: i64) -> bool {
@@ -2635,8 +2666,10 @@ impl I64Number of NumberTrait {
impl I64Div of Div {
fn div(lhs: i64, rhs: i64) -> i64 {
assert(rhs != 0, 'divisor cannot be 0');
+
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
+
// making sure everything is positive
if lhs < 0 {
lhs_positive = lhs * -1;
@@ -2644,6 +2677,7 @@ impl I64Div of Div {
if rhs < 0 {
rhs_positive = rhs * -1;
}
+
// felt252 plays the role of a bridge for type casting
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
@@ -2652,6 +2686,7 @@ impl I64Div of Div {
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i64 = felt_result.try_into().unwrap();
+
if lhs * rhs < 0 {
signed_int_result * -1
} else {
@@ -2669,9 +2704,10 @@ impl I64DivEq of DivEq {
impl I128Number of NumberTrait {
fn new(mag: i128, sign: bool) -> i128 {
- if sign{
+ if sign {
return -mag;
}
+
mag
}
@@ -2796,7 +2832,7 @@ impl I128Number of NumberTrait {
fn abs(self: i128) -> i128 {
if self >= 0 {
- return self;
+ self
} else {
self * -1_i128
}
@@ -2816,7 +2852,7 @@ impl I128Number of NumberTrait {
fn min(self: i128, other: i128) -> i128 {
if self < other {
- return self;
+ self
} else {
other
}
@@ -2824,7 +2860,7 @@ impl I128Number of NumberTrait {
fn max(self: i128, other: i128) -> i128 {
if self > other {
- return self;
+ self
} else {
other
}
@@ -2840,43 +2876,43 @@ impl I128Number of NumberTrait {
fn xor(lhs: i128, rhs: i128) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(lhs: i128, rhs: i128) -> bool {
- if (lhs == 0 && rhs == 0) {
- return false;
+ if lhs == 0 && rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn sign(self: i128) -> i128 {
if self == 0 {
- return 0_i128;
+ 0_i128
} else if self > 0 {
- return 1_i128;
+ 1_i128
} else {
-1_i128
}
}
fn and(lhs: i128, rhs: i128) -> bool {
- if (lhs == 0 || rhs == 0) {
- return false;
+ if lhs == 0 || rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn where(self: i128, x: i128, y: i128) -> i128 {
if self == 0 {
- return y;
+ y
} else {
- return x;
+ x
}
}
@@ -2893,8 +2929,8 @@ impl I128Number of NumberTrait {
}
fn is_inf(self: i128) -> bool {
- (self == 170141183460469231731687303715884105727
- || self == -170141183460469231731687303715884105727)
+ self == 170141183460469231731687303715884105727
+ || self == -170141183460469231731687303715884105727
}
fn is_pos_inf(self: i128) -> bool {
@@ -2929,8 +2965,10 @@ impl I128Number of NumberTrait {
impl I128Div of Div {
fn div(lhs: i128, rhs: i128) -> i128 {
assert(rhs != 0, 'divisor cannot be 0');
+
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
+
// making sure everything is positive
if lhs < 0 {
lhs_positive = lhs * -1;
@@ -2938,6 +2976,7 @@ impl I128Div of Div {
if rhs < 0 {
rhs_positive = rhs * -1;
}
+
// felt252 plays the role of a bridge for type casting
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
@@ -2946,6 +2985,7 @@ impl I128Div of Div {
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i128 = felt_result.try_into().unwrap();
+
// assigning the sign and returning
if lhs * rhs < 0 {
signed_int_result * -1
@@ -3105,7 +3145,7 @@ impl u32Number of NumberTrait {
fn min(self: u32, other: u32) -> u32 {
if self < other {
- return self;
+ self
} else {
other
}
@@ -3113,7 +3153,7 @@ impl u32Number of NumberTrait {
fn max(self: u32, other: u32) -> u32 {
if self > other {
- return self;
+ self
} else {
other
}
@@ -3129,17 +3169,17 @@ impl u32Number of NumberTrait {
fn xor(lhs: u32, rhs: u32) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(lhs: u32, rhs: u32) -> bool {
- if (lhs == 0 && rhs == 0) {
- return false;
+ if lhs == 0 && rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
@@ -3148,18 +3188,18 @@ impl u32Number of NumberTrait {
}
fn and(lhs: u32, rhs: u32) -> bool {
- if (lhs == 0 || rhs == 0) {
- return false;
+ if lhs == 0 || rhs == 0 {
+ false
} else {
- return true;
+ true
}
}
fn where(self: u32, x: u32, y: u32) -> u32 {
if self == 0 {
- return y;
+ y
} else {
- return x;
+ x
}
}
@@ -3324,6 +3364,7 @@ impl Complex64Number of NumberTrait {
if self == Complex64Impl::zero() {
return true;
}
+
false
}
@@ -3343,6 +3384,7 @@ impl Complex64Number of NumberTrait {
if self == Complex64Impl::one() {
return true;
}
+
false
}
@@ -3447,6 +3489,7 @@ impl U32IntoI32 of Into {
fn into(self: u32) -> i32 {
let number_felt: felt252 = self.into();
let number_i32: i32 = number_felt.try_into().unwrap();
+
number_i32
}
}
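
Reviewer orientation: every signed `Div` impl touched above repeats the same recipe — strip the signs, hop through `felt252` into `u128`, divide, hop back, then re-sign the quotient from the operands. A consolidated sketch of that bridge, shown for `i32` (the other widths only swap the integer type; nothing here goes beyond what the hunks already contain):

```cairo
// The felt252-bridge division shared by the IxxDiv impls above.
fn signed_div_sketch(lhs: i32, rhs: i32) -> i32 {
    assert(rhs != 0, 'divisor cannot be 0');

    // Strip the signs so the division runs on unsigned words.
    let mut lhs_positive = lhs;
    let mut rhs_positive = rhs;
    if lhs < 0 {
        lhs_positive = lhs * -1;
    }
    if rhs < 0 {
        rhs_positive = rhs * -1;
    }

    // felt252 bridges the cast: i32 -> felt252 -> u128.
    let lhs_felt: felt252 = lhs_positive.into();
    let rhs_felt: felt252 = rhs_positive.into();
    let lhs_u128: u128 = lhs_felt.try_into().unwrap();
    let rhs_u128: u128 = rhs_felt.try_into().unwrap();

    // Divide as unsigned values, then bridge back to i32.
    let felt_result: felt252 = (lhs_u128 / rhs_u128).into();
    let result: i32 = felt_result.try_into().unwrap();

    // The quotient is negative iff exactly one operand was negative.
    if lhs * rhs < 0 {
        result * -1
    } else {
        result
    }
}
```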
diff --git a/src/numbers/complex_number/complex64.cairo b/src/numbers/complex_number/complex64.cairo
index 20fb57f88..9edcb8d1a 100644
--- a/src/numbers/complex_number/complex64.cairo
+++ b/src/numbers/complex_number/complex64.cairo
@@ -15,7 +15,6 @@ struct complex64 {
}
// CONSTANTS for FP64x64
-
const PI: u128 = 57952155664616982739;
const HALF_PI: u128 = 28976077832308491370;
const TWO: u128 = 36893488147419103232;
@@ -40,15 +39,16 @@ impl Complex64Impl of ComplexTrait {
}
fn zero() -> complex64 {
- return complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() };
+ complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() }
}
fn one() -> complex64 {
- return complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() };
+ complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() }
}
fn mag(self: complex64) -> FP64x64 {
let two = FP64x64Impl::new(TWO, false);
+
(self.real.pow(two) + self.img.pow(two)).sqrt()
}
@@ -59,15 +59,16 @@ impl Complex64Impl of ComplexTrait {
fn exp(self: complex64) -> complex64 {
let real = self.real.exp() * self.img.cos();
let img = self.real.exp() * self.img.sin();
+
complex64 { real, img }
}
fn exp2(self: complex64) -> complex64 {
let two = complex64 { real: FP64x64Impl::new(TWO, false), img: FP64x64Impl::ZERO() };
+
two.pow(self)
}
-
fn sqrt(self: complex64) -> complex64 {
let x = self.real;
let y = self.img;
@@ -78,26 +79,29 @@ impl Complex64Impl of ComplexTrait {
} else {
(((x.pow(two) + y.pow(two)).sqrt() - x) / two).sqrt()
};
-
let img = FP64x64Impl::new(img.mag, y.sign);
+
complex64 { real, img }
}
fn ln(self: complex64) -> complex64 {
let real = self.mag().ln();
let img = self.arg();
+
complex64 { real, img }
}
fn log2(self: complex64) -> complex64 {
let ln_2 = FP64x64Impl::new(12786309186476892720, false);
let ln = self.ln();
+
complex64 { real: (ln.real / ln_2), img: (ln.img / ln_2) }
}
fn log10(self: complex64) -> complex64 {
let ln_10 = FP64x64Impl::new(42475197399893398429, false);
let ln = self.ln();
+
complex64 { real: (ln.real / ln_10), img: (ln.img / ln_10) }
}
@@ -129,6 +133,7 @@ impl Complex64Impl of ComplexTrait {
let B = b.real * self.arg() + b.img * self.mag().ln();
let real = A * B.cos();
let img = A * B.sin();
+
complex64 { real, img }
}
@@ -136,17 +141,18 @@ impl Complex64Impl of ComplexTrait {
fn cos(self: complex64) -> complex64 {
let a = self.real;
let b = self.img;
+
complex64 {
real: FP64x64Impl::cos(a) * FP64x64Impl::cosh(b),
img: -FP64x64Impl::sin(a) * FP64x64Impl::sinh(b)
}
}
-
//sin(z) = sin(a+bi) = sin(a)cosh(b)+icos(a)sinh(b)
fn sin(self: complex64) -> complex64 {
let a = self.real;
let b = self.img;
+
complex64 {
real: FP64x64Impl::sin(a) * FP64x64Impl::cosh(b),
img: FP64x64Impl::cos(a) * FP64x64Impl::sinh(b)
@@ -159,6 +165,7 @@ impl Complex64Impl of ComplexTrait {
let a = self.real;
let b = self.img;
let den = FP64x64Impl::cosh(two * b) + FP64x64Impl::cos(two * a);
+
complex64 { real: FP64x64Impl::sin(two * a) / den, img: FP64x64Impl::sinh(two * b) / den }
}
@@ -184,7 +191,6 @@ impl Complex64Impl of ComplexTrait {
asin
}
-
//atan(z) = 1/2 * i[ln (1 - iz) - ln(1 + iz)]
fn atan(self: complex64) -> complex64 {
let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
@@ -198,7 +204,6 @@ impl Complex64Impl of ComplexTrait {
atan
}
-
//acosh(z) = ln (z + sqrt(z + 1) * sqrt(z - 1))
fn acosh(self: complex64) -> complex64 {
let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
@@ -218,7 +223,6 @@ impl Complex64Impl of ComplexTrait {
asinh
}
-
//atanh(z) = 1/2 * [ln (1 + z) - ln(1 - z)]
fn atanh(self: complex64) -> complex64 {
let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
@@ -232,6 +236,7 @@ impl Complex64Impl of ComplexTrait {
fn cosh(self: complex64) -> complex64 {
let a = self.real;
let b = self.img;
+
complex64 {
real: FP64x64Impl::cosh(a) * FP64x64Impl::cos(b),
img: FP64x64Impl::sinh(a) * FP64x64Impl::sin(b)
@@ -242,6 +247,7 @@ impl Complex64Impl of ComplexTrait {
fn sinh(self: complex64) -> complex64 {
let a = self.real;
let b = self.img;
+
complex64 {
real: FP64x64Impl::sinh(a) * FP64x64Impl::cos(b),
img: FP64x64Impl::cosh(a) * FP64x64Impl::sin(b)
@@ -254,6 +260,7 @@ impl Complex64Impl of ComplexTrait {
let a = self.real;
let b = self.img;
let den = FP64x64Impl::cosh(two * a) + FP64x64Impl::cos(two * b);
+
complex64 { real: FP64x64Impl::sinh(two * a) / den, img: FP64x64Impl::sin(two * b) / den }
}
@@ -261,12 +268,14 @@ impl Complex64Impl of ComplexTrait {
fn to_polar(self: complex64) -> (FP64x64, FP64x64) {
let mag = self.mag();
let arg = self.arg();
- return (mag, arg);
+
+ (mag, arg)
}
fn from_polar(mag: FP64x64, arg: FP64x64) -> complex64 {
let real = mag * arg.cos();
let img = mag * arg.sin();
+
complex64 { real, img }
}
@@ -277,6 +286,7 @@ impl Complex64Impl of ComplexTrait {
let real = x / (x.pow(two) + y.pow(two));
let img = -y / (x.pow(two) + y.pow(two));
+
complex64 { real, img }
}
}
@@ -361,7 +371,6 @@ impl Complex64DivEq of DivEq {
}
}
-
// Implements the PartialEq trait for complex64.
impl Complex64PartialEq of PartialEq {
fn eq(lhs: @complex64, rhs: @complex64) -> bool {
@@ -394,7 +403,8 @@ impl Complex64Neg of Neg {
fn complex64_add(a: complex64, b: complex64) -> complex64 {
let real = a.real + b.real;
let img = a.img + b.img;
- return ComplexTrait::new(real, img);
+
+ ComplexTrait::new(real, img)
}
// Subtracts complex64 complex numbers.
@@ -409,7 +419,8 @@ fn complex64_add(a: complex64, b: complex64) -> complex64 {
fn complex64_sub(a: complex64, b: complex64) -> complex64 {
let real = a.real - b.real;
let img = a.img - b.img;
- return ComplexTrait::new(real, img);
+
+ ComplexTrait::new(real, img)
}
// Multiplies two complex64 integers.
@@ -427,7 +438,8 @@ fn complex64_sub(a: complex64, b: complex64) -> complex64 {
fn complex64_mul(a: complex64, b: complex64) -> complex64 {
let real = a.real * b.real - a.img * b.img;
let img = a.real * b.img + a.img * b.real;
- return ComplexTrait::new(real, img);
+
+ ComplexTrait::new(real, img)
}
// Divides the first complex64 by the second complex64.
@@ -452,7 +464,7 @@ fn complex64_eq(a: complex64, b: complex64) -> bool {
return true;
}
- return false;
+ false
}
// Compares two complex64 complex numbers for inequality.
@@ -463,7 +475,7 @@ fn complex64_eq(a: complex64, b: complex64) -> bool {
// * `bool` - `true` if the two complex numbers are not equal, `false` otherwise.
fn complex64_ne(a: complex64, b: complex64) -> bool {
// The result is the inverse of the equal function.
- return !complex64_eq(a, b);
+ !complex64_eq(a, b)
}
// Negates the given complex64 complex number.
@@ -473,5 +485,5 @@ fn complex64_ne(a: complex64, b: complex64) -> bool {
// * `complex64` - The negation of `x`.
fn complex64_neg(x: complex64) -> complex64 {
// The negation of a complex number is obtained by negating its real part and its imaginary part.
- return ComplexTrait::new(-x.real, -x.img);
+ ComplexTrait::new(-x.real, -x.img)
}
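
For orientation, a minimal usage sketch of the polar helpers cleaned up above. The `ComplexTrait` import path and the FP64x64 core path are assumptions here, not something this diff confirms:

```cairo
use orion::numbers::complex_number::complex64::{complex64, Complex64Impl};
// Assumed import paths for the trait and the FP64x64 implementation.
use orion::numbers::complex_number::complex_trait::ComplexTrait;
use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x64Impl};

fn polar_round_trip() -> complex64 {
    // z = 3 + 4i, so mag(z) = 5 and arg(z) = atan2(4, 3).
    let z = Complex64Impl::new(
        FP64x64Impl::new_unscaled(3, false), FP64x64Impl::new_unscaled(4, false)
    );

    // to_polar returns (mag, arg); from_polar rebuilds the same point,
    // up to fixed-point rounding.
    let (mag, arg) = z.to_polar();
    Complex64Impl::from_polar(mag, arg)
}
```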
diff --git a/src/numbers/fixed_point/core.cairo b/src/numbers/fixed_point/core.cairo
index 0ef1f8c6f..e35d8abdb 100644
--- a/src/numbers/fixed_point/core.cairo
+++ b/src/numbers/fixed_point/core.cairo
@@ -33,6 +33,7 @@
/// sinh - Returns the value of the hyperbolic sine of the fixed point number.
/// tanh - Returns the value of the hyperbolic tangent of the fixed point number.
/// sign - Returns the element-wise indication of the sign of the input fixed point number.
+/// erf - Returns the error function of the input fixed point number computed element-wise.
///
trait FixedTrait {
/// # FixedTrait::new
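
With `erf` now listed on the documented `FixedTrait` surface, a quick smoke-test sketch of the trait call; the expected value is a hand-computed approximation, not a test vector from this repo:

```cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FP16x16Impl, FixedTrait, ONE
};

fn erf_smoke_test() -> FP16x16 {
    // erf(1.0) ≈ 0.8427, i.e. a magnitude of roughly 0.8427 * 65536 ≈ 55227.
    let x: FP16x16 = FixedTrait::new(ONE, false); // 1.0 in FP16x16
    x.erf()
}
```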
diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo
index d39820ce8..8f77324aa 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo
@@ -1,9 +1,5 @@
use core::debug::PrintTrait;
-use core::option::OptionTrait;
-use core::result::{ResultTrait, ResultTraitImpl};
-use core::traits::{TryInto, Into};
-
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp16x16::math::{
core as core_math, trig, hyp, erf
@@ -18,178 +14,177 @@ struct FP16x16 {
}
// CONSTANTS
-
const TWO: u32 = 131072; // 2 ** 17
const ONE: u32 = 65536; // 2 ** 16
const HALF: u32 = 32768; // 2 ** 15
const MAX: u32 = 2147483648; // 2 ** 31
-
impl FP16x16Impl of FixedTrait {
fn ZERO() -> FP16x16 {
- return FP16x16 { mag: 0, sign: false };
+ FP16x16 { mag: 0, sign: false }
}
fn HALF() -> FP16x16 {
- return FP16x16 { mag: HALF, sign: false };
+ FP16x16 { mag: HALF, sign: false }
}
fn ONE() -> FP16x16 {
- return FP16x16 { mag: ONE, sign: false };
+ FP16x16 { mag: ONE, sign: false }
}
fn MAX() -> FP16x16 {
- return FP16x16 { mag: MAX, sign: false };
+ FP16x16 { mag: MAX, sign: false }
}
fn new(mag: u32, sign: bool) -> FP16x16 {
- return FP16x16 { mag: mag, sign: sign };
+ FP16x16 { mag: mag, sign: sign }
}
fn new_unscaled(mag: u32, sign: bool) -> FP16x16 {
- return FP16x16 { mag: mag * ONE, sign: sign };
+ FP16x16 { mag: mag * ONE, sign: sign }
}
fn from_felt(val: felt252) -> FP16x16 {
let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
- return FixedTrait::new(mag, utils::felt_sign(val));
+
+ FixedTrait::new(mag, utils::felt_sign(val))
}
fn abs(self: FP16x16) -> FP16x16 {
- return core_math::abs(self);
+ core_math::abs(self)
}
fn acos(self: FP16x16) -> FP16x16 {
- return trig::acos_fast(self);
+ trig::acos_fast(self)
}
fn acos_fast(self: FP16x16) -> FP16x16 {
- return trig::acos_fast(self);
+ trig::acos_fast(self)
}
fn acosh(self: FP16x16) -> FP16x16 {
- return hyp::acosh(self);
+ hyp::acosh(self)
}
fn asin(self: FP16x16) -> FP16x16 {
- return trig::asin_fast(self);
+ trig::asin_fast(self)
}
fn asin_fast(self: FP16x16) -> FP16x16 {
- return trig::asin_fast(self);
+ trig::asin_fast(self)
}
fn asinh(self: FP16x16) -> FP16x16 {
- return hyp::asinh(self);
+ hyp::asinh(self)
}
fn atan(self: FP16x16) -> FP16x16 {
- return trig::atan_fast(self);
+ trig::atan_fast(self)
}
fn atan_fast(self: FP16x16) -> FP16x16 {
- return trig::atan_fast(self);
+ trig::atan_fast(self)
}
fn atanh(self: FP16x16) -> FP16x16 {
- return hyp::atanh(self);
+ hyp::atanh(self)
}
fn ceil(self: FP16x16) -> FP16x16 {
- return core_math::ceil(self);
+ core_math::ceil(self)
}
fn cos(self: FP16x16) -> FP16x16 {
- return trig::cos_fast(self);
+ trig::cos_fast(self)
}
fn cos_fast(self: FP16x16) -> FP16x16 {
- return trig::cos_fast(self);
+ trig::cos_fast(self)
}
fn cosh(self: FP16x16) -> FP16x16 {
- return hyp::cosh(self);
+ hyp::cosh(self)
}
fn floor(self: FP16x16) -> FP16x16 {
- return core_math::floor(self);
+ core_math::floor(self)
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP16x16) -> FP16x16 {
- return core_math::exp(self);
+ core_math::exp(self)
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP16x16) -> FP16x16 {
- return core_math::exp2(self);
+ core_math::exp2(self)
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP16x16) -> FP16x16 {
- return core_math::ln(self);
+ core_math::ln(self)
}
// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
fn log2(self: FP16x16) -> FP16x16 {
- return core_math::log2(self);
+ core_math::log2(self)
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP16x16) -> FP16x16 {
- return core_math::log10(self);
+ core_math::log10(self)
}
// Calculates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP16x16, b: FP16x16) -> FP16x16 {
- return core_math::pow(self, b);
+ core_math::pow(self, b)
}
fn round(self: FP16x16) -> FP16x16 {
- return core_math::round(self);
+ core_math::round(self)
}
fn sin(self: FP16x16) -> FP16x16 {
- return trig::sin_fast(self);
+ trig::sin_fast(self)
}
fn sin_fast(self: FP16x16) -> FP16x16 {
- return trig::sin_fast(self);
+ trig::sin_fast(self)
}
fn sinh(self: FP16x16) -> FP16x16 {
- return hyp::sinh(self);
+ hyp::sinh(self)
}
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP16x16) -> FP16x16 {
- return core_math::sqrt(self);
+ core_math::sqrt(self)
}
fn tan(self: FP16x16) -> FP16x16 {
- return trig::tan_fast(self);
+ trig::tan_fast(self)
}
fn tan_fast(self: FP16x16) -> FP16x16 {
- return trig::tan_fast(self);
+ trig::tan_fast(self)
}
fn tanh(self: FP16x16) -> FP16x16 {
- return hyp::tanh(self);
+ hyp::tanh(self)
}
fn sign(self: FP16x16) -> FP16x16 {
- return core_math::sign(self);
+ core_math::sign(self)
}
fn NaN() -> FP16x16 {
- return FP16x16 { mag: 0, sign: true };
+ FP16x16 { mag: 0, sign: true }
}
fn is_nan(self: FP16x16) -> bool {
@@ -197,15 +192,15 @@ impl FP16x16Impl of FixedTrait {
}
fn INF() -> FP16x16 {
- return FP16x16 { mag: 4294967295, sign: false };
+ FP16x16 { mag: 4294967295, sign: false }
}
fn POS_INF() -> FP16x16 {
- return FP16x16 { mag: 4294967295, sign: false };
+ FP16x16 { mag: 4294967295, sign: false }
}
fn NEG_INF() -> FP16x16 {
- return FP16x16 { mag: 4294967295, sign: true };
+ FP16x16 { mag: 4294967295, sign: true }
}
fn is_inf(self: FP16x16) -> bool {
@@ -221,7 +216,7 @@ impl FP16x16Impl of FixedTrait {
}
fn erf(self: FP16x16) -> FP16x16 {
- return erf::erf(self);
+ erf::erf(self)
}
}
@@ -239,9 +234,9 @@ impl FP16x16IntoFelt252 of Into {
let mag_felt = self.mag.into();
if self.sign {
- return mag_felt * -1;
+ mag_felt * -1
} else {
- return mag_felt * 1;
+ mag_felt * 1
}
}
}
@@ -262,10 +257,10 @@ impl FP16x16TryIntoI8 of TryInto {
impl FP16x16TryIntoU128 of TryInto {
fn try_into(self: FP16x16) -> Option {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return Option::Some((self.mag / ONE).into());
+ Option::Some((self.mag / ONE).into())
}
}
}
@@ -273,10 +268,10 @@ impl FP16x16TryIntoU128 of TryInto {
impl FP16x16TryIntoU64 of TryInto {
fn try_into(self: FP16x16) -> Option {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return Option::Some((self.mag / ONE).into());
+ Option::Some((self.mag / ONE).into())
}
}
}
@@ -284,10 +279,10 @@ impl FP16x16TryIntoU64 of TryInto {
impl FP16x16TryIntoU32 of TryInto {
fn try_into(self: FP16x16) -> Option {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return Option::Some(self.mag / ONE);
+ Option::Some(self.mag / ONE)
}
}
}
@@ -298,7 +293,7 @@ impl FP16x16TryIntoU16 of TryInto {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -309,7 +304,7 @@ impl FP16x16TryIntoU8 of TryInto {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -317,18 +312,18 @@ impl FP16x16TryIntoU8 of TryInto {
impl FP16x16PartialEq of PartialEq {
#[inline(always)]
fn eq(lhs: @FP16x16, rhs: @FP16x16) -> bool {
- return core_math::eq(lhs, rhs);
+ core_math::eq(lhs, rhs)
}
#[inline(always)]
fn ne(lhs: @FP16x16, rhs: @FP16x16) -> bool {
- return core_math::ne(lhs, rhs);
+ core_math::ne(lhs, rhs)
}
}
impl FP16x16Add of Add {
fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core_math::add(lhs, rhs);
+ core_math::add(lhs, rhs)
}
}
@@ -341,7 +336,7 @@ impl FP16x16AddEq of AddEq {
impl FP16x16Sub of Sub {
fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core_math::sub(lhs, rhs);
+ core_math::sub(lhs, rhs)
}
}
@@ -354,7 +349,7 @@ impl FP16x16SubEq of SubEq {
impl FP16x16Mul of Mul {
fn mul(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core_math::mul(lhs, rhs);
+ core_math::mul(lhs, rhs)
}
}
@@ -367,7 +362,7 @@ impl FP16x16MulEq of MulEq {
impl FP16x16Div of Div {
fn div(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core_math::div(lhs, rhs);
+ core_math::div(lhs, rhs)
}
}
@@ -381,48 +376,47 @@ impl FP16x16DivEq of DivEq {
impl FP16x16PartialOrd of PartialOrd {
#[inline(always)]
fn ge(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core_math::ge(lhs, rhs);
+ core_math::ge(lhs, rhs)
}
#[inline(always)]
fn gt(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core_math::gt(lhs, rhs);
+ core_math::gt(lhs, rhs)
}
#[inline(always)]
fn le(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core_math::le(lhs, rhs);
+ core_math::le(lhs, rhs)
}
#[inline(always)]
fn lt(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core_math::lt(lhs, rhs);
+ core_math::lt(lhs, rhs)
}
}
impl FP16x16Neg of Neg {
#[inline(always)]
fn neg(a: FP16x16) -> FP16x16 {
- return core_math::neg(a);
+ core_math::neg(a)
}
}
impl FP16x16Rem of Rem {
#[inline(always)]
fn rem(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core_math::rem(lhs, rhs);
+ core_math::rem(lhs, rhs)
}
}
-
/// INTERNAL
-
fn _i32_into_fp(x: FP16x16) -> i32 {
let number_felt: felt252 = (x.mag / ONE).into();
let number_i32: i32 = number_felt.try_into().unwrap();
if x.sign {
return number_i32 * -1_i32;
}
+
number_i32
}
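
A short sketch of the sentinel encodings this impl settles on; everything below is read directly from the hunks above:

```cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FP16x16Impl, FixedTrait
};

fn sentinel_checks() -> (bool, bool, bool) {
    // FP16x16 reserves mag = 0 with sign = true for NaN, and
    // mag = 4294967295 (2^32 - 1) for the two infinities.
    let nan = FP16x16Impl::NaN();
    let pos_inf = FP16x16Impl::POS_INF();
    let neg_inf = FP16x16Impl::NEG_INF();

    (nan.is_nan(), pos_inf.is_inf(), neg_inf.is_inf())
}
```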
diff --git a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
index 0cd5a8f0f..d18fc4108 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
@@ -1,5 +1,4 @@
use core::debug::PrintTrait;
-use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Sub, FP16x16Div, FixedTrait, FP16x16Print
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
index ddf153f18..b53adc614 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
@@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{
};
fn max(a: FP16x16, b: FP16x16) -> FP16x16 {
- if (a >= b) {
- return a;
+ if a >= b {
+ a
} else {
- return b;
+ b
}
}
fn min(a: FP16x16, b: FP16x16) -> FP16x16 {
- if (a <= b) {
- return a;
+ if a <= b {
+ a
} else {
- return b;
+ b
}
}
fn xor(a: FP16x16, b: FP16x16) -> bool {
if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(a: FP16x16, b: FP16x16) -> bool {
let zero = FixedTrait::new(0, false);
if a == zero && b == zero {
- return false;
+ false
} else {
- return true;
+ true
}
}
fn and(a: FP16x16, b: FP16x16) -> bool {
let zero = FixedTrait::new(0, false);
if a == zero || b == zero {
- return false;
+ false
} else {
- return true;
+ true
}
}
fn where(a: FP16x16, b: FP16x16, c: FP16x16) -> FP16x16 {
if a == FixedTrait::new(0, false) {
- return c;
+ c
} else {
- return b;
+ b
}
}
fn bitwise_and(a: FP16x16, b: FP16x16) -> FP16x16 {
- return FixedTrait::new(a.mag & b.mag, a.sign & b.sign);
+ FixedTrait::new(a.mag & b.mag, a.sign & b.sign)
}
fn bitwise_xor(a: FP16x16, b: FP16x16) -> FP16x16 {
- return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign);
+ FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign)
}
fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 {
- return FixedTrait::new(a.mag | b.mag, a.sign | b.sign);
+ FixedTrait::new(a.mag | b.mag, a.sign | b.sign)
}
// Tests --------------------------------------------------------------------------------------------------------------
@@ -70,7 +70,6 @@ fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 {
mod tests {
use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or};
-
#[test]
fn test_max() {
let a = FixedTrait::new_unscaled(1, false);
@@ -127,6 +126,7 @@ mod tests {
assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
}
+ #[test]
fn test_bitwise_or() {
let a = FixedTrait::new(225280, false); // 3.4375
let b = FixedTrait::new(4160843776, true); // -2046.5625
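
The helpers in this file treat a zero magnitude as boolean false, which the parenthesis cleanup above makes easier to see at a glance. A usage sketch, assuming the module-level functions are importable as shown:

```cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FP16x16Impl, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16::math::comp::{and, or, xor, where};

fn truthiness_demo() -> FP16x16 {
    // mag == 0 reads as "false"; any non-zero magnitude reads as "true".
    let zero: FP16x16 = FixedTrait::new(0, false);
    let one: FP16x16 = FixedTrait::new_unscaled(1, false);
    let two: FP16x16 = FixedTrait::new_unscaled(2, false);

    assert(!and(zero, one), 'and: zero is falsy');
    assert(or(zero, one), 'or: one is truthy');
    assert(xor(zero, one), 'xor: exactly one zero');

    // where(cond, x, y) yields y when cond == 0 and x otherwise.
    where(one, two, zero)
}
```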
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
index d477f051d..0085d2639 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
@@ -1,9 +1,4 @@
-use core::debug::PrintTrait;
-use core::option::OptionTrait;
-use core::result::{ResultTrait, ResultTraitImpl};
-use core::traits::{Into, TryInto};
use core::integer;
-use core::integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul};
use orion::numbers::fixed_point::implementations::fp16x16::core::{
HALF, ONE, MAX, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
@@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{
use orion::numbers::fixed_point::implementations::fp16x16::math::lut;
// PUBLIC
-
fn abs(a: FP16x16) -> FP16x16 {
- return FixedTrait::new(a.mag, false);
+ FixedTrait::new(a.mag, false)
}
fn add(a: FP16x16, b: FP16x16) -> FP16x16 {
@@ -28,23 +22,23 @@ fn add(a: FP16x16, b: FP16x16) -> FP16x16 {
}
if (a.mag > b.mag) {
- return FixedTrait::new(a.mag - b.mag, a.sign);
+ FixedTrait::new(a.mag - b.mag, a.sign)
} else {
- return FixedTrait::new(b.mag - a.mag, b.sign);
+ FixedTrait::new(b.mag - a.mag, b.sign)
}
}
fn ceil(a: FP16x16) -> FP16x16 {
- let (div, rem) = u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+ let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
if rem == 0 {
- return a;
+ a
} else if !a.sign {
- return FixedTrait::new_unscaled(div + 1, false);
+ FixedTrait::new_unscaled(div + 1, false)
} else if div == 0 {
- return FixedTrait::new_unscaled(0, false);
+ FixedTrait::new_unscaled(0, false)
} else {
- return FixedTrait::new_unscaled(div, true);
+ FixedTrait::new_unscaled(div, true)
}
}
@@ -53,16 +47,16 @@ fn div(a: FP16x16, b: FP16x16) -> FP16x16 {
let res_u64 = a_u64 / b.mag.into();
// Re-apply sign
- return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign);
+ FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
}
fn eq(a: @FP16x16, b: @FP16x16) -> bool {
- return (*a.mag == *b.mag) && (*a.sign == *b.sign);
+ (*a.mag == *b.mag) && (*a.sign == *b.sign)
}
// Calculates the natural exponent of x: e^x
fn exp(a: FP16x16) -> FP16x16 {
- return exp2(FixedTrait::new(94548, false) * a); // log2(e) * 2^23 ≈ 12102203
+ exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^16 ≈ 94548
}
// Calculates the binary exponent of x: 2^x
@@ -71,7 +65,7 @@ fn exp2(a: FP16x16) -> FP16x16 {
return FixedTrait::ONE();
}
- let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+ let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
let mut res_u = int_res;
@@ -87,57 +81,57 @@ fn exp2(a: FP16x16) -> FP16x16 {
res_u = res_u * (r1 + FixedTrait::ONE());
}
- if (a.sign == true) {
- return FixedTrait::ONE() / res_u;
+ if a.sign {
+ FixedTrait::ONE() / res_u
} else {
- return res_u;
+ res_u
}
}
fn exp2_int(exp: u32) -> FP16x16 {
- return FixedTrait::new_unscaled(lut::exp2(exp), false);
+ FixedTrait::new_unscaled(lut::exp2(exp), false)
}
fn floor(a: FP16x16) -> FP16x16 {
- let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+ let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
if rem == 0 {
- return a;
+ a
} else if !a.sign {
- return FixedTrait::new_unscaled(div, false);
+ FixedTrait::new_unscaled(div, false)
} else {
- return FixedTrait::new_unscaled(div + 1, true);
+ FixedTrait::new_unscaled(div + 1, true)
}
}
fn ge(a: FP16x16, b: FP16x16) -> bool {
if a.sign != b.sign {
- return !a.sign;
+ !a.sign
} else {
- return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign);
+ (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
}
}
fn gt(a: FP16x16, b: FP16x16) -> bool {
if a.sign != b.sign {
- return !a.sign;
+ !a.sign
} else {
- return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign);
+ (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
}
}
fn le(a: FP16x16, b: FP16x16) -> bool {
if a.sign != b.sign {
- return a.sign;
+ a.sign
} else {
- return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign);
+ (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
}
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(a: FP16x16) -> FP16x16 {
- return FixedTrait::new(45426, false) * log2(a); // ln(2) = 0.693...
+ FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693...
}
// Calculates the binary logarithm of x: log2(x)
@@ -157,7 +151,7 @@ fn log2(a: FP16x16) -> FP16x16 {
let (msb, div) = lut::msb(whole);
if a.mag == div * ONE {
- return FixedTrait::new_unscaled(msb, false);
+ FixedTrait::new_unscaled(msb, false)
} else {
let norm = a / FixedTrait::new_unscaled(div, false);
let r8 = FixedTrait::new(596, true) * norm;
@@ -168,21 +162,22 @@ fn log2(a: FP16x16) -> FP16x16 {
let r3 = (r4 + FixedTrait::new(608566, false)) * norm;
let r2 = (r3 + FixedTrait::new(655828, true)) * norm;
let r1 = (r2 + FixedTrait::new(534433, false)) * norm;
- return r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false);
+
+ r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false)
}
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(a: FP16x16) -> FP16x16 {
- return FixedTrait::new(19728, false) * log2(a); // log10(2) = 0.301...
+ FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301...
}
fn lt(a: FP16x16, b: FP16x16) -> bool {
if a.sign != b.sign {
- return a.sign;
+ a.sign
} else {
- return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign);
+ (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign)
}
}
@@ -190,20 +185,20 @@ fn mul(a: FP16x16, b: FP16x16) -> FP16x16 {
let prod_u128 = integer::u32_wide_mul(a.mag, b.mag);
// Re-apply sign
- return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign);
+ FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign)
}
fn ne(a: @FP16x16, b: @FP16x16) -> bool {
- return (*a.mag != *b.mag) || (*a.sign != *b.sign);
+ (*a.mag != *b.mag) || (*a.sign != *b.sign)
}
fn neg(a: FP16x16) -> FP16x16 {
if a.mag == 0 {
- return a;
+ a
} else if !a.sign {
- return FixedTrait::new(a.mag, !a.sign);
+ FixedTrait::new(a.mag, !a.sign)
} else {
- return FixedTrait::new(a.mag, false);
+ FixedTrait::new(a.mag, false)
}
}
@@ -211,7 +206,7 @@ fn neg(a: FP16x16) -> FP16x16 {
// self is a FP16x16 fixed point value
// b is a FP16x16 fixed point value
fn pow(a: FP16x16, b: FP16x16) -> FP16x16 {
- let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE));
+ let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE));
// use the more performant integer pow when y is an int
if (rem == 0) {
@@ -219,7 +214,7 @@ fn pow(a: FP16x16, b: FP16x16) -> FP16x16 {
}
// x^y = exp(y*ln(x)) for x > 0 will error for x < 0
- return exp(b * ln(a));
+ exp(b * ln(a))
}
// Calculates the value of a^b and checks for overflow before returning
@@ -227,7 +222,7 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 {
let mut x = a;
let mut n = b;
- if sign == true {
+ if sign {
x = FixedTrait::ONE() / x;
}
@@ -238,11 +233,7 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 {
let mut y = FixedTrait::ONE();
let two = integer::u32_as_non_zero(2);
- loop {
- if n <= 1 {
- break;
- }
-
+ while n > 1 {
let (div, rem) = integer::u32_safe_divmod(n, two);
if rem == 1 {
@@ -253,20 +244,20 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 {
n = div;
};
- return x * y;
+ x * y
}
fn rem(a: FP16x16, b: FP16x16) -> FP16x16 {
- return a - floor(a / b) * b;
+ a - floor(a / b) * b
}
fn round(a: FP16x16) -> FP16x16 {
- let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+ let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
if (HALF <= rem) {
- return FixedTrait::new_unscaled(div + 1, a.sign);
+ FixedTrait::new_unscaled(div + 1, a.sign)
} else {
- return FixedTrait::new_unscaled(div, a.sign);
+ FixedTrait::new_unscaled(div, a.sign)
}
}
@@ -276,11 +267,12 @@ fn sqrt(a: FP16x16) -> FP16x16 {
assert(a.sign == false, 'must be positive');
let root = integer::u64_sqrt(a.mag.into() * ONE.into());
- return FixedTrait::new(root.into(), false);
+
+ FixedTrait::new(root.into(), false)
}
fn sub(a: FP16x16, b: FP16x16) -> FP16x16 {
- return add(a, -b);
+ add(a, -b)
}
fn sign(a: FP16x16) -> FP16x16 {
@@ -467,7 +459,7 @@ mod tests {
let a = FixedTrait::new_unscaled(42, false);
let b = FixedTrait::new_unscaled(42, false);
let c = eq(@a, @b);
- assert(c == true, 'invalid result');
+ assert(c, 'invalid result');
}
#[test]
@@ -475,7 +467,7 @@ mod tests {
let a = FixedTrait::new_unscaled(42, false);
let b = FixedTrait::new_unscaled(42, false);
let c = ne(@a, @b);
- assert(c == false, 'invalid result');
+ assert(!c, 'invalid result');
}
#[test]
@@ -549,12 +541,12 @@ mod tests {
let c = FixedTrait::<FP16x16>::new_unscaled(1, true);
assert(a <= a, 'a <= a');
- assert(a <= b == false, 'a <= b');
- assert(a <= c == false, 'a <= c');
+ assert(!(a <= b), 'a <= b');
+ assert(!(a <= c), 'a <= c');
assert(b <= a, 'b <= a');
assert(b <= b, 'b <= b');
- assert(b <= c == false, 'b <= c');
+ assert(!(b <= c), 'b <= c');
assert(c <= a, 'c <= a');
assert(c <= b, 'c <= b');
@@ -567,17 +559,17 @@ mod tests {
let b = FixedTrait::new_unscaled(0, false);
let c = FixedTrait::<FP16x16>::new_unscaled(1, true);
- assert(a < a == false, 'a < a');
- assert(a < b == false, 'a < b');
- assert(a < c == false, 'a < c');
+ assert(!(a < a), 'a < a');
+ assert(!(a < b), 'a < b');
+ assert(!(a < c), 'a < c');
assert(b < a, 'b < a');
- assert(b < b == false, 'b < b');
- assert(b < c == false, 'b < c');
+ assert(!(b < b), 'b < b');
+ assert(!(b < c), 'b < c');
assert(c < a, 'c < a');
assert(c < b, 'c < b');
- assert(c < c == false, 'c < c');
+ assert(!(c < c), 'c < c');
}
#[test]
@@ -590,12 +582,12 @@ mod tests {
assert(a >= b, 'a >= b');
assert(a >= c, 'a >= c');
- assert(b >= a == false, 'b >= a');
+ assert(!(b >= a), 'b >= a');
assert(b >= b, 'b >= b');
assert(b >= c, 'b >= c');
- assert(c >= a == false, 'c >= a');
- assert(c >= b == false, 'c >= b');
+ assert(!(c >= a), 'c >= a');
+ assert(!(c >= b), 'c >= b');
assert(c >= c, 'c >= c');
}
@@ -605,17 +597,17 @@ mod tests {
let b = FixedTrait::new_unscaled(0, false);
let c = FixedTrait::<FP16x16>::new_unscaled(1, true);
- assert(a > a == false, 'a > a');
+ assert(!(a > a), 'a > a');
assert(a > b, 'a > b');
assert(a > c, 'a > c');
- assert(b > a == false, 'b > a');
- assert(b > b == false, 'b > b');
+ assert(!(b > a), 'b > a');
+ assert(!(b > b), 'b > b');
assert(b > c, 'b > c');
- assert(c > a == false, 'c > a');
- assert(c > b == false, 'c > b');
- assert(c > c == false, 'c > c');
+ assert(!(c > a), 'c > a');
+ assert(!(c > b), 'c > b');
+ assert(!(c > c), 'c > c');
}
#[test]
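
The `pow_int` hunk above swaps a manual `loop`/`break` for `while n > 1`; the algorithm underneath is ordinary exponentiation by squaring. A standalone sketch over plain `u32` (assumes `exp >= 1`; `pow_int` dispatches the zero exponent before reaching the loop):

```cairo
// Square-and-multiply, the shape pow_int keeps after the while rewrite.
fn pow_by_squaring(base: u32, exp: u32) -> u32 {
    let mut x = base;
    let mut n = exp;
    let mut y = 1_u32;

    while n > 1 {
        // Fold odd factors into y, square x, halve n.
        if n % 2 == 1 {
            y = y * x;
        }
        x = x * x;
        n = n / 2;
    };

    // One factor of x is still pending when the loop exits at n == 1.
    x * y
}
```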
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
index 86f87f5ca..4561e5b78 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
@@ -1,4 +1,3 @@
-use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE, FP16x16, FixedTrait};
use orion::numbers::fixed_point::implementations::fp16x16::math::lut::erf_lut;
@@ -20,5 +19,6 @@ fn erf(x: FP16x16) -> FP16x16 {
} else {
erf_value = ONE;
}
+
FP16x16 { mag: erf_value, sign: x.sign }
}
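
One property worth noting in the hunk above: the sign of the input passes straight through to the result, which is exactly the odd symmetry erf(-x) = -erf(x); the magnitude comes from `erf_lut` and saturates to `ONE` for large inputs. A sketch:

```cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FP16x16Impl, FixedTrait, ONE
};

fn erf_is_odd() -> (FP16x16, FP16x16) {
    let x: FP16x16 = FixedTrait::new(ONE / 2, false); // 0.5
    let neg_x: FP16x16 = FixedTrait::new(ONE / 2, true); // -0.5

    // Equal magnitudes, opposite signs.
    (x.erf(), neg_x.erf())
}
```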
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
index 78d0cdac2..b77271087 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
@@ -1,4 +1,3 @@
-use core::debug::PrintTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
FP16x16MulEq, FP16x16TryIntoU128, FP16x16PartialEq, FP16x16PartialOrd, FP16x16SubEq, FP16x16Neg,
@@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{
// Calculates hyperbolic cosine of a (fixed point)
fn cosh(a: FP16x16) -> FP16x16 {
let ea = a.exp();
- return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false);
+
+ (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}
// Calculates hyperbolic sine of a (fixed point)
fn sinh(a: FP16x16) -> FP16x16 {
let ea = a.exp();
- return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false);
+
+ (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}
// Calculates hyperbolic tangent of a (fixed point)
fn tanh(a: FP16x16) -> FP16x16 {
let ea = a.exp();
let ea_i = FixedTrait::ONE() / ea;
- return (ea - ea_i) / (ea + ea_i);
+
+ (ea - ea_i) / (ea + ea_i)
}
// Calculates inverse hyperbolic cosine of a (fixed point)
fn acosh(a: FP16x16) -> FP16x16 {
let root = (a * a - FixedTrait::ONE()).sqrt();
- return (a + root).ln();
+
+ (a + root).ln()
}
// Calculates inverse hyperbolic sine of a (fixed point)
fn asinh(a: FP16x16) -> FP16x16 {
let root = (a * a + FixedTrait::ONE()).sqrt();
- return (a + root).ln();
+
+ (a + root).ln()
}
// Calculates inverse hyperbolic tangent of a (fixed point)
fn atanh(a: FP16x16) -> FP16x16 {
let one = FixedTrait::ONE();
let ln_arg = (one + a) / (one - a);
- return ln_arg.ln() / FixedTrait::new(TWO, false);
+
+ ln_arg.ln() / FixedTrait::new(TWO, false)
}
// Tests --------------------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
- use core::option::OptionTrait;
- use core::traits::Into;
-
use orion::numbers::fixed_point::implementations::fp16x16::helpers::assert_precise;
use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF};
-
#[test]
#[available_gas(10000000)]
fn test_cosh() {
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo
index 65c9746c1..723ac975f 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo
@@ -54,7 +54,7 @@ fn msb(whole: u32) -> (u32, u32) {
}
}
- return (16, 65536);
+ (16, 65536)
}
fn exp2(exp: u32) -> u32 {
@@ -112,7 +112,7 @@ fn exp2(exp: u32) -> u32 {
}
}
- return 65536;
+ 65536
}
fn sin(a: u32) -> (u32, u32, u32) {
@@ -929,7 +929,7 @@ fn sin(a: u32) -> (u32, u32, u32) {
}
}
- return (102542, 65535, 65536);
+ (102542, 65535, 65536)
}
fn atan(a: u32) -> (u32, u32, u32) {
@@ -1233,7 +1233,7 @@ fn atan(a: u32) -> (u32, u32, u32) {
return (44958, 39405, 39716);
}
- return (45416, 39716, 40025);
+ (45416, 39716, 40025)
}
fn erf_lut(x: u32) -> u32 {
@@ -1925,5 +1925,6 @@ fn erf_lut(x: u32) -> u32 {
return 65535;
}
}
- return ONE;
+
+ ONE
}
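
All of these lookup helpers share one shape, which the cleanup above makes uniform: a cascade of early returns over magnitude buckets, with the final bucket left as the tail expression. A toy version of `msb`, capped at 2^4 instead of FP16x16's 2^16:

```cairo
// Returns (msb index, 2^msb) for small inputs; the real table runs to 2^16.
fn msb_sketch(whole: u32) -> (u32, u32) {
    if whole < 2 {
        return (0, 1);
    }
    if whole < 4 {
        return (1, 2);
    }
    if whole < 8 {
        return (2, 4);
    }
    if whole < 16 {
        return (3, 8);
    }

    (4, 16)
}
```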
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
index 8b0d9b47f..7c4ad199c 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
@@ -1,6 +1,4 @@
-use core::debug::PrintTrait;
-use core::integer::{u32_safe_divmod, u32_as_non_zero};
-use core::option::OptionTrait;
+use core::integer;
use orion::numbers::fixed_point::implementations::fp16x16::math::lut;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
@@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{
};
// CONSTANTS
-
const TWO_PI: u32 = 411775;
const PI: u32 = 205887;
const HALF_PI: u32 = 102944;
@@ -22,10 +19,10 @@ fn acos(a: FP16x16) -> FP16x16 {
let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
let asin_res = asin(asin_arg);
- if (a.sign) {
- return FixedTrait::new(PI, false) - asin_res;
+ if a.sign {
+ FixedTrait::new(PI, false) - asin_res
} else {
- return asin_res;
+ asin_res
}
}
@@ -33,10 +30,10 @@ fn acos_fast(a: FP16x16) -> FP16x16 {
let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
let asin_res = asin_fast(asin_arg);
- if (a.sign) {
- return FixedTrait::new(PI, false) - asin_res;
+ if a.sign {
+ FixedTrait::new(PI, false) - asin_res
} else {
- return asin_res;
+ asin_res
}
}
@@ -48,7 +45,8 @@ fn asin(a: FP16x16) -> FP16x16 {
}
let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
- return atan(a / div);
+
+ atan(a / div)
}
fn asin_fast(a: FP16x16) -> FP16x16 {
@@ -57,7 +55,8 @@ fn asin_fast(a: FP16x16) -> FP16x16 {
}
let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
- return atan_fast(a / div);
+
+ atan_fast(a / div)
}
// Calculates arctan(a) (fixed point)
@@ -100,10 +99,9 @@ fn atan(a: FP16x16) -> FP16x16 {
res = res - FixedTrait::new(HALF_PI, false);
}
- return FixedTrait::new(res.mag, a.sign);
+ FixedTrait::new(res.mag, a.sign)
}
-
fn atan_fast(a: FP16x16) -> FP16x16 {
let mut at = a.abs();
let mut shift = false;
@@ -135,31 +133,32 @@ fn atan_fast(a: FP16x16) -> FP16x16 {
res = res - FixedTrait::<FP16x16>::new(HALF_PI, false);
}
- return FixedTrait::new(res.mag, a.sign);
+ FixedTrait::new(res.mag, a.sign)
}
// Calculates cos(a) with a in radians (fixed point)
fn cos(a: FP16x16) -> FP16x16 {
- return sin(FixedTrait::new(HALF_PI, false) - a);
+ sin(FixedTrait::new(HALF_PI, false) - a)
}
fn cos_fast(a: FP16x16) -> FP16x16 {
- return sin_fast(FixedTrait::new(HALF_PI, false) - a);
+ sin_fast(FixedTrait::new(HALF_PI, false) - a)
}
fn sin(a: FP16x16) -> FP16x16 {
let a1 = a.mag % TWO_PI;
- let (whole_rem, partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI));
+ let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI));
let a2 = FixedTrait::new(partial_rem, false);
let partial_sign = whole_rem == 1;
let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
- return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0);
+
+ FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}
fn sin_fast(a: FP16x16) -> FP16x16 {
let a1 = a.mag % TWO_PI;
- let (whole_rem, mut partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI));
+ let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI));
let partial_sign = whole_rem == 1;
if partial_rem >= HALF_PI {
@@ -171,7 +170,7 @@ fn sin_fast(a: FP16x16) -> FP16x16 {
let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
+ FixedTrait::<FP16x16>::new(low, false);
- return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0);
+ FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}
// Calculates tan(a) with a in radians (fixed point)
@@ -179,14 +178,16 @@ fn tan(a: FP16x16) -> FP16x16 {
let sinx = sin(a);
let cosx = cos(a);
assert(cosx.mag != 0, 'tan undefined');
- return sinx / cosx;
+
+ sinx / cosx
}
fn tan_fast(a: FP16x16) -> FP16x16 {
let sinx = sin_fast(a);
let cosx = cos_fast(a);
assert(cosx.mag != 0, 'tan undefined');
- return sinx / cosx;
+
+ sinx / cosx
}
// Helper function to calculate Taylor series for sin
@@ -199,15 +200,13 @@ fn _sin_loop(a: FP16x16, i: u32, acc: FP16x16) -> FP16x16 {
return new_acc;
}
- return _sin_loop(a, i - 1, new_acc);
+ _sin_loop(a, i - 1, new_acc)
}
// Tests --------------------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
- use core::traits::Into;
-
use orion::numbers::fixed_point::implementations::fp16x16::helpers::{
assert_precise, assert_relative
};
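
A quick orientation sketch for the trig hunks above: `cos` is derived from `sin` via the phase shift sin(π/2 − a), and the `_fast` variants interpolate a LUT, so results are exact only up to interpolation error:

```cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FP16x16Impl, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16::math::trig::{
    HALF_PI, sin_fast, cos_fast
};

fn quarter_turn() -> (FP16x16, FP16x16) {
    // sin(pi/2) = 1 and cos(pi/2) = 0, up to LUT interpolation error.
    let x: FP16x16 = FixedTrait::new(HALF_PI, false);
    (sin_fast(x), cos_fast(x))
}
```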
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
index 9c97cce46..0a6c4795e 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
@@ -1,9 +1,5 @@
use core::debug::PrintTrait;
-use core::option::OptionTrait;
-use core::result::{ResultTrait, ResultTraitImpl};
-use core::traits::{TryInto, Into};
-
use orion::numbers::{fixed_point::core::FixedTrait, FP16x16};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::{
core as core_math, trig, hyp, erf
@@ -18,178 +14,177 @@ struct FP16x16W {
}
// CONSTANTS
-
const TWO: u64 = 131072; // 2 ** 17
const ONE: u64 = 65536; // 2 ** 16
const HALF: u64 = 32768; // 2 ** 15
const MAX: u64 = 2147483648; // 2 ** 31
-
impl FP16x16WImpl of FixedTrait {
fn ZERO() -> FP16x16W {
- return FP16x16W { mag: 0, sign: false };
+ FP16x16W { mag: 0, sign: false }
}
fn HALF() -> FP16x16W {
- return FP16x16W { mag: HALF, sign: false };
+ FP16x16W { mag: HALF, sign: false }
}
fn ONE() -> FP16x16W {
- return FP16x16W { mag: ONE, sign: false };
+ FP16x16W { mag: ONE, sign: false }
}
fn MAX() -> FP16x16W {
- return FP16x16W { mag: MAX, sign: false };
+ FP16x16W { mag: MAX, sign: false }
}
fn new(mag: u64, sign: bool) -> FP16x16W {
- return FP16x16W { mag: mag, sign: sign };
+ FP16x16W { mag: mag, sign: sign }
}
fn new_unscaled(mag: u64, sign: bool) -> FP16x16W {
- return FP16x16W { mag: mag * ONE, sign: sign };
+ FP16x16W { mag: mag * ONE, sign: sign }
}
fn from_felt(val: felt252) -> FP16x16W {
let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
- return FixedTrait::new(mag, utils::felt_sign(val));
+
+ FixedTrait::new(mag, utils::felt_sign(val))
}
fn abs(self: FP16x16W) -> FP16x16W {
- return core_math::abs(self);
+ core_math::abs(self)
}
fn acos(self: FP16x16W) -> FP16x16W {
- return trig::acos_fast(self);
+ trig::acos_fast(self)
}
fn acos_fast(self: FP16x16W) -> FP16x16W {
- return trig::acos_fast(self);
+ trig::acos_fast(self)
}
fn acosh(self: FP16x16W) -> FP16x16W {
- return hyp::acosh(self);
+ hyp::acosh(self)
}
fn asin(self: FP16x16W) -> FP16x16W {
- return trig::asin_fast(self);
+ trig::asin_fast(self)
}
fn asin_fast(self: FP16x16W) -> FP16x16W {
- return trig::asin_fast(self);
+ trig::asin_fast(self)
}
fn asinh(self: FP16x16W) -> FP16x16W {
- return hyp::asinh(self);
+ hyp::asinh(self)
}
fn atan(self: FP16x16W) -> FP16x16W {
- return trig::atan_fast(self);
+ trig::atan_fast(self)
}
fn atan_fast(self: FP16x16W) -> FP16x16W {
- return trig::atan_fast(self);
+ trig::atan_fast(self)
}
fn atanh(self: FP16x16W) -> FP16x16W {
- return hyp::atanh(self);
+ hyp::atanh(self)
}
fn ceil(self: FP16x16W) -> FP16x16W {
- return core_math::ceil(self);
+ core_math::ceil(self)
}
fn cos(self: FP16x16W) -> FP16x16W {
- return trig::cos_fast(self);
+ trig::cos_fast(self)
}
fn cos_fast(self: FP16x16W) -> FP16x16W {
- return trig::cos_fast(self);
+ trig::cos_fast(self)
}
fn cosh(self: FP16x16W) -> FP16x16W {
- return hyp::cosh(self);
+ hyp::cosh(self)
}
fn floor(self: FP16x16W) -> FP16x16W {
- return core_math::floor(self);
+ core_math::floor(self)
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP16x16W) -> FP16x16W {
- return core_math::exp(self);
+ core_math::exp(self)
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP16x16W) -> FP16x16W {
- return core_math::exp2(self);
+ core_math::exp2(self)
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP16x16W) -> FP16x16W {
- return core_math::ln(self);
+ core_math::ln(self)
}
// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
fn log2(self: FP16x16W) -> FP16x16W {
- return core_math::log2(self);
+ core_math::log2(self)
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP16x16W) -> FP16x16W {
- return core_math::log10(self);
+ core_math::log10(self)
}
// Calculates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W {
- return core_math::pow(self, b);
+ core_math::pow(self, b)
}
fn round(self: FP16x16W) -> FP16x16W {
- return core_math::round(self);
+ core_math::round(self)
}
fn sin(self: FP16x16W) -> FP16x16W {
- return trig::sin_fast(self);
+ trig::sin_fast(self)
}
fn sin_fast(self: FP16x16W) -> FP16x16W {
- return trig::sin_fast(self);
+ trig::sin_fast(self)
}
fn sinh(self: FP16x16W) -> FP16x16W {
- return hyp::sinh(self);
+ hyp::sinh(self)
}
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP16x16W) -> FP16x16W {
- return core_math::sqrt(self);
+ core_math::sqrt(self)
}
fn tan(self: FP16x16W) -> FP16x16W {
- return trig::tan_fast(self);
+ trig::tan_fast(self)
}
fn tan_fast(self: FP16x16W) -> FP16x16W {
- return trig::tan_fast(self);
+ trig::tan_fast(self)
}
fn tanh(self: FP16x16W) -> FP16x16W {
- return hyp::tanh(self);
+ hyp::tanh(self)
}
fn sign(self: FP16x16W) -> FP16x16W {
- return core_math::sign(self);
+ core_math::sign(self)
}
fn NaN() -> FP16x16W {
- return FP16x16W { mag: 0, sign: true };
+ FP16x16W { mag: 0, sign: true }
}
fn is_nan(self: FP16x16W) -> bool {
@@ -197,15 +192,15 @@ impl FP16x16WImpl of FixedTrait {
}
fn INF() -> FP16x16W {
- return FP16x16W { mag: 4294967295, sign: false };
+ FP16x16W { mag: 4294967295, sign: false }
}
fn POS_INF() -> FP16x16W {
- return FP16x16W { mag: 4294967295, sign: false };
+ FP16x16W { mag: 4294967295, sign: false }
}
fn NEG_INF() -> FP16x16W {
- return FP16x16W { mag: 4294967295, sign: true };
+ FP16x16W { mag: 4294967295, sign: true }
}
fn is_inf(self: FP16x16W) -> bool {
@@ -221,7 +216,7 @@ impl FP16x16WImpl of FixedTrait {
}
fn erf(self: FP16x16W) -> FP16x16W {
- return erf::erf(self);
+ erf::erf(self)
}
}
@@ -239,9 +234,9 @@ impl FP16x16WIntoFelt252 of Into {
let mag_felt = self.mag.into();
if self.sign {
- return mag_felt * -1;
+ mag_felt * -1
} else {
- return mag_felt * 1;
+ mag_felt * 1
}
}
}
@@ -277,10 +272,10 @@ impl FP16x16WTryIntoI8 of TryInto {
impl FP16x16WTryIntoU128 of TryInto {
fn try_into(self: FP16x16W) -> Option {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return Option::Some((self.mag / ONE).into());
+ Option::Some((self.mag / ONE).into())
}
}
}
@@ -288,10 +283,10 @@ impl FP16x16WTryIntoU128 of TryInto {
impl FP16x16WTryIntoU64 of TryInto {
fn try_into(self: FP16x16W) -> Option {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return Option::Some((self.mag / ONE).into());
+ Option::Some((self.mag / ONE).into())
}
}
}
@@ -299,10 +294,10 @@ impl FP16x16WTryIntoU64 of TryInto {
impl FP16x16WTryIntoU32 of TryInto {
fn try_into(self: FP16x16W) -> Option {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -313,7 +308,7 @@ impl FP16x16WTryIntoU16 of TryInto {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -324,7 +319,7 @@ impl FP16x16WTryIntoU8 of TryInto {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -332,18 +327,18 @@ impl FP16x16WTryIntoU8 of TryInto<FP16x16W, u8> {
impl FP16x16WPartialEq of PartialEq<FP16x16W> {
#[inline(always)]
fn eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
- return core_math::eq(lhs, rhs);
+ core_math::eq(lhs, rhs)
}
#[inline(always)]
fn ne(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
- return core_math::ne(lhs, rhs);
+ core_math::ne(lhs, rhs)
}
}
impl FP16x16WAdd of Add<FP16x16W> {
fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core_math::add(lhs, rhs);
+ core_math::add(lhs, rhs)
}
}
@@ -356,7 +351,7 @@ impl FP16x16WAddEq of AddEq<FP16x16W> {
impl FP16x16WSub of Sub<FP16x16W> {
fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core_math::sub(lhs, rhs);
+ core_math::sub(lhs, rhs)
}
}
@@ -369,7 +364,7 @@ impl FP16x16WSubEq of SubEq<FP16x16W> {
impl FP16x16WMul of Mul<FP16x16W> {
fn mul(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core_math::mul(lhs, rhs);
+ core_math::mul(lhs, rhs)
}
}
@@ -382,7 +377,7 @@ impl FP16x16WMulEq of MulEq<FP16x16W> {
impl FP16x16WDiv of Div<FP16x16W> {
fn div(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core_math::div(lhs, rhs);
+ core_math::div(lhs, rhs)
}
}
@@ -396,48 +391,47 @@ impl FP16x16WDivEq of DivEq<FP16x16W> {
impl FP16x16WPartialOrd of PartialOrd<FP16x16W> {
#[inline(always)]
fn ge(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core_math::ge(lhs, rhs);
+ core_math::ge(lhs, rhs)
}
#[inline(always)]
fn gt(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core_math::gt(lhs, rhs);
+ core_math::gt(lhs, rhs)
}
#[inline(always)]
fn le(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core_math::le(lhs, rhs);
+ core_math::le(lhs, rhs)
}
#[inline(always)]
fn lt(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core_math::lt(lhs, rhs);
+ core_math::lt(lhs, rhs)
}
}
impl FP16x16WNeg of Neg<FP16x16W> {
#[inline(always)]
fn neg(a: FP16x16W) -> FP16x16W {
- return core_math::neg(a);
+ core_math::neg(a)
}
}
impl FP16x16WRem of Rem<FP16x16W> {
#[inline(always)]
fn rem(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core_math::rem(lhs, rhs);
+ core_math::rem(lhs, rhs)
}
}
-
/// INTERNAL
-
fn _i32_into_fp(x: FP16x16W) -> i32 {
let number_felt: felt252 = (x.mag / ONE).into();
let number_i32: i32 = number_felt.try_into().unwrap();
if x.sign {
return number_i32 * -1_i32;
}
+
number_i32
}
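
The conversions in this file all follow one rule: unscale by `ONE` (65536) and truncate toward zero, with negative inputs mapping to `Option::None` for the unsigned targets. A minimal sketch of that behavior (illustrative only, not part of the patch; it assumes just the types and impls visible in the hunks above):

use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FP16x16WImpl, FP16x16WTryIntoU32, FixedTrait
};

#[test]
fn sketch_unscale_and_truncate() {
    // 2.5 in 16.16 fixed point: mag = 2.5 * 65536 = 163840.
    let x: FP16x16W = FixedTrait::new(163840, false);
    let as_u32: Option<u32> = x.try_into();
    assert(as_u32.unwrap() == 2, 'truncates toward zero');

    // A negative value cannot fit an unsigned target.
    let y: FP16x16W = FixedTrait::new(163840, true);
    let neg: Option<u32> = y.try_into();
    assert(neg.is_none(), 'negative maps to None');
}
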
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
index c9627852a..ea5f7cf65 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
@@ -1,5 +1,4 @@
use core::debug::PrintTrait;
-use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WSub, FP16x16WDiv, FixedTrait, FP16x16WPrint
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
index 50f93edea..5573f7650 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
@@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
};
fn max(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- if (a >= b) {
- return a;
+ if a >= b {
+ a
} else {
- return b;
+ b
}
}
fn min(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- if (a <= b) {
- return a;
+ if a <= b {
+ a
} else {
- return b;
+ b
}
}
fn xor(a: FP16x16W, b: FP16x16W) -> bool {
if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(a: FP16x16W, b: FP16x16W) -> bool {
let zero = FixedTrait::new(0, false);
if a == zero && b == zero {
- return false;
+ false
} else {
- return true;
+ true
}
}
fn and(a: FP16x16W, b: FP16x16W) -> bool {
let zero = FixedTrait::new(0, false);
if a == zero || b == zero {
- return false;
+ false
} else {
- return true;
+ true
}
}
fn where(a: FP16x16W, b: FP16x16W, c: FP16x16W) -> FP16x16W {
if a == FixedTrait::new(0, false) {
- return c;
+ c
} else {
- return b;
+ b
}
}
fn bitwise_and(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- return FixedTrait::new(a.mag & b.mag, a.sign & b.sign);
+ FixedTrait::new(a.mag & b.mag, a.sign & b.sign)
}
fn bitwise_xor(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign);
+ FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign)
}
fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- return FixedTrait::new(a.mag | b.mag, a.sign | b.sign);
+ FixedTrait::new(a.mag | b.mag, a.sign | b.sign)
}
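
These helpers overload boolean logic onto fixed-point values: a zero magnitude reads as false, anything else as true, and `where(a, b, c)` is the resulting ternary select. A quick sketch of that convention (hypothetical test, not part of the patch):

use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FP16x16WImpl, FP16x16WPartialEq, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::comp::{and, where};

#[test]
fn sketch_zero_is_false() {
    let t: FP16x16W = FixedTrait::new_unscaled(1, false); // non-zero reads as true
    let f: FP16x16W = FixedTrait::new(0, false); // zero reads as false
    assert(!and(t, f), 'and with zero is false');
    assert(where(f, t, f) == f, 'zero selects c');
    assert(where(t, t, f) == t, 'non-zero selects b');
}
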
// Tests --------------------------------------------------------------------------------------------------------------
@@ -70,7 +70,6 @@ fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W {
mod tests {
use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or};
-
#[test]
fn test_max() {
let a = FixedTrait::new_unscaled(1, false);
@@ -127,6 +126,7 @@ mod tests {
assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
}
+ #[test]
fn test_bitwise_or() {
let a = FixedTrait::new(225280, false); // 3.4375
let b = FixedTrait::new(4160843776, true); // -2046.5625
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
index 902a54b48..cafc20e4d 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
@@ -1,9 +1,4 @@
-use core::debug::PrintTrait;
-use core::option::OptionTrait;
-use core::result::{ResultTrait, ResultTraitImpl};
-use core::traits::{Into, TryInto};
use core::integer;
-use core::integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul};
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
HALF, ONE, MAX, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul,
@@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut;
// PUBLIC
-
fn abs(a: FP16x16W) -> FP16x16W {
- return FixedTrait::new(a.mag, false);
+ FixedTrait::new(a.mag, false)
}
fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W {
@@ -28,23 +22,23 @@ fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W {
}
if (a.mag > b.mag) {
- return FixedTrait::new(a.mag - b.mag, a.sign);
+ FixedTrait::new(a.mag - b.mag, a.sign)
} else {
- return FixedTrait::new(b.mag - a.mag, b.sign);
+ FixedTrait::new(b.mag - a.mag, b.sign)
}
}
fn ceil(a: FP16x16W) -> FP16x16W {
- let (div, rem) = u64_safe_divmod(a.mag, u64_as_non_zero(ONE));
+ let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));
if rem == 0 {
- return a;
+ a
} else if !a.sign {
- return FixedTrait::new_unscaled(div + 1, false);
+ FixedTrait::new_unscaled(div + 1, false)
} else if div == 0 {
- return FixedTrait::new_unscaled(0, false);
+ FixedTrait::new_unscaled(0, false)
} else {
- return FixedTrait::new_unscaled(div, true);
+ FixedTrait::new_unscaled(div, true)
}
}
@@ -53,16 +47,16 @@ fn div(a: FP16x16W, b: FP16x16W) -> FP16x16W {
let res_u64 = a_u64 / b.mag.into();
// Re-apply sign
- return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign);
+ FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
}
fn eq(a: @FP16x16W, b: @FP16x16W) -> bool {
- return (*a.mag == *b.mag) && (*a.sign == *b.sign);
+ (*a.mag == *b.mag) && (*a.sign == *b.sign)
}
// Calculates the natural exponent of x: e^x
fn exp(a: FP16x16W) -> FP16x16W {
- return exp2(FixedTrait::new(94548, false) * a); // log2(e) * 2^16 ≈ 94548
+ exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^16 ≈ 94548
}
// Calculates the binary exponent of x: 2^x
@@ -71,7 +65,7 @@ fn exp2(a: FP16x16W) -> FP16x16W {
return FixedTrait::ONE();
}
- let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE));
+ let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));
let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
let mut res_u = int_res;
@@ -87,57 +81,57 @@ fn exp2(a: FP16x16W) -> FP16x16W {
res_u = res_u * (r1 + FixedTrait::ONE());
}
- if (a.sign == true) {
- return FixedTrait::ONE() / res_u;
+ if a.sign {
+ FixedTrait::ONE() / res_u
} else {
- return res_u;
+ res_u
}
}
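
`exp2` splits its argument as 2^(i + f) = 2^i * 2^f: the integer factor comes straight from `lut::exp2` and the fractional factor from the `r1..r8` polynomial, with a final reciprocal for negative inputs since 2^(-x) = 1 / 2^x. Integer inputs are therefore exact, as in this hypothetical check (assuming the polynomial is skipped when `frac_part` is zero and that the `exp2` trait method forwards here, as in the full source):

use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    HALF, FP16x16W, FP16x16WImpl, FP16x16WPartialEq, FixedTrait
};

#[test]
fn sketch_exp2_integer_cases() {
    let three: FP16x16W = FixedTrait::new_unscaled(3, false);
    assert(three.exp2() == FixedTrait::new_unscaled(8, false), '2^3 = 8');

    // Negative exponents take the reciprocal branch: 2^-1 = 0.5 exactly.
    let neg_one: FP16x16W = FixedTrait::new_unscaled(1, true);
    assert(neg_one.exp2() == FixedTrait::new(HALF, false), '2^-1 = 0.5');
}
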
fn exp2_int(exp: u64) -> FP16x16W {
- return FixedTrait::new_unscaled(lut::exp2(exp), false);
+ FixedTrait::new_unscaled(lut::exp2(exp), false)
}
fn floor(a: FP16x16W) -> FP16x16W {
- let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE));
+ let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));
if rem == 0 {
- return a;
+ a
} else if !a.sign {
- return FixedTrait::new_unscaled(div, false);
+ FixedTrait::new_unscaled(div, false)
} else {
- return FixedTrait::new_unscaled(div + 1, true);
+ FixedTrait::new_unscaled(div + 1, true)
}
}
fn ge(a: FP16x16W, b: FP16x16W) -> bool {
if a.sign != b.sign {
- return !a.sign;
+ !a.sign
} else {
- return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign);
+ (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
}
}
fn gt(a: FP16x16W, b: FP16x16W) -> bool {
if a.sign != b.sign {
- return !a.sign;
+ !a.sign
} else {
- return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign);
+ (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
}
}
fn le(a: FP16x16W, b: FP16x16W) -> bool {
if a.sign != b.sign {
- return a.sign;
+ a.sign
} else {
- return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign);
+ (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
}
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(a: FP16x16W) -> FP16x16W {
- return FixedTrait::new(45426, false) * log2(a); // ln(2) = 0.693...
+ FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693...
}
// Calculates the binary logarithm of x: log2(x)
@@ -157,7 +151,7 @@ fn log2(a: FP16x16W) -> FP16x16W {
let (msb, div) = lut::msb(whole);
if a.mag == div * ONE {
- return FixedTrait::new_unscaled(msb, false);
+ FixedTrait::new_unscaled(msb, false)
} else {
let norm = a / FixedTrait::new_unscaled(div, false);
let r8 = FixedTrait::new(596, true) * norm;
@@ -168,21 +162,22 @@ fn log2(a: FP16x16W) -> FP16x16W {
let r3 = (r4 + FixedTrait::new(608566, false)) * norm;
let r2 = (r3 + FixedTrait::new(655828, true)) * norm;
let r1 = (r2 + FixedTrait::new(534433, false)) * norm;
- return r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false);
+
+ r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false)
}
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(a: FP16x16W) -> FP16x16W {
- return FixedTrait::new(19728, false) * log2(a); // log10(2) = 0.301...
+ FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301...
}
fn lt(a: FP16x16W, b: FP16x16W) -> bool {
if a.sign != b.sign {
- return a.sign;
+ a.sign
} else {
- return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign);
+ (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign)
}
}
@@ -190,20 +185,20 @@ fn mul(a: FP16x16W, b: FP16x16W) -> FP16x16W {
let prod_u128 = integer::u64_wide_mul(a.mag, b.mag);
// Re-apply sign
- return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign);
+ FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign)
}
fn ne(a: @FP16x16W, b: @FP16x16W) -> bool {
- return (*a.mag != *b.mag) || (*a.sign != *b.sign);
+ (*a.mag != *b.mag) || (*a.sign != *b.sign)
}
fn neg(a: FP16x16W) -> FP16x16W {
if a.mag == 0 {
- return a;
+ a
} else if !a.sign {
- return FixedTrait::new(a.mag, !a.sign);
+ FixedTrait::new(a.mag, !a.sign)
} else {
- return FixedTrait::new(a.mag, false);
+ FixedTrait::new(a.mag, false)
}
}
@@ -211,7 +206,7 @@ fn neg(a: FP16x16W) -> FP16x16W {
// self is a fixed point value
// b is a fixed point value
fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE));
+ let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE));
// use the more performant integer pow when y is an int
if (rem == 0) {
@@ -219,7 +214,7 @@ fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W {
}
// x^y = exp(y*ln(x)) for x > 0; will error for x < 0
- return exp(b * ln(a));
+ exp(b * ln(a))
}
// Calculates the value of a^b and checks for overflow before returning
@@ -227,7 +222,7 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W {
let mut x = a;
let mut n = b;
- if sign == true {
+ if sign {
x = FixedTrait::ONE() / x;
}
@@ -238,11 +233,7 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W {
let mut y = FixedTrait::ONE();
let two = integer::u64_as_non_zero(2);
- loop {
- if n <= 1 {
- break;
- }
-
+ while n > 1 {
let (div, rem) = integer::u64_safe_divmod(n, two);
if rem == 1 {
@@ -253,20 +244,20 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W {
n = div;
};
- return x * y;
+ x * y
}
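
The rewritten loop is binary exponentiation: each pass halves the exponent, squares the running base, and folds a factor into the accumulator when the dropped bit was set, so `pow_int` costs O(log b) multiplications. The same scheme on plain `u64`, as a standalone hypothetical sketch (the real function also handles `n == 0` in code not shown in this hunk):

// Square-and-multiply, mirroring pow_int above; assumes exp >= 1.
fn pow_int_sketch(base: u64, exp: u64) -> u64 {
    let mut x = base;
    let mut n = exp;
    let mut y = 1_u64;
    while n > 1 {
        if n % 2 == 1 {
            y = y * x; // fold in the odd bit
        }
        x = x * x; // square for the next bit
        n = n / 2;
    };
    // n == 1 here, so one base factor is still owed.
    x * y
}
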
fn rem(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- return a - floor(a / b) * b;
+ a - floor(a / b) * b
}
fn round(a: FP16x16W) -> FP16x16W {
- let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE));
+ let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));
if (HALF <= rem) {
- return FixedTrait::new_unscaled(div + 1, a.sign);
+ FixedTrait::new_unscaled(div + 1, a.sign)
} else {
- return FixedTrait::new_unscaled(div, a.sign);
+ FixedTrait::new_unscaled(div, a.sign)
}
}
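
Note the tie behavior: `round` compares the fractional remainder against `HALF` on the magnitude alone, so exact halves round away from zero for both signs. A hypothetical check, with values chosen to land on a tie:

use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FP16x16WImpl, FP16x16WPartialEq, FixedTrait
};

#[test]
fn sketch_round_ties_away_from_zero() {
    let pos: FP16x16W = FixedTrait::new(163840, false); // 2.5
    let neg: FP16x16W = FixedTrait::new(163840, true); // -2.5
    assert(pos.round() == FixedTrait::new_unscaled(3, false), 'round(2.5) = 3');
    assert(neg.round() == FixedTrait::new_unscaled(3, true), 'round(-2.5) = -3');
}
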
@@ -276,11 +267,12 @@ fn sqrt(a: FP16x16W) -> FP16x16W {
assert(a.sign == false, 'must be positive');
let root = integer::u64_sqrt(a.mag.into() * ONE.into());
- return FixedTrait::new(root.into(), false);
+
+ FixedTrait::new(root.into(), false)
}
fn sub(a: FP16x16W, b: FP16x16W) -> FP16x16W {
- return add(a, -b);
+ add(a, -b)
}
fn sign(a: FP16x16W) -> FP16x16W {
@@ -467,7 +459,7 @@ mod tests {
let a = FixedTrait::new_unscaled(42, false);
let b = FixedTrait::new_unscaled(42, false);
let c = eq(@a, @b);
- assert(c == true, 'invalid result');
+ assert(c, 'invalid result');
}
#[test]
@@ -475,7 +467,7 @@ mod tests {
let a = FixedTrait::new_unscaled(42, false);
let b = FixedTrait::new_unscaled(42, false);
let c = ne(@a, @b);
- assert(c == false, 'invalid result');
+ assert(!c, 'invalid result');
}
#[test]
@@ -549,12 +541,12 @@ mod tests {
let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);
assert(a <= a, 'a <= a');
- assert(a <= b == false, 'a <= b');
- assert(a <= c == false, 'a <= c');
+ assert(!(a <= b), 'a <= b');
+ assert(!(a <= c), 'a <= c');
assert(b <= a, 'b <= a');
assert(b <= b, 'b <= b');
- assert(b <= c == false, 'b <= c');
+ assert(!(b <= c), 'b <= c');
assert(c <= a, 'c <= a');
assert(c <= b, 'c <= b');
@@ -567,17 +559,17 @@ mod tests {
let b = FixedTrait::new_unscaled(0, false);
let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);
- assert(a < a == false, 'a < a');
- assert(a < b == false, 'a < b');
- assert(a < c == false, 'a < c');
+ assert(!(a < a), 'a < a');
+ assert(!(a < b), 'a < b');
+ assert(!(a < c), 'a < c');
assert(b < a, 'b < a');
- assert(b < b == false, 'b < b');
- assert(b < c == false, 'b < c');
+ assert(!(b < b), 'b < b');
+ assert(!(b < c), 'b < c');
assert(c < a, 'c < a');
assert(c < b, 'c < b');
- assert(c < c == false, 'c < c');
+ assert(!(c < c), 'c < c');
}
#[test]
@@ -590,12 +582,12 @@ mod tests {
assert(a >= b, 'a >= b');
assert(a >= c, 'a >= c');
- assert(b >= a == false, 'b >= a');
+ assert(!(b >= a), 'b >= a');
assert(b >= b, 'b >= b');
assert(b >= c, 'b >= c');
- assert(c >= a == false, 'c >= a');
- assert(c >= b == false, 'c >= b');
+ assert(!(c >= a), 'c >= a');
+ assert(!(c >= b), 'c >= b');
assert(c >= c, 'c >= c');
}
@@ -605,17 +597,17 @@ mod tests {
let b = FixedTrait::new_unscaled(0, false);
let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);
- assert(a > a == false, 'a > a');
+ assert(!(a > a), 'a > a');
assert(a > b, 'a > b');
assert(a > c, 'a > c');
- assert(b > a == false, 'b > a');
- assert(b > b == false, 'b > b');
+ assert(!(b > a), 'b > a');
+ assert(!(b > b), 'b > b');
assert(b > c, 'b > c');
- assert(c > a == false, 'c > a');
- assert(c > b == false, 'c > b');
- assert(c > c == false, 'c > c');
+ assert(!(c > a), 'c > a');
+ assert(!(c > b), 'c > b');
+ assert(!(c > c), 'c > c');
}
#[test]
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo
index 49d19bf20..143b7dfe6 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo
@@ -1,8 +1,6 @@
-use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ONE, FP16x16W, FixedTrait};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut::erf_lut;
-
const ERF_COMPUTATIONAL_ACCURACY: u64 = 100;
const ROUND_CHECK_NUMBER: u64 = 10;
// Values > MAX_ERF_NUMBER return 1
@@ -21,5 +19,6 @@ fn erf(x: FP16x16W) -> FP16x16W {
} else {
erf_value = ONE;
}
+
FP16x16W { mag: erf_value, sign: x.sign }
}
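
Two properties of `erf` fall out of this hunk: the lookup runs on the magnitude with the sign reapplied at the end (erf is odd), and any input past `MAX_ERF_NUMBER` short-circuits to `ONE`. A hypothetical check, assuming 10.0 lies beyond that cutoff (erf(10) is 1 to far more than 16 fractional bits):

use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FP16x16WImpl, FP16x16WNeg, FP16x16WPartialEq, FixedTrait
};

#[test]
fn sketch_erf_saturation_and_oddness() {
    let big: FP16x16W = FixedTrait::new_unscaled(10, false);
    assert(big.erf() == FixedTrait::ONE(), 'erf saturates at 1');
    assert((-big).erf() == -FixedTrait::ONE(), 'erf is odd');
}
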
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
index 527b6046d..e2ab580fb 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
@@ -1,4 +1,3 @@
-use core::debug::PrintTrait;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul,
FP16x16WMulEq, FP16x16WTryIntoU128, FP16x16WPartialEq, FP16x16WPartialOrd, FP16x16WSubEq,
@@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
// Calculates hyperbolic cosine of a (fixed point)
fn cosh(a: FP16x16W) -> FP16x16W {
let ea = a.exp();
- return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false);
+
+ (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}
// Calculates hyperbolic sine of a (fixed point)
fn sinh(a: FP16x16W) -> FP16x16W {
let ea = a.exp();
- return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false);
+
+ (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}
// Calculates hyperbolic tangent of a (fixed point)
fn tanh(a: FP16x16W) -> FP16x16W {
let ea = a.exp();
let ea_i = FixedTrait::ONE() / ea;
- return (ea - ea_i) / (ea + ea_i);
+
+ (ea - ea_i) / (ea + ea_i)
}
// Calculates inverse hyperbolic cosine of a (fixed point)
fn acosh(a: FP16x16W) -> FP16x16W {
let root = (a * a - FixedTrait::ONE()).sqrt();
- return (a + root).ln();
+
+ (a + root).ln()
}
// Calculates inverse hyperbolic sine of a (fixed point)
fn asinh(a: FP16x16W) -> FP16x16W {
let root = (a * a + FixedTrait::ONE()).sqrt();
- return (a + root).ln();
+
+ (a + root).ln()
}
// Calculates inverse hyperbolic tangent of a (fixed point)
fn atanh(a: FP16x16W) -> FP16x16W {
let one = FixedTrait::ONE();
let ln_arg = (one + a) / (one - a);
- return ln_arg.ln() / FixedTrait::new(TWO, false);
+
+ ln_arg.ln() / FixedTrait::new(TWO, false)
}
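
Each hyperbolic above costs a single `exp` evaluation plus arithmetic, with e^(-a) recovered as a reciprocal, e.g. tanh a = (e^a - e^(-a)) / (e^a + e^(-a)). At a = 0 the definitions collapse exactly since exp(0) == ONE, which allows a sanity sketch with no tolerance helper (hypothetical test, assuming `add`'s equal-magnitude branch returns +0 as in the full source):

use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FP16x16WImpl, FP16x16WPartialEq, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::hyp::{cosh, sinh, tanh};

#[test]
fn sketch_hyp_at_zero() {
    let zero: FP16x16W = FixedTrait::new(0, false);
    assert(cosh(zero) == FixedTrait::ONE(), 'cosh(0) = 1');
    assert(sinh(zero) == zero, 'sinh(0) = 0');
    assert(tanh(zero) == zero, 'tanh(0) = 0');
}
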
// Tests --------------------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
- use core::option::OptionTrait;
- use core::traits::Into;
-
use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::assert_precise;
use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF};
-
#[test]
#[available_gas(10000000)]
fn test_cosh() {
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo
index 62c58537e..f40f4d15a 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo
@@ -54,7 +54,7 @@ fn msb(whole: u64) -> (u64, u64) {
}
}
- return (16, 65536);
+ (16, 65536)
}
fn exp2(exp: u64) -> u64 {
@@ -112,7 +112,7 @@ fn exp2(exp: u64) -> u64 {
}
}
- return 65536;
+ 65536
}
fn sin(a: u64) -> (u64, u64, u64) {
@@ -929,7 +929,7 @@ fn sin(a: u64) -> (u64, u64, u64) {
}
}
- return (102542, 65535, 65536);
+ (102542, 65535, 65536)
}
fn atan(a: u64) -> (u64, u64, u64) {
@@ -1233,7 +1233,7 @@ fn atan(a: u64) -> (u64, u64, u64) {
return (44958, 39405, 39716);
}
- return (45416, 39716, 40025);
+ (45416, 39716, 40025)
}
fn erf_lut(x: u64) -> u64 {
@@ -1925,5 +1925,6 @@ fn erf_lut(x: u64) -> u64 {
return 65535;
}
}
- return ONE;
+
+ ONE
}
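
All of these tables share the shape the hunk touches: a ladder of early `return`s over ascending thresholds with one saturating fallback, which is why only each function's trailing expression changes. The pattern in miniature (bucket values invented for illustration):

// Not a real table: just the threshold-ladder shape used by msb/exp2/sin/atan/erf_lut.
fn tiny_lut_sketch(x: u64) -> u64 {
    if x <= 10 {
        return 100;
    }
    if x <= 1000 {
        return 200;
    }

    300 // saturating fallback, like `ONE` in erf_lut above
}
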
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
index 3c22fd97f..441248cf8 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
@@ -1,6 +1,4 @@
-use core::debug::PrintTrait;
-use core::integer::{u64_safe_divmod, u64_as_non_zero};
-use core::option::OptionTrait;
+use core::integer;
use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
@@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
};
// CONSTANTS
-
const TWO_PI: u64 = 411775;
const PI: u64 = 205887;
const HALF_PI: u64 = 102944;
@@ -22,10 +19,10 @@ fn acos(a: FP16x16W) -> FP16x16W {
let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
let asin_res = asin(asin_arg);
- if (a.sign) {
- return FixedTrait::new(PI, false) - asin_res;
+ if a.sign {
+ FixedTrait::new(PI, false) - asin_res
} else {
- return asin_res;
+ asin_res
}
}
@@ -33,10 +30,10 @@ fn acos_fast(a: FP16x16W) -> FP16x16W {
let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
let asin_res = asin_fast(asin_arg);
- if (a.sign) {
- return FixedTrait::new(PI, false) - asin_res;
+ if a.sign {
+ FixedTrait::new(PI, false) - asin_res
} else {
- return asin_res;
+ asin_res
}
}
@@ -48,7 +45,8 @@ fn asin(a: FP16x16W) -> FP16x16W {
}
let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
- return atan(a / div);
+
+ atan(a / div)
}
fn asin_fast(a: FP16x16W) -> FP16x16W {
@@ -57,7 +55,8 @@ fn asin_fast(a: FP16x16W) -> FP16x16W {
}
let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
- return atan_fast(a / div);
+
+ atan_fast(a / div)
}
// Calculates arctan(a) (fixed point)
@@ -100,10 +99,9 @@ fn atan(a: FP16x16W) -> FP16x16W {
res = res - FixedTrait::new(HALF_PI, false);
}
- return FixedTrait::new(res.mag, a.sign);
+ FixedTrait::new(res.mag, a.sign)
}
-
fn atan_fast(a: FP16x16W) -> FP16x16W {
let mut at = a.abs();
let mut shift = false;
@@ -135,31 +133,32 @@ fn atan_fast(a: FP16x16W) -> FP16x16W {
res = res - FixedTrait::<FP16x16W>::new(HALF_PI, false);
}
- return FixedTrait::new(res.mag, a.sign);
+ FixedTrait::new(res.mag, a.sign)
}
// Calculates cos(a) with a in radians (fixed point)
fn cos(a: FP16x16W) -> FP16x16W {
- return sin(FixedTrait::new(HALF_PI, false) - a);
+ sin(FixedTrait::new(HALF_PI, false) - a)
}
fn cos_fast(a: FP16x16W) -> FP16x16W {
- return sin_fast(FixedTrait::new(HALF_PI, false) - a);
+ sin_fast(FixedTrait::new(HALF_PI, false) - a)
}
fn sin(a: FP16x16W) -> FP16x16W {
let a1 = a.mag % TWO_PI;
- let (whole_rem, partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI));
+ let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
let a2 = FixedTrait::new(partial_rem, false);
let partial_sign = whole_rem == 1;
let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
- return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0);
+
+ FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}
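
`sin` reduces its angle before the Taylor loop: the magnitude is folded into [0, 2*pi) with `% TWO_PI`, then divided by pi so the quotient picks the half-period (a sign flip) and the remainder is the angle actually evaluated. The reduction in isolation, as a hypothetical helper over raw magnitudes (constants copied from this file):

use core::integer;

const TWO_PI_SKETCH: u64 = 411775;
const PI_SKETCH: u64 = 205887;

// Returns (flip_sign, residual_mag): flip_sign is true in the second
// half-period, where sine of a positive angle is negative.
fn reduce_angle_sketch(mag: u64) -> (bool, u64) {
    let a1 = mag % TWO_PI_SKETCH;
    let (whole_rem, partial_rem) = integer::u64_safe_divmod(
        a1, integer::u64_as_non_zero(PI_SKETCH)
    );

    (whole_rem == 1, partial_rem)
}
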
fn sin_fast(a: FP16x16W) -> FP16x16W {
let a1 = a.mag % TWO_PI;
- let (whole_rem, mut partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI));
+ let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
let partial_sign = whole_rem == 1;
if partial_rem >= HALF_PI {
@@ -171,7 +170,7 @@ fn sin_fast(a: FP16x16W) -> FP16x16W {
let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
+ FixedTrait::<FP16x16W>::new(low, false);
- return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0);
+ FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}
// Calculates tan(a) with a in radians (fixed point)
@@ -179,14 +178,16 @@ fn tan(a: FP16x16W) -> FP16x16W {
let sinx = sin(a);
let cosx = cos(a);
assert(cosx.mag != 0, 'tan undefined');
- return sinx / cosx;
+
+ sinx / cosx
}
fn tan_fast(a: FP16x16W) -> FP16x16W {
let sinx = sin_fast(a);
let cosx = cos_fast(a);
assert(cosx.mag != 0, 'tan undefined');
- return sinx / cosx;
+
+ sinx / cosx
}
// Helper function to calculate Taylor series for sin
@@ -199,15 +200,13 @@ fn _sin_loop(a: FP16x16W, i: u64, acc: FP16x16W) -> FP16x16W {
return new_acc;
}
- return _sin_loop(a, i - 1, new_acc);
+ _sin_loop(a, i - 1, new_acc)
}
// Tests --------------------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
- use core::traits::Into;
-
use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::{
assert_precise, assert_relative
};
diff --git a/src/numbers/fixed_point/implementations/fp32x32/comp.cairo b/src/numbers/fixed_point/implementations/fp32x32/comp.cairo
index 14bcf69c8..ec1043f89 100644
--- a/src/numbers/fixed_point/implementations/fp32x32/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp32x32/comp.cairo
@@ -1,48 +1,47 @@
-use orion::numbers::{FP32x32, FixedTrait};
-use orion::numbers::FP32x32Impl;
+use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait};
fn xor(a: FP32x32, b: FP32x32) -> bool {
if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) {
- return true;
+ true
} else {
- return false;
+ false
}
}
fn or(a: FP32x32, b: FP32x32) -> bool {
let zero = FixedTrait::new(0, false);
if a == zero && b == zero {
- return false;
+ false
} else {
- return true;
+ true
}
}
fn and(a: FP32x32, b: FP32x32) -> bool {
let zero = FixedTrait::new(0, false);
if a == zero || b == zero {
- return false;
+ false
} else {
- return true;
+ true
}
}
fn where(a: FP32x32, b: FP32x32, c: FP32x32) -> FP32x32 {
if a == FixedTrait::new(0, false) {
- return c;
+ c
} else {
- return b;
+ b
}
}
fn bitwise_and(a: FP32x32, b: FP32x32) -> FP32x32 {
- return FixedTrait::new(a.mag & b.mag, a.sign & b.sign);
+ FixedTrait::new(a.mag & b.mag, a.sign & b.sign)
}
fn bitwise_xor(a: FP32x32, b: FP32x32) -> FP32x32 {
- return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign);
+ FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign)
}
fn bitwise_or(a: FP32x32, b: FP32x32) -> FP32x32 {
- return FixedTrait::new(a.mag | b.mag, a.sign | b.sign);
+ FixedTrait::new(a.mag | b.mag, a.sign | b.sign)
}
diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo
index e7fd8e24d..ee38799da 100644
--- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo
@@ -1,177 +1,174 @@
use core::debug::PrintTrait;
-use core::option::OptionTrait;
-use core::result::{ResultTrait, ResultTraitImpl};
-use core::traits::{TryInto, Into};
-
use cubit::f64 as fp32x32;
use cubit::f64::Fixed as FP32x32;
use cubit::f64::{ONE, HALF};
use cubit::f64::types::fixed;
-use orion::numbers::fixed_point::implementations::fp32x32::erf;
use orion::numbers::fixed_point::core::{FixedTrait};
+use orion::numbers::fixed_point::implementations::fp32x32::erf;
use orion::numbers::fixed_point::utils;
const MAX: u64 = 9223372036854775808;
impl FP32x32Impl of FixedTrait<FP32x32, u64> {
fn ZERO() -> FP32x32 {
- return FP32x32 { mag: 0, sign: false };
+ FP32x32 { mag: 0, sign: false }
}
fn HALF() -> FP32x32 {
- return FP32x32 { mag: HALF, sign: false };
+ FP32x32 { mag: HALF, sign: false }
}
fn ONE() -> FP32x32 {
- return FP32x32 { mag: ONE, sign: false };
+ FP32x32 { mag: ONE, sign: false }
}
fn MAX() -> FP32x32 {
- return FP32x32 { mag: MAX, sign: false };
+ FP32x32 { mag: MAX, sign: false }
}
fn new(mag: u64, sign: bool) -> FP32x32 {
- return FP32x32 { mag: mag, sign: sign };
+ FP32x32 { mag: mag, sign: sign }
}
fn new_unscaled(mag: u64, sign: bool) -> FP32x32 {
- return FP32x32 { mag: mag * ONE, sign: sign };
+ FP32x32 { mag: mag * ONE, sign: sign }
}
fn from_felt(val: felt252) -> FP32x32 {
let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
- return FixedTrait::new(mag, utils::felt_sign(val));
+
+ FixedTrait::new(mag, utils::felt_sign(val))
}
fn abs(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::abs(self);
+ fp32x32::ops::abs(self)
}
fn acos(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::acos_fast(self);
+ fp32x32::trig::acos_fast(self)
}
fn acos_fast(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::acos_fast(self);
+ fp32x32::trig::acos_fast(self)
}
fn acosh(self: FP32x32) -> FP32x32 {
- return fp32x32::hyp::acosh(self);
+ fp32x32::hyp::acosh(self)
}
fn asin(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::asin_fast(self);
+ fp32x32::trig::asin_fast(self)
}
fn asin_fast(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::asin_fast(self);
+ fp32x32::trig::asin_fast(self)
}
fn asinh(self: FP32x32) -> FP32x32 {
- return fp32x32::hyp::asinh(self);
+ fp32x32::hyp::asinh(self)
}
fn atan(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::atan_fast(self);
+ fp32x32::trig::atan_fast(self)
}
fn atan_fast(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::atan_fast(self);
+ fp32x32::trig::atan_fast(self)
}
fn atanh(self: FP32x32) -> FP32x32 {
- return fp32x32::hyp::atanh(self);
+ fp32x32::hyp::atanh(self)
}
fn ceil(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::ceil(self);
+ fp32x32::ops::ceil(self)
}
fn cos(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::cos_fast(self);
+ fp32x32::trig::cos_fast(self)
}
fn cos_fast(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::cos_fast(self);
+ fp32x32::trig::cos_fast(self)
}
fn cosh(self: FP32x32) -> FP32x32 {
- return fp32x32::hyp::cosh(self);
+ fp32x32::hyp::cosh(self)
}
fn floor(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::floor(self);
+ fp32x32::ops::floor(self)
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::exp(self);
+ fp32x32::ops::exp(self)
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::exp2(self);
+ fp32x32::ops::exp2(self)
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::ln(self);
+ fp32x32::ops::ln(self)
}
// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
fn log2(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::log2(self);
+ fp32x32::ops::log2(self)
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::log10(self);
+ fp32x32::ops::log10(self)
}
// Calculates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP32x32, b: FP32x32) -> FP32x32 {
- return fp32x32::ops::pow(self, b);
+ fp32x32::ops::pow(self, b)
}
fn round(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::round(self);
+ fp32x32::ops::round(self)
}
fn sin(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::sin_fast(self);
+ fp32x32::trig::sin_fast(self)
}
fn sin_fast(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::sin_fast(self);
+ fp32x32::trig::sin_fast(self)
}
fn sinh(self: FP32x32) -> FP32x32 {
- return fp32x32::hyp::sinh(self);
+ fp32x32::hyp::sinh(self)
}
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP32x32) -> FP32x32 {
- return fp32x32::ops::sqrt(self);
+ fp32x32::ops::sqrt(self)
}
fn tan(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::tan_fast(self);
+ fp32x32::trig::tan_fast(self)
}
fn tan_fast(self: FP32x32) -> FP32x32 {
- return fp32x32::trig::tan_fast(self);
+ fp32x32::trig::tan_fast(self)
}
fn tanh(self: FP32x32) -> FP32x32 {
- return fp32x32::hyp::tanh(self);
+ fp32x32::hyp::tanh(self)
}
fn sign(self: FP32x32) -> FP32x32 {
@@ -179,7 +176,7 @@ impl FP32x32Impl of FixedTrait<FP32x32, u64> {
}
fn NaN() -> FP32x32 {
- return FP32x32 { mag: 0, sign: true };
+ FP32x32 { mag: 0, sign: true }
}
fn is_nan(self: FP32x32) -> bool {
@@ -187,15 +184,15 @@ impl FP32x32Impl of FixedTrait<FP32x32, u64> {
}
fn INF() -> FP32x32 {
- return FP32x32 { mag: 4294967295, sign: false };
+ FP32x32 { mag: 4294967295, sign: false }
}
fn POS_INF() -> FP32x32 {
- return FP32x32 { mag: 4294967295, sign: false };
+ FP32x32 { mag: 4294967295, sign: false }
}
fn NEG_INF() -> FP32x32 {
- return FP32x32 { mag: 4294967295, sign: true };
+ FP32x32 { mag: 4294967295, sign: true }
}
fn is_inf(self: FP32x32) -> bool {
@@ -211,11 +208,10 @@ impl FP32x32Impl of FixedTrait<FP32x32, u64> {
}
fn erf(self: FP32x32) -> FP32x32 {
- return erf::erf(self);
+ erf::erf(self)
}
}
-
impl FP32x32Print of PrintTrait<FP32x32> {
fn print(self: FP32x32) {
self.sign.print();
@@ -229,9 +225,9 @@ impl FP32x32IntoFelt252 of Into<FP32x32, felt252> {
let mag_felt = self.mag.into();
if self.sign {
- return mag_felt * -1;
+ mag_felt * -1
} else {
- return mag_felt * 1;
+ mag_felt * 1
}
}
}
@@ -239,10 +235,10 @@ impl FP32x32IntoFelt252 of Into<FP32x32, felt252> {
impl FP32x32TryIntoU64 of TryInto<FP32x32, u64> {
fn try_into(self: FP32x32) -> Option<u64> {
if self.sign {
- return Option::None(());
+ Option::None(())
} else {
// Unscale the magnitude and round down
- return Option::Some((self.mag / ONE).into());
+ Option::Some((self.mag / ONE).into())
}
}
}
@@ -253,7 +249,7 @@ impl FP32x32TryIntoU16 of TryInto<FP32x32, u16> {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -264,7 +260,7 @@ impl FP32x32TryIntoU32 of TryInto<FP32x32, u32> {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -275,7 +271,7 @@ impl FP32x32TryIntoU8 of TryInto<FP32x32, u8> {
Option::None(())
} else {
// Unscale the magnitude and round down
- return (self.mag / ONE).try_into();
+ (self.mag / ONE).try_into()
}
}
}
@@ -300,7 +296,7 @@ impl FP32x32TryIntoI8 of TryInto<FP32x32, i8> {
impl FP32x32Add of Add<FP32x32> {
fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::ops::add(lhs, rhs);
+ fp32x32::ops::add(lhs, rhs)
}
}
@@ -313,7 +309,7 @@ impl FP32x32AddEq of AddEq<FP32x32> {
impl FP32x32Sub of Sub<FP32x32> {
fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::ops::sub(lhs, rhs);
+ fp32x32::ops::sub(lhs, rhs)
}
}
@@ -326,7 +322,7 @@ impl FP32x32SubEq of SubEq<FP32x32> {
impl FP32x32Mul of Mul<FP32x32> {
fn mul(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::ops::mul(lhs, rhs);
+ fp32x32::ops::mul(lhs, rhs)
}
}
@@ -339,7 +335,7 @@ impl FP32x32MulEq of MulEq<FP32x32> {
impl FP32x32Div of Div<FP32x32> {
fn div(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::ops::div(lhs, rhs);
+ fp32x32::ops::div(lhs, rhs)
}
}
@@ -353,45 +349,44 @@ impl FP32x32DivEq of DivEq<FP32x32>