diff --git a/Cargo.toml b/Cargo.toml
index d381f8a..e0fae0a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,8 +4,9 @@ version = "0.1.0"
 edition = "2021"
 description = "A tensor library for scientific computing in Rust"
 license = "MIT"
-license-file = "LICENSE"
 homepage = "https://github.com/Rust-Scientific-Computing/feotensor"
 repository = "https://github.com/Rust-Scientific-Computing/feotensor"
 
 [dependencies]
+itertools = "0.13.0"
+num = "0.4.3"
diff --git a/src/axes.rs b/src/axes.rs
new file mode 100644
index 0000000..a2cb0d2
--- /dev/null
+++ b/src/axes.rs
@@ -0,0 +1 @@
+pub type Axes = Vec<usize>;
diff --git a/src/coordinate.rs b/src/coordinate.rs
new file mode 100644
index 0000000..ad03454
--- /dev/null
+++ b/src/coordinate.rs
@@ -0,0 +1,131 @@
+use std::fmt;
+use std::ops::{Index, IndexMut};
+
+use crate::error::ShapeError;
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct Coordinate {
+    indices: Vec<usize>,
+}
+
+impl Coordinate {
+    pub fn new(indices: Vec<usize>) -> Result<Self, ShapeError> {
+        if indices.is_empty() {
+            return Err(ShapeError::new("Coordinate cannot be empty"));
+        }
+        Ok(Self { indices })
+    }
+
+    pub fn order(&self) -> usize {
+        self.indices.len()
+    }
+
+    pub fn iter(&self) -> std::slice::Iter<'_, usize> {
+        self.indices.iter()
+    }
+
+    pub fn insert(&self, index: usize, axis: usize) -> Self {
+        let mut new_indices = self.indices.clone();
+        new_indices.insert(index, axis);
+        Self {
+            indices: new_indices,
+        }
+    }
+}
+
+impl Index<usize> for Coordinate {
+    type Output = usize;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.indices[index]
+    }
+}
+
+impl IndexMut<usize> for Coordinate {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.indices[index]
+    }
+}
+
+impl fmt::Display for Coordinate {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use itertools::Itertools;
+        let idxs = self.indices.iter().map(|&x| format!("{}", x)).join(", ");
+        write!(f, "({})", idxs)
+    }
+}
+
+#[macro_export]
+macro_rules! coord {
+    ($($index:expr),*) => {
+        {
+            use $crate::coordinate::Coordinate;
+            Coordinate::new(vec![$($index),*])
+        }
+    };
+
+    ($index:expr; $count:expr) => {
+        {
+            use $crate::coordinate::Coordinate;
+            Coordinate::new(vec![$index; $count])
+        }
+    };
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_order() {
+        let coord = coord![1, 2, 3].unwrap();
+        assert_eq!(coord.order(), 3);
+    }
+
+    #[test]
+    fn test_iter() {
+        let coord = coord![1, 2, 3].unwrap();
+        let mut iter = coord.iter();
+        assert_eq!(iter.next(), Some(&1));
+        assert_eq!(iter.next(), Some(&2));
+        assert_eq!(iter.next(), Some(&3));
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_insert() {
+        let coord = coord![1, 2, 3].unwrap();
+        let new_coord = coord.insert(1, 4);
+        assert_eq!(new_coord, coord![1, 4, 2, 3].unwrap());
+    }
+
+    #[test]
+    fn test_index() {
+        let coord = coord![1, 2, 3].unwrap();
+        assert_eq!(coord[0], 1);
+        assert_eq!(coord[1], 2);
+        assert_eq!(coord[2], 3);
+    }
+
+    #[test]
+    fn test_index_mut() {
+        let mut coord = coord![1, 2, 3].unwrap();
+        coord[1] = 4;
+        assert_eq!(coord[1], 4);
+    }
+
+    #[test]
+    fn test_display() {
+        let coord = coord![1, 2, 3].unwrap();
+        assert_eq!(format!("{}", coord), "(1, 2, 3)");
+    }
+
+    #[test]
+    fn test_coord_macro() {
+        let coord = coord![1, 2, 3].unwrap();
+        assert_eq!(coord, Coordinate::new(vec![1, 2, 3]).unwrap());
+
+        let coord_repeated = coord![1; 3].unwrap();
+        assert_eq!(coord_repeated, Coordinate::new(vec![1, 1, 1]).unwrap());
+    }
+}
diff --git a/src/error.rs b/src/error.rs
new file mode 100644
index 0000000..c74013b
--- /dev/null
+++ b/src/error.rs
@@ -0,0 +1,20 @@
+use std::fmt;
+
+#[derive(Debug, Clone)]
+pub struct ShapeError {
+    reason: String,
+}
+
+impl ShapeError {
+    pub fn new(reason: &str) -> Self {
+        ShapeError {
+            reason: reason.to_string(),
+        }
+    }
+}
+
+impl fmt::Display for ShapeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "ShapeError: {}", self.reason)
+    }
+}
diff --git a/src/iter.rs b/src/iter.rs
new file mode 100644
index 0000000..90788b6
--- /dev/null
+++ b/src/iter.rs
@@ -0,0 +1,88 @@
+use crate::coord;
+use crate::coordinate::Coordinate;
+use crate::shape::Shape;
+use std::cmp::max;
+
+pub struct IndexIterator {
+    shape: Shape,
+    current: Coordinate,
+    done: bool,
+}
+
+impl IndexIterator {
+    pub fn new(shape: &Shape) -> Self {
+        // For a 0-order shape, `next` returns None before `current` is ever read, so this placeholder coordinate is safe.
+        let current = coord![0; max(shape.order(), 1)].unwrap();
+        IndexIterator {
+            shape: shape.clone(),
+            current,
+            done: false,
+        }
+    }
+}
+
+impl Iterator for IndexIterator {
+    type Item = Coordinate;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.done || self.shape.order() == 0 {
+            return None;
+        }
+
+        let result = self.current.clone();
+
+        for i in (0..self.shape.order()).rev() {
+            if self.current[i] + 1 < self.shape[i] {
+                self.current[i] += 1;
+                break;
+            } else {
+                self.current[i] = 0;
+                if i == 0 {
+                    self.done = true;
+                }
+            }
+        }
+
+        Some(result)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::shape;
+
+    #[test]
+    fn test_index_iterator() {
+        let shape = shape![2, 3].unwrap();
+        let mut iter = IndexIterator::new(&shape);
+
+        assert_eq!(iter.next(), Some(coord![0, 0].unwrap()));
+        assert_eq!(iter.next(), Some(coord![0, 1].unwrap()));
+        assert_eq!(iter.next(), Some(coord![0, 2].unwrap()));
+        assert_eq!(iter.next(), Some(coord![1, 0].unwrap()));
+        assert_eq!(iter.next(), Some(coord![1, 1].unwrap()));
+        assert_eq!(iter.next(), Some(coord![1, 2].unwrap()));
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_index_iterator_single_dimension() {
+        let shape = shape![4].unwrap();
+        let mut iter = IndexIterator::new(&shape);
+
+        assert_eq!(iter.next(), Some(coord![0].unwrap()));
+        assert_eq!(iter.next(), Some(coord![1].unwrap()));
+        assert_eq!(iter.next(), Some(coord![2].unwrap()));
+        assert_eq!(iter.next(), Some(coord![3].unwrap()));
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_index_iterator_empty_tensor() {
+        let shape = shape![].unwrap();
+        let mut iter = IndexIterator::new(&shape);
+
+        assert_eq!(iter.next(), None);
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 7d12d9a..718f954 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,14 +1,9 @@
-pub fn add(left: usize, right: usize) -> usize {
-    left + right
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn it_works() {
-        let result = add(2, 2);
-        assert_eq!(result, 4);
-    }
-}
+pub mod axes;
+pub mod coordinate;
+pub mod error;
+pub mod iter;
+pub mod matrix;
+pub mod shape;
+pub mod storage;
+pub mod tensor;
+pub mod vector;
diff --git a/src/matrix.rs b/src/matrix.rs
new file mode 100644
index 0000000..215be90
--- /dev/null
+++ b/src/matrix.rs
@@ -0,0 +1,657 @@
+use std::ops::{Add, Deref, DerefMut, Div, Index, IndexMut, Mul, Sub};
+
+use crate::axes::Axes;
+use crate::coord;
+use crate::coordinate::Coordinate;
+use crate::error::ShapeError;
+use crate::shape;
+use crate::shape::Shape;
+use crate::tensor::DynamicTensor;
+use crate::vector::DynamicVector;
+use num::{Float, Num};
+
+pub struct DynamicMatrix<T: Num> {
+    tensor: DynamicTensor<T>,
+}
+pub type Matrix<T> = DynamicMatrix<T>;
+
+impl<T: Num + PartialOrd + Copy> DynamicMatrix<T> {
+    pub fn new(shape: &Shape, data: &[T]) -> Result<DynamicMatrix<T>, ShapeError> {
+        Ok(DynamicMatrix {
+            tensor: DynamicTensor::new(shape, data)?,
+        })
+    }
+
+    pub fn from_tensor(tensor: DynamicTensor<T>) -> Result<DynamicMatrix<T>, ShapeError> {
+        if tensor.shape().order() != 2 {
+            return Err(ShapeError::new("Shape must have order of 2"));
+        }
+        Ok(DynamicMatrix { tensor })
+    }
+
+    pub fn fill(shape: &Shape, value: T) -> Result<DynamicMatrix<T>, ShapeError> {
+        let data = vec![value; shape.size()];
+        DynamicMatrix::new(shape, &data)
+    }
+    pub fn eye(shape: &Shape) -> Result<DynamicMatrix<T>, ShapeError> {
+        let mut result = DynamicMatrix::zeros(shape).unwrap();
+        for i in 0..shape[0] {
+            result.set(&coord![i, i].unwrap(), T::one()).unwrap();
+        }
+        Ok(result)
+    }
+    pub fn zeros(shape: &Shape) -> Result<DynamicMatrix<T>, ShapeError> {
+        Self::fill(shape, T::zero())
+    }
+    pub fn ones(shape: &Shape) -> Result<DynamicMatrix<T>, ShapeError> {
+        Self::fill(shape, T::one())
+    }
+
+    pub fn sum(&self, axes: Axes) -> DynamicVector<T> {
+        let result = self.tensor.sum(axes);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    pub fn mean(&self, axes: Axes) -> DynamicVector<T> {
+        let result = self.tensor.mean(axes);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    pub fn var(&self, axes: Axes) -> DynamicVector<T> {
+        let result = self.tensor.var(axes);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    pub fn max(&self, axes: Axes) -> DynamicVector<T> {
+        let result = self.tensor.max(axes);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    pub fn min(&self, axes: Axes) -> DynamicVector<T> {
+        let result = self.tensor.min(axes);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    // Vector/Matrix Multiplication
+    pub fn matmul(&self, rhs: &Self) -> DynamicMatrix<T> {
+        // Matrix-Matrix multiplication
+        assert_eq!(self.shape()[1], rhs.shape()[0]);
+        let mut result = DynamicTensor::zeros(&shape![self.shape()[0], rhs.shape()[1]].unwrap());
+        for i in 0..self.shape()[0] {
+            for j in 0..rhs.shape()[1] {
+                let mut sum = T::zero();
+                for k in 0..self.shape()[1] {
+                    sum = sum + self[coord![i, k].unwrap()] * rhs[coord![k, j].unwrap()];
+                }
+                result.set(&coord![i, j].unwrap(), sum).unwrap();
+            }
+        }
+        DynamicMatrix::from_tensor(result).unwrap()
+    }
+
+    pub fn vecmul(&self, rhs: &DynamicVector<T>) -> DynamicVector<T> {
+        assert_eq!(self.shape()[1], rhs.shape()[0]);
+        let mut result = DynamicTensor::zeros(&shape![self.shape()[0]].unwrap());
+        for i in 0..self.shape()[0] {
+            let mut sum = T::zero();
+            for j in 0..self.shape()[1] {
+                sum = sum + self[coord![i, j].unwrap()] * rhs[j];
+            }
+            result.set(&coord![i].unwrap(), sum).unwrap();
+        }
+        DynamicVector::from_tensor(result).unwrap()
+    }
+}
+
+impl<T: Float + PartialOrd + Copy> DynamicMatrix<T> {
+    pub fn pow(&self, power: T) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor.pow(power)).unwrap()
+    }
+}
+
+// Scalar Addition
+impl<T: Num + PartialOrd + Copy> Add<T> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn add(self, rhs: T) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor + rhs).unwrap()
+    }
+}
+
+// Tensor Addition
+impl<T: Num + PartialOrd + Copy> Add<DynamicMatrix<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn add(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor + rhs.tensor).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Add<DynamicTensor<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn add(self, rhs: DynamicTensor<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor + rhs).unwrap()
+    }
+}
+
+// Scalar Subtraction
+impl<T: Num + PartialOrd + Copy> Sub<T> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn sub(self, rhs: T) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor - rhs).unwrap()
+    }
+}
+
+// Tensor Subtraction
+impl<T: Num + PartialOrd + Copy> Sub<DynamicMatrix<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn sub(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor - rhs.tensor).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Sub<DynamicTensor<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn sub(self, rhs: DynamicTensor<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor - rhs).unwrap()
+    }
+}
+
+// Scalar Multiplication
+impl<T: Num + PartialOrd + Copy> Mul<T> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn mul(self, rhs: T) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor * rhs).unwrap()
+    }
+}
+
+// Tensor Multiplication
+impl<T: Num + PartialOrd + Copy> Mul<DynamicMatrix<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn mul(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor * rhs.tensor).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Mul<DynamicTensor<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn mul(self, rhs: DynamicTensor<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor * rhs).unwrap()
+    }
+}
+
+// Scalar Division
+impl<T: Num + PartialOrd + Copy> Div<T> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn div(self, rhs: T) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor / rhs).unwrap()
+    }
+}
+
+// Tensor Division
+impl<T: Num + PartialOrd + Copy> Div<DynamicMatrix<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn div(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor / rhs.tensor).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Div<DynamicTensor<T>> for DynamicMatrix<T> {
+    type Output = DynamicMatrix<T>;
+
+    fn div(self, rhs: DynamicTensor<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::from_tensor(self.tensor / rhs).unwrap()
+    }
+}
+
+impl<T: Num> Deref for DynamicMatrix<T> {
+    type Target = DynamicTensor<T>;
+
+    fn deref(&self) -> &DynamicTensor<T> {
+        &self.tensor
+    }
+}
+
+impl<T: Num> DerefMut for DynamicMatrix<T> {
+    fn deref_mut(&mut self) -> &mut DynamicTensor<T> {
+        &mut self.tensor
+    }
+}
+
+impl<T: Num + Copy + PartialOrd> Index<Coordinate> for DynamicMatrix<T> {
+    type Output = T;
+
+    fn index(&self, index: Coordinate) -> &Self::Output {
+        self.tensor.get(&index).unwrap()
+    }
+}
+
+impl<T: Num + Copy + PartialOrd> IndexMut<Coordinate> for DynamicMatrix<T> {
+    fn index_mut(&mut self, index: Coordinate) -> &mut Self::Output {
+        self.tensor.get_mut(&index).unwrap()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_new() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 3.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 4.0);
+    }
+
+    #[test]
+    fn test_from_tensor() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data).unwrap();
+        let matrix = DynamicMatrix::from_tensor(tensor).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 3.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 4.0);
+    }
+
+    #[test]
+    fn test_from_tensor_fail() {
+        let shape = shape![2, 2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
+        let tensor = DynamicTensor::new(&shape, &data).unwrap();
+        let result = DynamicMatrix::from_tensor(tensor);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_fill() {
+        let shape = shape![2, 2].unwrap();
+        let matrix = DynamicMatrix::fill(&shape, 3.0).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 3.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 3.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 3.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 3.0);
+    }
+
+    #[test]
+    fn test_eye() {
+        let shape = shape![3, 3].unwrap();
+        let matrix = DynamicMatrix::<f64>::eye(&shape).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 0.0);
+        assert_eq!(matrix[coord![0, 2].unwrap()], 0.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 0.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 1.0);
+        assert_eq!(matrix[coord![1, 2].unwrap()], 0.0);
+        assert_eq!(matrix[coord![2, 0].unwrap()], 0.0);
+        assert_eq!(matrix[coord![2, 1].unwrap()], 0.0);
+        assert_eq!(matrix[coord![2, 2].unwrap()], 1.0);
+    }
+
+    #[test]
+    fn test_zeros() {
+        let shape = shape![2, 2].unwrap();
+        let matrix = DynamicMatrix::<f64>::zeros(&shape).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 0.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 0.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 0.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 0.0);
+    }
+
+    #[test]
+    fn test_ones() {
+        let shape = shape![2, 2].unwrap();
+        let matrix = DynamicMatrix::<f64>::ones(&shape).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 1.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 1.0);
+    }
+
+    #[test]
+    fn test_size() {
+        let shape = shape![2, 2].unwrap();
+        let matrix = DynamicMatrix::<f64>::zeros(&shape).unwrap();
+        assert_eq!(matrix.size(), 4);
+    }
+
+    #[test]
+    fn test_get() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        assert_eq!(matrix[coord![1, 0].unwrap()], 3.0);
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        matrix[coord![1, 0].unwrap()] = 5.0;
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 5.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 4.0);
+    }
+
+    #[test]
+    fn test_set() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        matrix.set(&coord![1, 0].unwrap(), 5.0).unwrap();
+        assert_eq!(matrix.shape(), &shape);
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 5.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 4.0);
+    }
+
+    #[test]
+    fn test_sum() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix.sum(vec![0, 1]);
+        assert_eq!(result[0], 10.0);
+        assert_eq!(result.shape(), &shape![1].unwrap());
+    }
+
+    #[test]
+    fn test_mean() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix.mean(vec![0, 1]);
+        assert_eq!(result[0], 2.5);
+        assert_eq!(result.shape(), &shape![1].unwrap());
+    }
+
+    #[test]
+    fn test_var() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix.var(vec![0, 1]);
+        assert_eq!(result[0], 1.25);
+        assert_eq!(result.shape(), &shape![1].unwrap());
+    }
+
+    #[test]
+    fn test_min() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix.min(vec![0, 1]);
+        assert_eq!(result[0], 1.0);
+        assert_eq!(result.shape(), &shape![1].unwrap());
+    }
+
+    #[test]
+    fn test_max() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![-1.0, -2.0, -3.0, -4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix.max(vec![0, 1]);
+        assert_eq!(result[0], -1.0);
+        assert_eq!(result.shape(), &shape![1].unwrap());
+    }
+
+    #[test]
+    fn test_matmul() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix1 = DynamicMatrix::new(&shape, &data1).unwrap();
+        let matrix2 = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = matrix1.matmul(&matrix2);
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result[coord![0, 0].unwrap()], 10.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 13.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 22.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 29.0);
+    }
+
+    #[test]
+    fn test_vecmul() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let vector_data = vec![1.0, 2.0];
+        let vector = DynamicVector::new(&vector_data).unwrap();
+        let result = matrix.vecmul(&vector);
+        assert_eq!(result.shape(), &shape![2].unwrap());
+        assert_eq!(result[0], 5.0);
+        assert_eq!(result[1], 11.0);
+    }
+
+    #[test]
+    fn test_add_scalar() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix + 2.0;
+        assert_eq!(result[coord![0, 0].unwrap()], 3.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 4.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 5.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 6.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_add_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix1 = DynamicMatrix::new(&shape, &data1).unwrap();
+        let matrix2 = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = matrix1 + matrix2;
+        assert_eq!(result[coord![0, 0].unwrap()], 3.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 5.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 7.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 9.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_add_matrix_tensor() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix = DynamicMatrix::new(&shape, &data1).unwrap();
+        let tensor = DynamicTensor::new(&shape, &data2).unwrap();
+        let result = matrix + tensor;
+        assert_eq!(result[coord![0, 0].unwrap()], 3.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 5.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 7.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 9.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_sub_scalar() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix - 2.0;
+        assert_eq!(result[coord![0, 0].unwrap()], -1.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 0.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 1.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 2.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_sub_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix1 = DynamicMatrix::new(&shape, &data1).unwrap();
+        let matrix2 = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = matrix1 - matrix2;
+        assert_eq!(result[coord![0, 0].unwrap()], -1.0);
+        assert_eq!(result[coord![0, 1].unwrap()], -1.0);
+        assert_eq!(result[coord![1, 0].unwrap()], -1.0);
+        assert_eq!(result[coord![1, 1].unwrap()], -1.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_sub_matrix_tensor() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix = DynamicMatrix::new(&shape, &data1).unwrap();
+        let tensor = DynamicTensor::new(&shape, &data2).unwrap();
+        let result = matrix - tensor;
+        assert_eq!(result[coord![0, 0].unwrap()], -1.0);
+        assert_eq!(result[coord![0, 1].unwrap()], -1.0);
+        assert_eq!(result[coord![1, 0].unwrap()], -1.0);
+        assert_eq!(result[coord![1, 1].unwrap()], -1.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_mul_scalar() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix * 2.0;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 4.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 6.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 8.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_mul_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix1 = DynamicMatrix::new(&shape, &data1).unwrap();
+        let matrix2 = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = matrix1 * matrix2;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 6.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 12.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 20.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_mul_matrix_tensor() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix = DynamicMatrix::new(&shape, &data1).unwrap();
+        let tensor = DynamicTensor::new(&shape, &data2).unwrap();
+        let result = matrix * tensor;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 6.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 12.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 20.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_div_scalar() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![4.0, 6.0, 8.0, 10.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix / 2.0;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 3.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 4.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 5.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_div_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![4.0, 6.0, 8.0, 10.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix1 = DynamicMatrix::new(&shape, &data1).unwrap();
+        let matrix2 = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = matrix1 / matrix2;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 2.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_div_matrix_tensor() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![4.0, 6.0, 8.0, 10.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix = DynamicMatrix::new(&shape, &data1).unwrap();
+        let tensor = DynamicTensor::new(&shape, &data2).unwrap();
+        let result = matrix / tensor;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 2.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_index_coord() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        assert_eq!(matrix[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 3.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 4.0);
+    }
+
+    #[test]
+    fn test_index_mut_coord() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        matrix[coord![0, 0].unwrap()] = 5.0;
+        assert_eq!(matrix[coord![0, 0].unwrap()], 5.0);
+        assert_eq!(matrix[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(matrix[coord![1, 0].unwrap()], 3.0);
+        assert_eq!(matrix[coord![1, 1].unwrap()], 4.0);
+    }
+
+    #[test]
+    fn test_pow_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![2.0, 3.0, 4.0, 5.0];
+        let matrix = DynamicMatrix::new(&shape, &data).unwrap();
+        let result = matrix.pow(2.0);
+        assert_eq!(result[coord![0, 0].unwrap()], 4.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 9.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 16.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 25.0);
+        assert_eq!(result.shape(), &shape);
+    }
+}
diff --git a/src/shape.rs b/src/shape.rs
new file mode 100644
index 0000000..7901559
--- /dev/null
+++ b/src/shape.rs
@@ -0,0 +1,114 @@
+use std::fmt;
+use std::ops::Index;
+
+use crate::error::ShapeError;
+
+/// A tensor shape: the ordered list of dimension extents.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Shape {
+    dims: Vec<usize>, // every entry is non-zero; enforced by `Shape::new`
+}
+
+impl Shape {
+    /// Builds a shape from dimension extents; zero-sized dimensions are rejected.
+    pub fn new(dims: Vec<usize>) -> Result<Shape, ShapeError> {
+        match dims.iter().all(|&d| d > 0) {
+            true => Ok(Shape { dims }),
+            false => Err(ShapeError::new("Dimension cannot be zero")),
+        }
+    }
+
+    /// Total number of elements implied by this shape.
+    pub fn size(&self) -> usize {
+        self.dims.iter().product()
+    }
+
+    /// Number of dimensions (the tensor order / rank).
+    pub fn order(&self) -> usize {
+        self.dims.len()
+    }
+
+    /// Concatenates the dimensions of `rhs` after this shape's dimensions.
+    pub fn stack(&self, rhs: &Shape) -> Shape {
+        let dims = self.dims.iter().chain(rhs.dims.iter()).copied().collect();
+        Shape { dims }
+    }
+}
+
+impl Index<usize> for Shape {
+    type Output = usize;
+
+    /// Returns the extent of dimension `index`; panics if out of range.
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.dims[index]
+    }
+}
+
+impl Index<std::ops::RangeFrom<usize>> for Shape {
+    type Output = [usize];
+
+    /// Returns the trailing dimensions `index.start..` as a slice
+    /// (used e.g. to compute sub-tensor sizes); panics if out of range.
+    fn index(&self, index: std::ops::RangeFrom<usize>) -> &Self::Output {
+        &self.dims[index]
+    }
+}
+
+impl fmt::Display for Shape {
+    /// Formats the shape as a parenthesised, comma-separated tuple, e.g. `(2, 3, 4)`.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use itertools::Itertools;
+        // `Itertools::join` formats each item with `Display` directly,
+        // avoiding an intermediate `String` allocation per dimension.
+        write!(f, "({})", self.dims.iter().join(", "))
+    }
+}
+
+/// Convenience constructor: `shape![2, 3]` expands to `Shape::new(vec![2, 3])`
+/// and therefore yields a `Result<Shape, ShapeError>`.
+#[macro_export]
+macro_rules! shape {
+    ($($dim:expr),*) => {
+        {
+            use $crate::shape::Shape;
+            Shape::new(vec![$($dim),*])
+        }
+    };
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_shape_new() {
+        let expected = vec![2, 3, 4];
+        let shape = Shape::new(expected.clone()).unwrap();
+        assert_eq!(shape.dims, expected);
+    }
+
+    #[test]
+    fn test_shape_new_with_zero_dimension() {
+        // Any zero extent must be rejected.
+        assert!(Shape::new(vec![2, 0, 4]).is_err());
+    }
+
+    #[test]
+    fn test_shape_size() {
+        // 2 * 3 * 4 == 24
+        assert_eq!(Shape::new(vec![2, 3, 4]).unwrap().size(), 24);
+    }
+
+    #[test]
+    fn test_shape_display() {
+        let rendered = format!("{}", Shape::new(vec![2, 3, 4]).unwrap());
+        assert_eq!(rendered, "(2, 3, 4)");
+    }
+
+    #[test]
+    fn test_shape_macro() {
+        assert_eq!(shape![2, 3, 4].unwrap().dims, vec![2, 3, 4]);
+        assert_eq!(shape![1].unwrap().dims, vec![1]);
+    }
+
+    #[test]
+    fn test_shape_extend() {
+        let front = Shape::new(vec![2, 3]).unwrap();
+        let back = Shape::new(vec![4, 5]).unwrap();
+        assert_eq!(front.stack(&back).dims, vec![2, 3, 4, 5]);
+    }
+}
diff --git a/src/storage.rs b/src/storage.rs
new file mode 100644
index 0000000..b51fbdf
--- /dev/null
+++ b/src/storage.rs
@@ -0,0 +1,178 @@
+use std::ops::{Index, IndexMut};
+
+use crate::coordinate::Coordinate;
+use crate::error::ShapeError;
+use crate::shape::Shape;
+
+/// Flat, heap-backed element storage for tensors (row-major layout).
+#[derive(Debug, PartialEq)]
+pub struct DynamicStorage<T> {
+    data: Vec<T>,
+}
+
+impl<T> DynamicStorage<T> {
+    /// Wraps an owned buffer as storage; no validation is performed here.
+    pub fn new(data: Vec<T>) -> Self {
+        Self { data }
+    }
+
+    /// Maps an N-dimensional coordinate to its flat row-major index.
+    ///
+    /// Errors if the coordinate's order does not match the shape, or if
+    /// any component is out of bounds for its dimension.
+    ///
+    /// For the row-wise maths see: https://bit.ly/3KQjPa3
+    pub fn flatten(&self, coord: &Coordinate, shape: &Shape) -> Result<usize, ShapeError> {
+        if coord.order() != shape.order() {
+            let msg = format!("incorrect order ({} vs {}).", coord.order(), shape.order());
+            return Err(ShapeError::new(msg.as_str()));
+        }
+
+        for (i, &dim) in coord.iter().enumerate() {
+            if dim >= shape[i] {
+                return Err(ShapeError::new(
+                    format!("out of bounds for dimension {}", i).as_str(),
+                ));
+            }
+        }
+
+        // Accumulate strides right-to-left so the mapping is O(order),
+        // instead of recomputing the product of the trailing dimensions
+        // for every axis (O(order^2)).
+        let mut index = 0;
+        let mut stride = 1;
+        for k in (0..shape.order()).rev() {
+            index += coord[k] * stride;
+            stride *= shape[k];
+        }
+        Ok(index)
+    }
+
+    /// Number of stored elements.
+    pub fn size(&self) -> usize {
+        self.data.len()
+    }
+
+    /// Immutable iterator over the elements in flat order.
+    pub fn iter(&self) -> std::slice::Iter<'_, T> {
+        self.data.iter()
+    }
+
+    /// Mutable iterator over the elements in flat order.
+    pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> {
+        self.data.iter_mut()
+    }
+}
+
+impl<T> Index<usize> for DynamicStorage<T> {
+    type Output = T;
+
+    /// Direct flat-index access; panics if out of bounds.
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.data[index]
+    }
+}
+
+impl<T> Index<std::ops::Range<usize>> for DynamicStorage<T> {
+    type Output = [T];
+
+    /// Contiguous flat-range access (used to slice out sub-tensors);
+    /// panics if the range is out of bounds.
+    fn index(&self, range: std::ops::Range<usize>) -> &Self::Output {
+        &self.data[range]
+    }
+}
+
+impl<T> IndexMut<usize> for DynamicStorage<T> {
+    /// Mutable flat-index access; panics if out of bounds.
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.data[index]
+    }
+}
+
+// Owned iteration: consumes the storage, yielding elements by value.
+impl<T> IntoIterator for DynamicStorage<T> {
+    type Item = T;
+    type IntoIter = std::vec::IntoIter<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.data.into_iter()
+    }
+}
+
+// Borrowed iteration, so `for x in &storage` works.
+impl<'a, T> IntoIterator for &'a DynamicStorage<T> {
+    type Item = &'a T;
+    type IntoIter = std::slice::Iter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.data.iter()
+    }
+}
+
+// Mutable borrowed iteration, so `for x in &mut storage` works.
+impl<'a, T> IntoIterator for &'a mut DynamicStorage<T> {
+    type Item = &'a mut T;
+    type IntoIter = std::slice::IterMut<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.data.iter_mut()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_iter() {
+        let storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        let collected: Vec<i32> = storage.iter().copied().collect();
+        assert_eq!(collected, vec![1, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_iter_mut() {
+        let mut storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        if let Some(first) = storage.iter_mut().next() {
+            *first = 10;
+        }
+        assert_eq!(storage.data, vec![10, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_index() {
+        let storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        for (i, want) in [1, 2, 3, 4].into_iter().enumerate() {
+            assert_eq!(storage[i], want);
+        }
+    }
+
+    #[test]
+    fn test_index_mut() {
+        let mut storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        storage[0] = 10;
+        assert_eq!(storage[0], 10);
+    }
+
+    #[test]
+    fn test_into_iter() {
+        let storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        let owned: Vec<i32> = storage.into_iter().collect();
+        assert_eq!(owned, vec![1, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_into_iter_ref() {
+        let storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        let borrowed: Vec<&i32> = (&storage).into_iter().collect();
+        assert_eq!(borrowed, vec![&1, &2, &3, &4]);
+    }
+
+    #[test]
+    fn test_into_iter_mut() {
+        let mut storage = DynamicStorage::new(vec![1, 2, 3, 4]);
+        if let Some(first) = (&mut storage).into_iter().next() {
+            *first = 10;
+        }
+        assert_eq!(storage.data, vec![10, 2, 3, 4]);
+    }
+}
diff --git a/src/tensor.rs b/src/tensor.rs
new file mode 100644
index 0000000..595dc26
--- /dev/null
+++ b/src/tensor.rs
@@ -0,0 +1,1366 @@
+use num::{Float, Num};
+use std::ops::{Add, Div, Mul, Sub};
+
+use crate::axes::Axes;
+use crate::coordinate::Coordinate;
+use crate::error::ShapeError;
+use crate::iter::IndexIterator;
+use crate::matrix::DynamicMatrix;
+use crate::shape;
+use crate::shape::Shape;
+use crate::storage::DynamicStorage;
+use crate::vector::DynamicVector;
+
+/// An N-dimensional array: flat row-major storage paired with a shape.
+#[derive(Debug)]
+pub struct DynamicTensor<T: Num> {
+    data: DynamicStorage<T>, // elements in row-major order; data.size() == shape.size() (enforced by `new`)
+    shape: Shape,
+}
+pub type Tensor<T> = DynamicTensor<T>; // Alias for convenience
+
+impl<T: Num + PartialOrd + Copy> Tensor<T> {
+    /// Builds a tensor from a shape and a row-major data slice.
+    ///
+    /// Errors when `data.len()` differs from `shape.size()`.
+    pub fn new(shape: &Shape, data: &[T]) -> Result<Tensor<T>, ShapeError> {
+        if data.len() != shape.size() {
+            return Err(ShapeError::new("Data length does not match shape size"));
+        }
+        Ok(Tensor {
+            data: DynamicStorage::new(data.to_vec()),
+            shape: shape.clone(),
+        })
+    }
+
+    /// Tensor of the given shape with every element equal to `value`.
+    pub fn fill(shape: &Shape, value: T) -> Tensor<T> {
+        // `vec!` allocates and fills in one step; `T: Copy` implies `Clone`.
+        Tensor::new(shape, &vec![value; shape.size()]).unwrap()
+    }
+    /// Tensor of the given shape filled with `T::zero()`.
+    pub fn zeros(shape: &Shape) -> Tensor<T> {
+        Tensor::fill(shape, T::zero())
+    }
+    /// Tensor of the given shape filled with `T::one()`.
+    pub fn ones(shape: &Shape) -> Tensor<T> {
+        Tensor::fill(shape, T::one())
+    }
+
+    // Properties
+    /// The tensor's shape.
+    pub fn shape(&self) -> &Shape {
+        &self.shape
+    }
+    /// Total number of elements.
+    pub fn size(&self) -> usize {
+        self.shape.size()
+    }
+
+    /// Borrows the element at `coord`; errors if the coordinate is invalid.
+    pub fn get(&self, coord: &Coordinate) -> Result<&T, ShapeError> {
+        Ok(&self.data[self.data.flatten(coord, &self.shape)?])
+    }
+
+    /// Mutably borrows the element at `coord`; errors if the coordinate is invalid.
+    pub fn get_mut(&mut self, coord: &Coordinate) -> Result<&mut T, ShapeError> {
+        let index = self.data.flatten(coord, &self.shape)?;
+        Ok(&mut self.data[index])
+    }
+
+    /// Overwrites the element at `coord`; errors if the coordinate is invalid.
+    pub fn set(&mut self, coord: &Coordinate, value: T) -> Result<(), ShapeError> {
+        let index = self.data.flatten(coord, &self.shape)?;
+        self.data[index] = value;
+        Ok(())
+    }
+
+    /// Converts a `usize` count into `T` by repeated addition of `T::one()`,
+    /// since `Num` alone provides no direct integer conversion.
+    fn count_t(n: usize) -> T {
+        let mut result = T::zero();
+        for _ in 0..n {
+            result = result + T::one();
+        }
+        result
+    }
+
+    // Reduction operations
+
+    /// Sums over the given axes. An empty axis list (or removing every
+    /// axis) reduces to a single-element tensor of shape `[1]`.
+    pub fn sum(&self, axes: Axes) -> Tensor<T> {
+        let all_axes = (0..self.shape.order()).collect::<Vec<_>>();
+        let remaining_axes = all_axes
+            .clone()
+            .into_iter()
+            .filter(|&i| !axes.contains(&i))
+            .collect::<Vec<_>>();
+        let remaining_dims = remaining_axes
+            .iter()
+            .map(|&i| self.shape[i])
+            .collect::<Vec<_>>();
+        let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
+
+        // We resolve to a scalar value
+        if axes.is_empty() || remaining_dims.is_empty() {
+            let sum: T = self.data.iter().fold(T::zero(), |acc, x| acc + *x);
+            return Tensor::new(&shape![1].unwrap(), &[sum]).unwrap();
+        }
+
+        // Accumulate into a zeroed tensor over every combination of the
+        // removed axes, for each coordinate of the result.
+        let new_shape = Shape::new(remaining_dims).unwrap();
+        let remove_shape = Shape::new(removing_dims).unwrap();
+        let mut t: Tensor<T> = Tensor::zeros(&new_shape);
+
+        for target in IndexIterator::new(&new_shape) {
+            let sum_iter = IndexIterator::new(&remove_shape);
+            for sum_index in sum_iter {
+                // Rebuild the full input coordinate by splicing the removed
+                // axis positions back into the target coordinate.
+                let mut indices = target.clone();
+                for (i, &axis) in axes.iter().enumerate() {
+                    indices = indices.insert(axis, sum_index[i]);
+                }
+
+                let value = *t.get(&target).unwrap() + *self.get(&indices).unwrap();
+                t.set(&target, value).unwrap();
+            }
+        }
+
+        t
+    }
+
+    /// Arithmetic mean over the given axes: `sum(axes)` divided by the
+    /// number of elements folded into each output entry.
+    pub fn mean(&self, axes: Axes) -> Tensor<T> {
+        let n = if axes.is_empty() {
+            // Reducing everything: divide by the total element count.
+            Self::count_t(self.shape().size())
+        } else {
+            // Divide by the product of the removed dimensions.
+            axes.iter()
+                .map(|&i| Self::count_t(self.shape[i]))
+                .fold(T::one(), |acc, x| acc * x)
+        };
+        self.sum(axes) / n
+    }
+
+    /// Population variance over the given axes (divides by N, not N - 1).
+    pub fn var(&self, axes: Axes) -> Tensor<T> {
+        let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
+
+        // Number of elements folded into each output entry.
+        let n = if axes.is_empty() {
+            Self::count_t(self.shape().size())
+        } else {
+            removing_dims
+                .iter()
+                .map(|&dim| Self::count_t(dim))
+                .fold(T::one(), |acc, x| acc * x)
+        };
+
+        let all_axes = (0..self.shape.order()).collect::<Vec<_>>();
+        let remaining_axes = all_axes
+            .clone()
+            .into_iter()
+            .filter(|&i| !axes.contains(&i))
+            .collect::<Vec<_>>();
+        let remaining_dims = remaining_axes
+            .iter()
+            .map(|&i| self.shape[i])
+            .collect::<Vec<_>>();
+
+        // We resolve to a scalar value
+        if axes.is_empty() || remaining_dims.is_empty() {
+            let avg: T = self.data.iter().fold(T::zero(), |acc, x| acc + *x) / n;
+            let var: T = self
+                .data
+                .iter()
+                .map(|&x| (x - avg) * (x - avg))
+                .fold(T::zero(), |acc, x| acc + x)
+                / n;
+            return Tensor::new(&Shape::new(vec![1]).unwrap(), &[var]).unwrap();
+        }
+
+        // Sum of squared deviations from the per-entry mean, divided by n.
+        let new_shape = Shape::new(remaining_dims).unwrap();
+        let remove_shape = Shape::new(removing_dims).unwrap();
+        let mut t: Tensor<T> = Tensor::zeros(&new_shape);
+
+        // `mean` does not depend on `target`, so compute it once here
+        // rather than once per output entry.
+        let mean = self.mean(axes.clone());
+
+        for target in IndexIterator::new(&new_shape) {
+            let sum_iter = IndexIterator::new(&remove_shape);
+            for sum_index in sum_iter {
+                let mut indices = target.clone();
+                for (i, &axis) in axes.iter().enumerate() {
+                    indices = indices.insert(axis, sum_index[i]);
+                }
+
+                let centered = *self.get(&indices).unwrap() - *mean.get(&target).unwrap();
+                let value = *t.get(&target).unwrap() + centered * centered;
+                t.set(&target, value).unwrap();
+            }
+        }
+
+        t / n
+    }
+
+    /// Maximum over the given axes.
+    pub fn max(&self, axes: Axes) -> Tensor<T> {
+        let all_axes = (0..self.shape.order()).collect::<Vec<_>>();
+        let remaining_axes = all_axes
+            .clone()
+            .into_iter()
+            .filter(|&i| !axes.contains(&i))
+            .collect::<Vec<_>>();
+        let remaining_dims = remaining_axes
+            .iter()
+            .map(|&i| self.shape[i])
+            .collect::<Vec<_>>();
+        let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
+
+        // We resolve to a scalar value. The accumulator is seeded with
+        // `min(T::zero(), data minimum)`, which is <= every element, so the
+        // subsequent max fold is correct even for all-negative data.
+        if axes.is_empty() || remaining_dims.is_empty() {
+            let min: T = self
+                .data
+                .iter()
+                .fold(T::zero(), |acc, x| if acc < *x { acc } else { *x });
+            let max: T = self
+                .data
+                .iter()
+                .fold(min, |acc, x| if acc > *x { acc } else { *x });
+            return Tensor::new(&Shape::new(vec![1]).unwrap(), &[max]).unwrap();
+        }
+
+        // Create new tensor with right shape, seeded with a lower bound of
+        // the whole data set so each group's maximum is at least the seed.
+        let new_shape = Shape::new(remaining_dims).unwrap();
+        let remove_shape = Shape::new(removing_dims).unwrap();
+        let min: T = self
+            .data
+            .iter()
+            .fold(T::zero(), |acc, x| if acc < *x { acc } else { *x });
+        let mut t: Tensor<T> = Tensor::fill(&new_shape, min);
+
+        for target in IndexIterator::new(&new_shape) {
+            let max_iter = IndexIterator::new(&remove_shape);
+            for max_index in max_iter {
+                let mut indices = target.clone();
+                for (i, &axis) in axes.iter().enumerate() {
+                    indices = indices.insert(axis, max_index[i]);
+                }
+
+                if self.get(&indices).unwrap() > t.get(&target).unwrap() {
+                    let _ = t.set(&target, *self.get(&indices).unwrap());
+                }
+            }
+        }
+
+        t
+    }
+
+    /// Minimum over the given axes.
+    pub fn min(&self, axes: Axes) -> Tensor<T> {
+        let all_axes = (0..self.shape.order()).collect::<Vec<_>>();
+        let remaining_axes = all_axes
+            .clone()
+            .into_iter()
+            .filter(|&i| !axes.contains(&i))
+            .collect::<Vec<_>>();
+        let remaining_dims = remaining_axes
+            .iter()
+            .map(|&i| self.shape[i])
+            .collect::<Vec<_>>();
+        let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
+
+        // We resolve to a scalar value. The accumulator is seeded with
+        // `max(T::zero(), data maximum)`, which is >= every element, so the
+        // subsequent min fold is correct even for all-positive data.
+        if axes.is_empty() || remaining_dims.is_empty() {
+            let max: T = self
+                .data
+                .iter()
+                .fold(T::zero(), |acc, x| if acc > *x { acc } else { *x });
+            let min: T = self
+                .data
+                .iter()
+                .fold(max, |acc, x| if acc < *x { acc } else { *x });
+            return Tensor::new(&Shape::new(vec![1]).unwrap(), &[min]).unwrap();
+        }
+
+        // Create new tensor with right shape, seeded with an upper bound of
+        // the whole data set so each group's minimum is at most the seed.
+        let new_shape = Shape::new(remaining_dims).unwrap();
+        let remove_shape = Shape::new(removing_dims).unwrap();
+        let max: T = self
+            .data
+            .iter()
+            .fold(T::zero(), |acc, x| if acc > *x { acc } else { *x });
+        let mut t: Tensor<T> = Tensor::fill(&new_shape, max);
+
+        for target in IndexIterator::new(&new_shape) {
+            let min_iter = IndexIterator::new(&remove_shape);
+            for min_index in min_iter {
+                let mut indices = target.clone();
+                for (i, &axis) in axes.iter().enumerate() {
+                    indices = indices.insert(axis, min_index[i]);
+                }
+
+                if self.get(&indices).unwrap() < t.get(&target).unwrap() {
+                    let _ = t.set(&target, *self.get(&indices).unwrap());
+                }
+            }
+        }
+
+        t
+    }
+
+    // Tensor Product
+    // Consistent with numpy.tensordot(a, b, axis=0)
+    /// Outer (tensor) product: the result's shape is `self.shape` followed
+    /// by `other.shape`, and each entry is a product of one element of each.
+    pub fn prod(&self, other: &Tensor<T>) -> Tensor<T> {
+        let new_shape = self.shape.stack(&other.shape);
+
+        let mut new_data = Vec::with_capacity(self.size() * other.size());
+        for &a in &self.data {
+            for &b in &other.data {
+                new_data.push(a * b);
+            }
+        }
+
+        Tensor::new(&new_shape, &new_data).unwrap()
+    }
+}
+
+impl<T: Float + PartialOrd + Copy> Tensor<T> {
+    /// Element-wise power: raises every element to `power`.
+    pub fn pow(&self, power: T) -> Tensor<T> {
+        let data: Vec<T> = self.data.iter().map(|&x| x.powf(power)).collect();
+        Tensor::new(&self.shape, &data).unwrap()
+    }
+}
+
+// Element-wise Multiplication
+impl<T: Num + PartialOrd + Copy> Mul<T> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Scales every element by `rhs`.
+    fn mul(self, rhs: T) -> Tensor<T> {
+        let scaled: Vec<T> = self.data.iter().map(|&x| x * rhs).collect();
+        Tensor::new(&self.shape, &scaled).unwrap()
+    }
+}
+
+// Element-wise Addition
+impl<T: Num + PartialOrd + Copy> Add<T> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Adds the scalar `rhs` to every element.
+    fn add(self, rhs: T) -> Tensor<T> {
+        let shifted: Vec<T> = self.data.iter().map(|&x| x + rhs).collect();
+        Tensor::new(&self.shape, &shifted).unwrap()
+    }
+}
+
+// Tensor Addition
+impl<T: Num + PartialOrd + Copy> Add<Tensor<T>> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Element-wise sum; panics if the shapes differ.
+    fn add(self, rhs: Tensor<T>) -> Tensor<T> {
+        assert!(self.shape == rhs.shape);
+        let summed: Vec<T> = self
+            .data
+            .iter()
+            .zip(rhs.data.iter())
+            .map(|(&a, &b)| a + b)
+            .collect();
+        Tensor::new(&self.shape, &summed).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Add<DynamicVector<T>> for Tensor<T> {
+    type Output = DynamicVector<T>;
+
+    /// Tensor + vector: delegates to the vector-side impl (addition commutes).
+    fn add(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        rhs + self
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Add<DynamicMatrix<T>> for Tensor<T> {
+    type Output = DynamicMatrix<T>;
+
+    /// Tensor + matrix: delegates to the matrix-side impl (addition commutes).
+    fn add(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        rhs + self
+    }
+}
+
+// Element-wise Subtraction
+impl<T: Num + PartialOrd + Copy> Sub<T> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Subtracts the scalar `rhs` from every element.
+    fn sub(self, rhs: T) -> Tensor<T> {
+        let shifted: Vec<T> = self.data.iter().map(|&x| x - rhs).collect();
+        Tensor::new(&self.shape, &shifted).unwrap()
+    }
+}
+
+// Tensor Subtraction
+impl<T: Num + PartialOrd + Copy> Sub<Tensor<T>> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Element-wise difference; panics if the shapes differ.
+    fn sub(self, rhs: Tensor<T>) -> Tensor<T> {
+        assert!(self.shape == rhs.shape);
+        let diff: Vec<T> = self
+            .data
+            .iter()
+            .zip(rhs.data.iter())
+            .map(|(&a, &b)| a - b)
+            .collect();
+        Tensor::new(&self.shape, &diff).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Sub<DynamicVector<T>> for Tensor<T> {
+    type Output = DynamicVector<T>;
+
+    /// Tensor - vector, computed as `(-rhs) + self` since subtraction does
+    /// not commute; `T::zero() - T::one()` is -1 without a `Neg` bound.
+    fn sub(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        (rhs * (T::zero() - T::one())) + self
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Sub<DynamicMatrix<T>> for Tensor<T> {
+    type Output = DynamicMatrix<T>;
+
+    /// Tensor - matrix, computed as `(-rhs) + self` (see above).
+    /// NOTE(review): for unsigned `T`, `T::zero() - T::one()` relies on the
+    /// type's subtraction semantics — confirm intended behaviour.
+    fn sub(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        (rhs * (T::zero() - T::one())) + self
+    }
+}
+
+// Tensor Multiplication
+impl<T: Num + PartialOrd + Copy> Mul<Tensor<T>> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Element-wise (Hadamard) product; panics if the shapes differ.
+    fn mul(self, rhs: Tensor<T>) -> Tensor<T> {
+        assert!(self.shape == rhs.shape);
+        let product: Vec<T> = self
+            .data
+            .iter()
+            .zip(rhs.data.iter())
+            .map(|(&a, &b)| a * b)
+            .collect();
+        Tensor::new(&self.shape, &product).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Mul<DynamicVector<T>> for Tensor<T> {
+    type Output = DynamicVector<T>;
+
+    /// Tensor * vector: delegates to the vector-side impl
+    /// (element-wise multiplication commutes).
+    fn mul(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        rhs * self
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Mul<DynamicMatrix<T>> for Tensor<T> {
+    type Output = DynamicMatrix<T>;
+
+    /// Tensor * matrix: delegates to the matrix-side impl
+    /// (element-wise multiplication commutes).
+    fn mul(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        rhs * self
+    }
+}
+
+// Element-wise Division
+impl<T: Num + PartialOrd + Copy> Div<T> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Divides every element by the scalar `rhs`.
+    fn div(self, rhs: T) -> Tensor<T> {
+        let quotient: Vec<T> = self.data.iter().map(|&x| x / rhs).collect();
+        Tensor::new(&self.shape, &quotient).unwrap()
+    }
+}
+
+// Tensor Division
+impl<T: Num + PartialOrd + Copy> Div<Tensor<T>> for Tensor<T> {
+    type Output = Tensor<T>;
+
+    /// Element-wise division; panics if the shapes differ.
+    fn div(self, rhs: Tensor<T>) -> Tensor<T> {
+        assert!(self.shape == rhs.shape);
+        let quotient: Vec<T> = self
+            .data
+            .iter()
+            .zip(rhs.data.iter())
+            .map(|(&a, &b)| a / b)
+            .collect();
+        Tensor::new(&self.shape, &quotient).unwrap()
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Div<DynamicVector<T>> for Tensor<T> {
+    type Output = DynamicVector<T>;
+
+    /// Tensor / vector: division does not commute, so the tensor is first
+    /// reinterpreted as a vector. Panics if the conversion fails
+    /// (presumably a rank/shape mismatch — confirm in `from_tensor`).
+    fn div(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        DynamicVector::<T>::from_tensor(self).unwrap() / rhs
+    }
+}
+
+impl<T: Num + PartialOrd + Copy> Div<DynamicMatrix<T>> for Tensor<T> {
+    type Output = DynamicMatrix<T>;
+
+    /// Tensor / matrix: the tensor is first reinterpreted as a matrix;
+    /// panics if the conversion fails.
+    fn div(self, rhs: DynamicMatrix<T>) -> DynamicMatrix<T> {
+        DynamicMatrix::<T>::from_tensor(self).unwrap() / rhs
+    }
+}
+
+impl<T: Num + PartialOrd + Copy + std::fmt::Display> Tensor<T> {
+    /// Renders the tensor as nested bracketed lists, e.g. `[[1, 2], [3, 4]]`,
+    /// recursing one dimension at a time.
+    pub fn display(&self) -> String {
+        // Recursive worker: formats `data` interpreted with `shape`;
+        // `level` is the current nesting depth, used for indentation.
+        fn format_tensor<T: Num + PartialOrd + Copy + std::fmt::Display>(
+            data: &DynamicStorage<T>,
+            shape: &Shape,
+            level: usize,
+        ) -> String {
+            // Base case: a 1-D run is a flat comma-separated list.
+            if shape.order() == 1 {
+                let mut result = String::from("[");
+                for (i, item) in data.iter().enumerate() {
+                    result.push_str(&format!("{}", item));
+                    if i < data.size() - 1 {
+                        result.push_str(", ");
+                    }
+                }
+                result.push(']');
+                return result;
+            }
+
+            // Recursive case: slice the flat data into `shape[0]` sub-tensors
+            // of `sub_size` elements each and format them one per "row".
+            let mut result = String::from("[");
+            let sub_size = Shape::new(shape[1..].to_vec()).unwrap().size();
+            for i in 0..shape[0] {
+                if i > 0 {
+                    result.push_str(",\n");
+                    // Extra blank lines separate higher-order slices.
+                    for _ in 0..shape.order() - 2 {
+                        result.push('\n');
+                    }
+                    // Indent to align with the opening bracket at this depth.
+                    for _ in 0..level {
+                        result.push(' ');
+                    }
+                }
+                let sub_data = DynamicStorage::new(data[i * sub_size..(i + 1) * sub_size].to_vec());
+                result.push_str(&format_tensor(
+                    &sub_data,
+                    &Shape::new(shape[1..].to_vec()).unwrap(),
+                    level + 1,
+                ));
+            }
+            result.push(']');
+            result
+        }
+
+        format_tensor(&self.data, &self.shape, 1)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::coord;
+
+    #[test]
+    fn test_new_tensor() {
+        // Construction stores the data verbatim alongside the shape.
+        let s = shape![2, 2].unwrap();
+        let values = vec![1.0, 2.0, 3.0, 4.0];
+        let t = Tensor::new(&s, &values).unwrap();
+        assert_eq!(t.shape(), &s);
+        assert_eq!(t.data, DynamicStorage::new(values));
+    }
+
+    #[test]
+    fn test_new_tensor_shape_data_mismatch() {
+        // Three data values cannot fill a 2x2 shape.
+        let s = shape![2, 2].unwrap();
+        let too_short = vec![1.0, 2.0, 3.0];
+        assert!(Tensor::new(&s, &too_short).is_err());
+    }
+
+    #[test]
+    fn test_zeros_tensor() {
+        let s = shape![2, 3].unwrap();
+        let t: Tensor<f32> = Tensor::zeros(&s);
+        assert_eq!(t.shape(), &s);
+        assert_eq!(t.data, DynamicStorage::new(vec![0.0; s.size()]));
+    }
+
+    #[test]
+    fn test_ones_tensor() {
+        let s = shape![2, 3].unwrap();
+        let t: Tensor<f32> = Tensor::ones(&s);
+        assert_eq!(t.shape(), &s);
+        assert_eq!(t.data, DynamicStorage::new(vec![1.0; s.size()]));
+    }
+
+    #[test]
+    fn test_fill_tensor() {
+        let s = shape![2, 3].unwrap();
+        let t: Tensor<f32> = Tensor::fill(&s, 7.0);
+        assert_eq!(t.shape(), &s);
+        assert_eq!(t.data, DynamicStorage::new(vec![7.0; s.size()]));
+    }
+
+    #[test]
+    fn test_tensor_shape() {
+        let s = shape![2, 3].unwrap();
+        let t: Tensor<f32> = Tensor::zeros(&s);
+        assert_eq!(t.shape(), &s);
+    }
+
+    #[test]
+    fn test_tensor_size() {
+        // 2 * 3 elements in total.
+        let t: Tensor<f32> = Tensor::zeros(&shape![2, 3].unwrap());
+        assert_eq!(t.size(), 6);
+    }
+
+    #[test]
+    fn test_tensor_get() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let t = Tensor::new(&shape![2, 2].unwrap(), &data).unwrap();
+        for (i, j, want) in [(0, 0, 1.0), (0, 1, 2.0), (1, 0, 3.0), (1, 1, 4.0)] {
+            assert_eq!(*t.get(&coord![i, j].unwrap()).unwrap(), want);
+        }
+    }
+
+    #[test]
+    fn test_tensor_set() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut t = Tensor::new(&shape![2, 2].unwrap(), &data).unwrap();
+        // Overwrite every element, then read each back.
+        for (i, j, value) in [(0, 0, 5.0), (0, 1, 6.0), (1, 0, 7.0), (1, 1, 8.0)] {
+            t.set(&coord![i, j].unwrap(), value).unwrap();
+        }
+        for (i, j, want) in [(0, 0, 5.0), (0, 1, 6.0), (1, 0, 7.0), (1, 1, 8.0)] {
+            assert_eq!(*t.get(&coord![i, j].unwrap()).unwrap(), want);
+        }
+    }
+
+    #[test]
+    fn test_tensor_get_out_of_bounds() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let t = Tensor::new(&shape![2, 2].unwrap(), &data).unwrap();
+        for (i, j) in [(2, 0), (0, 2), (2, 2)] {
+            assert!(t.get(&coord![i, j].unwrap()).is_err());
+        }
+    }
+
+    #[test]
+    fn test_tensor_set_out_of_bounds() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut t = Tensor::new(&shape![2, 2].unwrap(), &data).unwrap();
+        for (i, j) in [(2, 0), (0, 2), (2, 2)] {
+            assert!(t.set(&coord![i, j].unwrap(), 5.0).is_err());
+        }
+    }
+
+    #[test]
+    fn test_tensor_sum_no_axis_1d() {
+        // Empty axis list collapses everything: 1+2+3+4+5 = 15.
+        let shape = shape![5].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![15.0]));
+    }
+
+    #[test]
+    fn test_tensor_sum_no_axis_2d() {
+        // 1+2+...+6 = 21, reduced to a single-element tensor.
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![21.0]));
+    }
+
+    #[test]
+    fn test_tensor_sum_no_axis_3d() {
+        // 1+2+...+12 = 78.
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![78.0]));
+    }
+
+    #[test]
+    fn test_tensor_sum_one_axis_1d() {
+        // Summing a 1-D tensor over its only axis also yields shape [1].
+        let shape = shape![5].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![0]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![15.0]));
+    }
+
+    #[test]
+    fn test_tensor_sum_one_axis_2d() {
+        // Column sums of [[1, 2, 3], [4, 5, 6]]: [1+4, 2+5, 3+6].
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![0]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![5.0, 7.0, 9.0]));
+    }
+
+    #[test]
+    fn test_tensor_sum_one_axis_3d() {
+        // Summing axis 0 pairs each element with the one 6 positions later.
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![0]);
+
+        assert_eq!(result.shape(), &shape![2, 3].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![8.0, 10.0, 12.0, 14.0, 16.0, 18.0])
+        );
+    }
+
+    #[test]
+    fn test_tensor_sum_multiple_axes_2d() {
+        // Removing every axis reduces to the scalar total.
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![21.0]));
+    }
+
+    #[test]
+    fn test_tensor_sum_multiple_axes_3d() {
+        // Summing over axes 0 and 1 leaves the length-3 trailing axis.
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.sum(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![22.0, 26.0, 30.0]));
+    }
+
+    #[test]
+    fn test_tensor_mean_no_axis_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.mean(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![3.5]));
+    }
+
+    #[test]
+    fn test_tensor_mean_one_axis_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.mean(vec![0]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![2.5, 3.5, 4.5]));
+    }
+
+    #[test]
+    fn test_tensor_mean_one_axis_3d() {
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.mean(vec![0]);
+
+        assert_eq!(result.shape(), &shape![2, 3].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
+        );
+    }
+
+    #[test]
+    fn test_tensor_mean_multiple_axes_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.mean(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![3.5]));
+    }
+
+    #[test]
+    fn test_tensor_mean_multiple_axes_3d() {
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.mean(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![5.5, 6.5, 7.5]));
+    }
+
+    #[test]
+    fn test_tensor_var_no_axis_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 1.0, 1.0, 7.0, 7.0, 7.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.var(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![9.0]));
+    }
+
+    #[test]
+    fn test_tensor_var_one_axis_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.var(vec![0]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![2.25, 2.25, 2.25]));
+    }
+
+    #[test]
+    fn test_tensor_var_one_axis_3d() {
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.var(vec![0]);
+
+        assert_eq!(result.shape(), &shape![2, 3].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![9.0, 9.0, 9.0, 9.0, 9.0, 9.0])
+        );
+    }
+
+    #[test]
+    fn test_tensor_var_multiple_axes_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, 1.0, 1.0, 7.0, 7.0, 7.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.var(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![9.0]));
+    }
+
+    #[test]
+    fn test_tensor_var_multiple_axes_3d() {
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.var(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![45.0, 45.0, 45.0]));
+    }
+
+    // --- max / min reductions: same axis-list convention as sum/mean/var. ---
+    // Inputs mix signs so the extremum is not simply the last element visited.
+    #[test]
+    fn test_tensor_max_no_axis_1d() {
+        let shape = shape![5].unwrap();
+        let data = vec![1.0, -2.0, 3.0, -4.0, 5.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.max(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![5.0]));
+    }
+
+    #[test]
+    fn test_tensor_max_one_axis_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, -2.0, 3.0, -4.0, 5.0, -6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.max(vec![0]);
+
+        // Column-wise maxima: [max(1,-4), max(-2,5), max(3,-6)]
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![1.0, 5.0, 3.0]));
+    }
+
+    #[test]
+    fn test_tensor_max_multiple_axes_3d() {
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.max(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![7.0, 11.0, 9.0]));
+    }
+
+    #[test]
+    fn test_tensor_min_no_axis_1d() {
+        let shape = shape![5].unwrap();
+        let data = vec![1.0, -2.0, 3.0, -4.0, 5.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.min(vec![]);
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![-4.0]));
+    }
+
+    #[test]
+    fn test_tensor_min_one_axis_2d() {
+        let shape = shape![2, 3].unwrap();
+        let data = vec![1.0, -2.0, 3.0, -4.0, 5.0, -6.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.min(vec![0]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![-4.0, -2.0, -6.0]));
+    }
+
+    #[test]
+    fn test_tensor_min_multiple_axes_3d() {
+        let shape = shape![2, 2, 3].unwrap();
+        let data = vec![
+            1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+
+        let result = tensor.min(vec![0, 1]);
+
+        assert_eq!(result.shape(), &shape![3].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![-10.0, -8.0, -12.0]));
+    }
+
+    // --- prod(): tensor (outer) product. Result shape is the concatenation
+    // of both operands' shapes; entry [i..,j..] = a[i..] * b[j..]. ---
+    #[test]
+    fn test_tensor_prod_1d_1d() {
+        let shape1 = shape![3].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0];
+        let tensor1 = Tensor::new(&shape1, &data1).unwrap();
+
+        let shape2 = shape![2].unwrap();
+        let data2 = vec![4.0, 5.0];
+        let tensor2 = Tensor::new(&shape2, &data2).unwrap();
+
+        let result = tensor1.prod(&tensor2);
+
+        // (3) x (2) -> (3, 2)
+        assert_eq!(result.shape(), &shape![3, 2].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![4.0, 5.0, 8.0, 10.0, 12.0, 15.0])
+        );
+    }
+
+    #[test]
+    fn test_tensor_prod_2d_1d() {
+        let shape1 = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor1 = Tensor::new(&shape1, &data1).unwrap();
+
+        let shape2 = shape![2].unwrap();
+        let data2 = vec![5.0, 6.0];
+        let tensor2 = Tensor::new(&shape2, &data2).unwrap();
+
+        let result = tensor1.prod(&tensor2);
+
+        // (2, 2) x (2) -> (2, 2, 2)
+        assert_eq!(result.shape(), &shape![2, 2, 2].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![5.0, 6.0, 10.0, 12.0, 15.0, 18.0, 20.0, 24.0])
+        );
+    }
+
+    #[test]
+    fn test_tensor_prod_2d_2d() {
+        let shape1 = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor1 = Tensor::new(&shape1, &data1).unwrap();
+
+        let shape2 = shape![2, 2].unwrap();
+        let data2 = vec![5.0, 6.0, 7.0, 8.0];
+        let tensor2 = Tensor::new(&shape2, &data2).unwrap();
+
+        let result = tensor1.prod(&tensor2);
+
+        // (2, 2) x (2, 2) -> (2, 2, 2, 2)
+        assert_eq!(result.shape(), &shape![2, 2, 2, 2].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![
+                5.0, 6.0, 7.0, 8.0, 10.0, 12.0, 14.0, 16.0, 15.0, 18.0, 21.0, 24.0, 20.0, 24.0,
+                28.0, 32.0
+            ])
+        );
+    }
+
+    // --- Operator-overload tests: tensor (+|-|*|/) scalar, and tensor + / -
+    // tensor of identical shape (elementwise). ---
+    #[test]
+    fn test_add_tensor() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+
+        // Scalar broadcast: adds 3.0 to every element.
+        let result = tensor1 + 3.0;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![4.0, 5.0, 6.0, 7.0]));
+    }
+
+    #[test]
+    fn test_add_tensors() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![5.0, 6.0, 7.0, 8.0];
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+        let tensor2 = Tensor::new(&shape, &data2).unwrap();
+
+        let result = tensor1 + tensor2;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![6.0, 8.0, 10.0, 12.0]));
+    }
+
+    #[test]
+    fn test_sub_tensor() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![5.0, 6.0, 7.0, 8.0];
+
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+
+        let result = tensor1 - 3.0;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![2.0, 3.0, 4.0, 5.0]));
+    }
+
+    #[test]
+    fn test_sub_tensors() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![5.0, 6.0, 7.0, 8.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+        let tensor2 = Tensor::new(&shape, &data2).unwrap();
+
+        let result = tensor1 - tensor2;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![4.0, 4.0, 4.0, 4.0]));
+    }
+
+    #[test]
+    fn test_mul_tensor() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+
+        let result = tensor1 * 2.0;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![2.0, 4.0, 6.0, 8.0]));
+    }
+
+    #[test]
+    fn test_div_tensor() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![4.0, 6.0, 8.0, 10.0];
+
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+
+        let result = tensor1 / 2.0;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![2.0, 3.0, 4.0, 5.0]));
+    }
+
+    // --- tensor * tensor tests. NOTE: for equal-shaped operands `*` is
+    // ELEMENTWISE (Hadamard) multiplication, not matrix multiplication —
+    // see the expected values in test_matrix_matrix_mul. ---
+    #[test]
+    fn test_vec_vec_mul_single() {
+        let shape = shape![1].unwrap();
+        let data1 = vec![2.0];
+        let data2 = vec![5.0];
+
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+        let tensor2 = Tensor::new(&shape, &data2).unwrap();
+
+        let result = tensor1 * tensor2;
+
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result.data, DynamicStorage::new(vec![10.0]));
+    }
+
+    #[test]
+    fn test_vec_vec_mul() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+
+        let tensor1 = Tensor::new(&shape, &data1).unwrap();
+        let tensor2 = Tensor::new(&shape, &data2).unwrap();
+
+        let result = tensor1 * tensor2;
+
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![2.0, 6.0, 12.0, 20.0]));
+    }
+
+    #[test]
+    fn test_matrix_matrix_mul() {
+        let shape1 = shape![2, 3].unwrap();
+        let shape2 = shape![2, 3].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+        let data2 = vec![7.0, 8.0, 9.0, 10.0, 11.0, 12.0];
+
+        let tensor1 = Tensor::new(&shape1, &data1).unwrap();
+        let tensor2 = Tensor::new(&shape2, &data2).unwrap();
+
+        let result = tensor1 * tensor2;
+
+        // Elementwise: [1*7, 2*8, 3*9, ...] — shapes (2,3)*(2,3) would be
+        // invalid for true matrix multiplication.
+        assert_eq!(result.shape(), &shape![2, 3].unwrap());
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![7.0, 16.0, 27.0, 40.0, 55.0, 72.0])
+        );
+    }
+
+    // --- Mixed-type operator tests: DynamicTensor on the left combined with
+    // DynamicVector / DynamicMatrix on the right; result indexes like the
+    // right-hand wrapper type. ---
+    #[test]
+    fn test_add_tensor_vector() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let vector = DynamicVector::new(&data2).unwrap();
+        let result = tensor + vector;
+        assert_eq!(result[0], 3.0);
+        assert_eq!(result[1], 5.0);
+        assert_eq!(result[2], 7.0);
+        assert_eq!(result[3], 9.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_sub_tensor_vector() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![2.0, 3.0, 4.0, 5.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let vector = DynamicVector::new(&data2).unwrap();
+        let result = tensor - vector;
+        assert_eq!(result[0], 1.0);
+        assert_eq!(result[1], 1.0);
+        assert_eq!(result[2], 1.0);
+        assert_eq!(result[3], 1.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_mul_tensor_vector() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![2.0, 3.0, 4.0, 5.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let vector = DynamicVector::new(&data2).unwrap();
+        let result = tensor * vector;
+        assert_eq!(result[0], 2.0);
+        assert_eq!(result[1], 6.0);
+        assert_eq!(result[2], 12.0);
+        assert_eq!(result[3], 20.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_div_tensor_vector() {
+        let shape = shape![4].unwrap();
+        let data1 = vec![2.0, 4.0, 6.0, 8.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let vector = DynamicVector::new(&data2).unwrap();
+        let result = tensor / vector;
+        assert_eq!(result[0], 2.0);
+        assert_eq!(result[1], 2.0);
+        assert_eq!(result[2], 2.0);
+        assert_eq!(result[3], 2.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_add_tensor_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![1.0, 2.0, 3.0, 4.0];
+        let data2 = vec![2.0, 3.0, 4.0, 5.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let matrix = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = tensor + matrix;
+        assert_eq!(result[coord![0, 0].unwrap()], 3.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 5.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 7.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 9.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_sub_tensor_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![2.0, 3.0, 4.0, 5.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let matrix = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = tensor - matrix;
+        assert_eq!(result[coord![0, 0].unwrap()], 1.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 1.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 1.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 1.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_mul_tensor_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![2.0, 3.0, 4.0, 5.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let matrix = DynamicMatrix::new(&shape, &data2).unwrap();
+        // Elementwise product (consistent with tensor * tensor above).
+        let result = tensor * matrix;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 6.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 12.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 20.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    #[test]
+    fn test_div_tensor_matrix() {
+        let shape = shape![2, 2].unwrap();
+        let data1 = vec![2.0, 4.0, 6.0, 8.0];
+        let data2 = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data1).unwrap();
+        let matrix = DynamicMatrix::new(&shape, &data2).unwrap();
+        let result = tensor / matrix;
+        assert_eq!(result[coord![0, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![0, 1].unwrap()], 2.0);
+        assert_eq!(result[coord![1, 0].unwrap()], 2.0);
+        assert_eq!(result[coord![1, 1].unwrap()], 2.0);
+        assert_eq!(result.shape(), &shape);
+    }
+
+    // --- display() formatting tests: numpy-style nesting. Inner rows are
+    // separated by ",\n" with alignment spaces; each extra dimension adds one
+    // more blank line between groups. Floats with integral values print
+    // without a decimal point. ---
+    #[test]
+    fn test_display_1d_tensor() {
+        let shape = shape![3].unwrap();
+        let data = vec![1.0, 2.0, 3.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let display = tensor.display();
+        assert_eq!(display, "[1, 2, 3]");
+    }
+
+    #[test]
+    fn test_display_2d_tensor() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let display = tensor.display();
+        assert_eq!(display, "[[1, 2],\n [3, 4]]");
+    }
+
+    #[test]
+    fn test_display_3d_tensor() {
+        let shape = shape![2, 2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let display = tensor.display();
+        assert_eq!(display, "[[[1, 2],\n  [3, 4]],\n\n [[5, 6],\n  [7, 8]]]");
+    }
+
+    #[test]
+    fn test_display_4d_tensor() {
+        let shape = shape![2, 2, 2, 2].unwrap();
+        let data = vec![
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+        ];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let display = tensor.display();
+        assert_eq!(display, "[[[[1, 2],\n   [3, 4]],\n\n  [[5, 6],\n   [7, 8]]],\n\n\n [[[9, 10],\n   [11, 12]],\n\n  [[13, 14],\n   [15, 16]]]]");
+    }
+
+    // --- pow(): elementwise exponentiation; covers integer, fractional
+    // (square root) and negative (reciprocal) exponents. ---
+    #[test]
+    fn test_pow_tensor_square() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let result = tensor.pow(2.0);
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![1.0, 4.0, 9.0, 16.0]));
+    }
+
+    #[test]
+    fn test_pow_tensor_sqrt() {
+        let shape = shape![2, 2].unwrap();
+        // Perfect squares, so exact float equality is safe here.
+        let data = vec![1.0, 4.0, 9.0, 16.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let result = tensor.pow(0.5);
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(result.data, DynamicStorage::new(vec![1.0, 2.0, 3.0, 4.0]));
+    }
+
+    #[test]
+    fn test_pow_tensor_negative_exponent() {
+        let shape = shape![2, 2].unwrap();
+        // Powers of two, so the reciprocals are exactly representable.
+        let data = vec![1.0, 2.0, 4.0, 8.0];
+        let tensor = Tensor::new(&shape, &data).unwrap();
+        let result = tensor.pow(-1.0);
+        assert_eq!(result.shape(), &shape);
+        assert_eq!(
+            result.data,
+            DynamicStorage::new(vec![1.0, 0.5, 0.25, 0.125])
+        );
+    }
+}
diff --git a/src/vector.rs b/src/vector.rs
new file mode 100644
index 0000000..ff2b939
--- /dev/null
+++ b/src/vector.rs
@@ -0,0 +1,608 @@
+use std::ops::{Add, Deref, DerefMut, Div, Index, IndexMut, Mul, Sub};
+
+use crate::coord;
+use crate::error::ShapeError;
+use crate::matrix::DynamicMatrix;
+use crate::shape;
+use crate::shape::Shape;
+use crate::tensor::DynamicTensor;
+use num::Float;
+use num::Num;
+
+/// A dynamically-sized 1-D (order-1) vector, implemented as a thin wrapper
+/// around a [`DynamicTensor`] constrained to a single dimension.
+pub struct DynamicVector<T: Num> {
+    // Invariant: `tensor.shape().order() == 1` (enforced by all constructors).
+    tensor: DynamicTensor<T>,
+}
+/// Convenience alias so callers can write `Vector<T>`.
+pub type Vector<T> = DynamicVector<T>;
+
+impl<T: Num + PartialOrd + Copy> DynamicVector<T> {
+    /// Builds a vector that copies the elements of `data`.
+    ///
+    /// # Errors
+    /// Returns a `ShapeError` if the shape or tensor construction is
+    /// rejected (e.g. for an empty slice). The shape error is propagated
+    /// with `?` rather than unwrapped, so invalid input surfaces as `Err`
+    /// instead of a panic.
+    pub fn new(data: &[T]) -> Result<DynamicVector<T>, ShapeError> {
+        let shape = shape![data.len()]?;
+        Ok(DynamicVector {
+            tensor: DynamicTensor::new(&shape, data)?,
+        })
+    }
+
+    /// Wraps an existing tensor without copying its storage.
+    ///
+    /// # Errors
+    /// Returns a `ShapeError` unless `tensor` is order 1.
+    pub fn from_tensor(tensor: DynamicTensor<T>) -> Result<DynamicVector<T>, ShapeError> {
+        if tensor.shape().order() != 1 {
+            return Err(ShapeError::new("Shape must have order of 1"));
+        }
+        Ok(DynamicVector { tensor })
+    }
+
+    /// Creates a vector with `shape[0]` copies of `value`.
+    ///
+    /// # Errors
+    /// Returns a `ShapeError` unless `shape` is order 1.
+    pub fn fill(shape: &Shape, value: T) -> Result<DynamicVector<T>, ShapeError> {
+        if shape.order() != 1 {
+            return Err(ShapeError::new("Shape must have order of 1"));
+        }
+        let data = vec![value; shape[0]];
+        DynamicVector::new(&data)
+    }
+
+    /// Creates a vector of zeros with the given (order-1) shape.
+    pub fn zeros(shape: &Shape) -> Result<DynamicVector<T>, ShapeError> {
+        Self::fill(shape, T::zero())
+    }
+
+    /// Creates a vector of ones with the given (order-1) shape.
+    pub fn ones(shape: &Shape) -> Result<DynamicVector<T>, ShapeError> {
+        Self::fill(shape, T::one())
+    }
+
+    // The reductions below delegate to the tensor's full reduction (empty
+    // axis list) and therefore each return a length-1 vector, not a scalar.
+
+    /// Sum of all elements, as a length-1 vector.
+    pub fn sum(&self) -> DynamicVector<T> {
+        let result = self.tensor.sum(vec![]);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    /// Mean of all elements, as a length-1 vector.
+    pub fn mean(&self) -> DynamicVector<T> {
+        let result = self.tensor.mean(vec![]);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    /// Variance of all elements, as a length-1 vector.
+    pub fn var(&self) -> DynamicVector<T> {
+        let result = self.tensor.var(vec![]);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    /// Maximum element, as a length-1 vector.
+    pub fn max(&self) -> DynamicVector<T> {
+        let result = self.tensor.max(vec![]);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    /// Minimum element, as a length-1 vector.
+    pub fn min(&self) -> DynamicVector<T> {
+        let result = self.tensor.min(vec![]);
+        DynamicVector::from_tensor(result).unwrap()
+    }
+
+    // Vector/Matrix Multiplication
+
+    /// Dot product with `rhs`, returned as a length-1 vector.
+    ///
+    /// # Panics
+    /// Panics if the two vectors have different shapes.
+    pub fn vecmul(&self, rhs: &DynamicVector<T>) -> DynamicVector<T> {
+        assert!(self.shape() == rhs.shape());
+        let mut result = T::zero();
+        for i in 0..self.size() {
+            result = result + self[i] * rhs[i];
+        }
+        DynamicVector::new(&[result]).unwrap()
+    }
+
+    /// Row-vector × matrix product: `(n) × (n, m) -> (m)`.
+    ///
+    /// # Panics
+    /// Panics if the vector length does not match the matrix's row count.
+    pub fn matmul(&self, rhs: &DynamicMatrix<T>) -> DynamicVector<T> {
+        assert_eq!(self.shape()[0], rhs.shape()[0]);
+        let mut result = DynamicTensor::zeros(&shape![rhs.shape()[1]].unwrap());
+        for j in 0..rhs.shape()[1] {
+            let mut sum = T::zero();
+            for i in 0..self.shape()[0] {
+                sum = sum + self[i] * rhs[coord![i, j].unwrap()];
+            }
+            result.set(&coord![j].unwrap(), sum).unwrap();
+        }
+        DynamicVector::from_tensor(result).unwrap()
+    }
+}
+
+impl<T: Float + PartialOrd + Copy> DynamicVector<T> {
+    /// Raises every element to `power` (requires a floating-point element
+    /// type); delegates to the tensor's elementwise `pow`.
+    pub fn pow(&self, power: T) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor.pow(power)).unwrap()
+    }
+}
+
+// Scalar Addition
+/// `vector + scalar`: adds `rhs` to every element; delegates to the tensor op.
+impl<T: Num + PartialOrd + Copy> Add<T> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn add(self, rhs: T) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor + rhs).unwrap()
+    }
+}
+
+// Tensor Addition
+/// `vector + vector`: elementwise addition (both operands are consumed).
+impl<T: Num + PartialOrd + Copy> Add<DynamicVector<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn add(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor + rhs.tensor).unwrap()
+    }
+}
+
+/// `vector + tensor`: elementwise addition with a raw order-1 tensor.
+/// The `from_tensor(...).unwrap()` relies on the tensor op preserving order 1.
+impl<T: Num + PartialOrd + Copy> Add<DynamicTensor<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn add(self, rhs: DynamicTensor<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor + rhs).unwrap()
+    }
+}
+
+// Scalar Subtraction
+/// `vector - scalar`: subtracts `rhs` from every element.
+impl<T: Num + PartialOrd + Copy> Sub<T> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn sub(self, rhs: T) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor - rhs).unwrap()
+    }
+}
+
+// Tensor Subtraction
+/// `vector - vector`: elementwise subtraction.
+impl<T: Num + PartialOrd + Copy> Sub<DynamicVector<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn sub(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor - rhs.tensor).unwrap()
+    }
+}
+
+/// `vector - tensor`: elementwise subtraction with a raw order-1 tensor.
+impl<T: Num + PartialOrd + Copy> Sub<DynamicTensor<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn sub(self, rhs: DynamicTensor<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor - rhs).unwrap()
+    }
+}
+
+// Scalar Multiplication
+/// `vector * scalar`: multiplies every element by `rhs`.
+impl<T: Num + PartialOrd + Copy> Mul<T> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn mul(self, rhs: T) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor * rhs).unwrap()
+    }
+}
+
+// Tensor Multiplication
+/// `vector * vector`: ELEMENTWISE product — use `vecmul` for a dot product.
+impl<T: Num + PartialOrd + Copy> Mul<DynamicVector<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn mul(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor * rhs.tensor).unwrap()
+    }
+}
+
+/// `vector * tensor`: elementwise product with a raw order-1 tensor.
+impl<T: Num + PartialOrd + Copy> Mul<DynamicTensor<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn mul(self, rhs: DynamicTensor<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor * rhs).unwrap()
+    }
+}
+
+// Scalar Division
+/// `vector / scalar`: divides every element by `rhs`.
+impl<T: Num + PartialOrd + Copy> Div<T> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn div(self, rhs: T) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor / rhs).unwrap()
+    }
+}
+
+// Tensor Division
+/// `vector / vector`: elementwise division.
+impl<T: Num + PartialOrd + Copy> Div<DynamicVector<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn div(self, rhs: DynamicVector<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor / rhs.tensor).unwrap()
+    }
+}
+
+/// `vector / tensor`: elementwise division by a raw order-1 tensor.
+impl<T: Num + PartialOrd + Copy> Div<DynamicTensor<T>> for DynamicVector<T> {
+    type Output = DynamicVector<T>;
+
+    fn div(self, rhs: DynamicTensor<T>) -> DynamicVector<T> {
+        DynamicVector::from_tensor(self.tensor / rhs).unwrap()
+    }
+}
+
+// NOTE(review): Deref-delegation exposes the ENTIRE DynamicTensor API on
+// vectors (shape(), size(), get(), set(), ...). This is deliberate
+// smart-pointer-style forwarding here, but it also means tensor methods that
+// are meaningless for vectors leak through — confirm this is the intended
+// public surface.
+impl<T: Num> Deref for DynamicVector<T> {
+    type Target = DynamicTensor<T>;
+
+    fn deref(&self) -> &DynamicTensor<T> {
+        &self.tensor
+    }
+}
+
+impl<T: Num> DerefMut for DynamicVector<T> {
+    fn deref_mut(&mut self) -> &mut DynamicTensor<T> {
+        &mut self.tensor
+    }
+}
+
+/// `vector[i]` element access; panics (via the inner `unwrap`) when `i` is
+/// out of bounds, matching the slice-indexing convention.
+impl<T: Num + Copy + PartialOrd> Index<usize> for DynamicVector<T> {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        self.tensor.get(&coord![index].unwrap()).unwrap()
+    }
+}
+
+/// `vector[i] = v` mutable access; same panic-on-out-of-bounds behavior.
+impl<T: Num + Copy + PartialOrd> IndexMut<usize> for DynamicVector<T> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        self.tensor.get_mut(&coord![index].unwrap()).unwrap()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_new() {
+        let shape = shape![4].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let vector = DynamicVector::new(&data).unwrap();
+        assert_eq!(vector.shape(), &shape);
+        assert_eq!(vector[0], 1.0);
+        assert_eq!(vector[1], 2.0);
+        assert_eq!(vector[2], 3.0);
+        assert_eq!(vector[3], 4.0);
+    }
+
+    #[test]
+    fn test_from_tensor() {
+        let shape = shape![4].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data).unwrap();
+        let vector = DynamicVector::from_tensor(tensor).unwrap();
+        assert_eq!(vector.shape(), &shape);
+        assert_eq!(vector[0], 1.0);
+        assert_eq!(vector[1], 2.0);
+        assert_eq!(vector[2], 3.0);
+        assert_eq!(vector[3], 4.0);
+    }
+
+    #[test]
+    fn test_from_tensor_fail() {
+        let shape = shape![2, 2].unwrap();
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let tensor = DynamicTensor::new(&shape, &data).unwrap();
+        let result = DynamicVector::from_tensor(tensor);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_fill() {
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::fill(&shape, 3.0).unwrap();
+        assert_eq!(vector.shape(), &shape);
+        assert_eq!(vector[0], 3.0);
+        assert_eq!(vector[1], 3.0);
+        assert_eq!(vector[2], 3.0);
+        assert_eq!(vector[3], 3.0);
+    }
+
+    #[test]
+    fn test_zeros() {
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::<f64>::zeros(&shape).unwrap();
+        assert_eq!(vector.shape(), &shape);
+        assert_eq!(vector[0], 0.0);
+        assert_eq!(vector[1], 0.0);
+        assert_eq!(vector[2], 0.0);
+        assert_eq!(vector[3], 0.0);
+    }
+
+    #[test]
+    fn test_ones() {
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::<f64>::ones(&shape).unwrap();
+        assert_eq!(vector.shape(), &shape);
+        assert_eq!(vector[0], 1.0);
+        assert_eq!(vector[1], 1.0);
+        assert_eq!(vector[2], 1.0);
+        assert_eq!(vector[3], 1.0);
+    }
+
+    #[test]
+    fn test_size() {
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::<f64>::zeros(&shape).unwrap();
+        assert_eq!(vector.size(), 4);
+    }
+
+    #[test]
+    fn test_get() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let vector = DynamicVector::new(&data).unwrap();
+        assert_eq!(vector[2], 3.0);
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut vector = DynamicVector::new(&data).unwrap();
+        vector[2] = 5.0;
+        assert_eq!(vector[2], 5.0);
+    }
+
+    #[test]
+    fn test_set() {
+        let data = vec![1.0, 2.0, 3.0, 4.0];
+        let mut vector = DynamicVector::new(&data).unwrap();
+        vector.set(&coord![2].unwrap(), 5.0).unwrap();
+        assert_eq!(*vector.get(&coord![2].unwrap()).unwrap(), 5.0);
+    }
+
+    #[test]
+    fn test_sum() {
+        // sum() reduces to a 1-element vector: 1 + 2 + 3 + 4 = 10.
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector.sum();
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result[0], 10.0);
+    }
+
+    #[test]
+    fn test_mean() {
+        // mean() reduces to a 1-element vector: (1 + 2 + 3 + 4) / 4 = 2.5.
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector.mean();
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result[0], 2.5);
+    }
+
+    #[test]
+    fn test_var() {
+        // Population variance of [1, 2, 3, 4] (mean 2.5) is 5/4 = 1.25.
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector.var();
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result[0], 1.25);
+    }
+
+    #[test]
+    fn test_min() {
+        // min() reduces to a 1-element vector holding the smallest entry.
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector.min();
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result[0], 1.0);
+    }
+
+    #[test]
+    fn test_max() {
+        // All-negative input checks that max() compares values, not magnitudes.
+        let vector = DynamicVector::new(&vec![-1.0, -2.0, -3.0, -4.0]).unwrap();
+        let result = vector.max();
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result[0], -1.0);
+    }
+
+    #[test]
+    fn test_vecmul() {
+        // Dot product: 1*2 + 2*3 + 3*4 + 4*5 = 40, returned as a 1-element vector.
+        let lhs = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let rhs = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = lhs.vecmul(&rhs);
+        assert_eq!(result.shape(), &shape![1].unwrap());
+        assert_eq!(result[0], 40.0);
+    }
+
+    #[test]
+    fn test_matmul() {
+        // Row-vector times matrix: [1, 2] * [[1, 2], [3, 4]] = [7, 10].
+        let vector = DynamicVector::new(&vec![1.0, 2.0]).unwrap();
+        let matrix = DynamicMatrix::new(&shape![2, 2].unwrap(), &vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector.matmul(&matrix);
+        assert_eq!(result.shape(), &shape![2].unwrap());
+        for (i, &want) in [7.0, 10.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_prod() {
+        // Outer product of two length-4 vectors yields a 4x4 tensor whose
+        // (i, j) entry is lhs[i] * rhs[j].
+        let lhs = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let rhs = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = lhs.prod(&rhs);
+
+        let expected_shape = shape![4, 4].unwrap();
+        let expected_tensor = DynamicTensor::new(
+            &expected_shape,
+            &vec![
+                2.0, 3.0, 4.0, 5.0, 4.0, 6.0, 8.0, 10.0, 6.0, 9.0, 12.0, 15.0, 8.0, 12.0, 16.0,
+                20.0,
+            ],
+        )
+        .unwrap();
+
+        assert_eq!(result.shape(), &expected_shape);
+        for i in 0..result.shape()[0] {
+            for j in 0..result.shape()[1] {
+                let got = result.get(&coord![i, j].unwrap()).unwrap();
+                let want = expected_tensor.get(&coord![i, j].unwrap()).unwrap();
+                assert_eq!(*got, *want);
+            }
+        }
+    }
+
+    #[test]
+    fn test_add_scalar() {
+        // Adding a scalar shifts every element by that amount; shape is preserved.
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector + 2.0;
+        assert_eq!(result.shape(), &shape![4].unwrap());
+        for (i, &want) in [3.0, 4.0, 5.0, 6.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_add_vector() {
+        // Vector + vector adds element-wise; shape is preserved.
+        let shape = shape![4].unwrap();
+        let lhs = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let rhs = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = lhs + rhs;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [3.0, 5.0, 7.0, 9.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_add_vector_tensor() {
+        // Vector + tensor (same shape) adds element-wise; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let tensor = DynamicTensor::new(&shape, &vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = vector + tensor;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [3.0, 5.0, 7.0, 9.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_sub_scalar() {
+        // Subtracting a scalar shifts every element down; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector - 2.0;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [-1.0, 0.0, 1.0, 2.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_sub_vector() {
+        // Vector - vector subtracts element-wise; shape is preserved.
+        let shape = shape![4].unwrap();
+        let lhs = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let rhs = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = lhs - rhs;
+        assert_eq!(result.shape(), &shape);
+        for i in 0..4 {
+            assert_eq!(result[i], -1.0);
+        }
+    }
+
+    #[test]
+    fn test_sub_vector_tensor() {
+        // Vector - tensor (same shape) subtracts element-wise; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let tensor = DynamicTensor::new(&shape, &vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = vector - tensor;
+        assert_eq!(result.shape(), &shape);
+        for i in 0..4 {
+            assert_eq!(result[i], -1.0);
+        }
+    }
+
+    #[test]
+    fn test_mul_scalar() {
+        // Multiplying by a scalar scales every element; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let result = vector * 2.0;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [2.0, 4.0, 6.0, 8.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_mul_vector() {
+        // Vector * vector multiplies element-wise (Hadamard); shape is preserved.
+        let shape = shape![4].unwrap();
+        let lhs = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let rhs = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = lhs * rhs;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [2.0, 6.0, 12.0, 20.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_mul_vector_tensor() {
+        // Vector * tensor (same shape) multiplies element-wise; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![1.0, 2.0, 3.0, 4.0]).unwrap();
+        let tensor = DynamicTensor::new(&shape, &vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = vector * tensor;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [2.0, 6.0, 12.0, 20.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_div_scalar() {
+        // Dividing by a scalar scales every element down; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![4.0, 6.0, 8.0, 10.0]).unwrap();
+        let result = vector / 2.0;
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [2.0, 3.0, 4.0, 5.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+
+    #[test]
+    fn test_div_vector() {
+        // Vector / vector divides element-wise; every quotient here is 2.
+        let shape = shape![4].unwrap();
+        let lhs = DynamicVector::new(&vec![4.0, 6.0, 8.0, 10.0]).unwrap();
+        let rhs = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = lhs / rhs;
+        assert_eq!(result.shape(), &shape);
+        for i in 0..4 {
+            assert_eq!(result[i], 2.0);
+        }
+    }
+
+    #[test]
+    fn test_div_vector_tensor() {
+        // Vector / tensor (same shape) divides element-wise; every quotient is 2.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![4.0, 6.0, 8.0, 10.0]).unwrap();
+        let tensor = DynamicTensor::new(&shape, &vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = vector / tensor;
+        assert_eq!(result.shape(), &shape);
+        for i in 0..4 {
+            assert_eq!(result[i], 2.0);
+        }
+    }
+
+    #[test]
+    fn test_pow_vector() {
+        // pow() raises each element to the given exponent; shape is preserved.
+        let shape = shape![4].unwrap();
+        let vector = DynamicVector::new(&vec![2.0, 3.0, 4.0, 5.0]).unwrap();
+        let result = vector.pow(2.0);
+        assert_eq!(result.shape(), &shape);
+        for (i, &want) in [4.0, 9.0, 16.0, 25.0].iter().enumerate() {
+            assert_eq!(result[i], want);
+        }
+    }
+}