diff --git a/Cargo.toml b/Cargo.toml
index 20543ab..d76a0eb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "numeric"
-version = "0.1.4"
+version = "0.2.0"
 authors = ["Gustav Larsson "]
 description = "N-dimensional matrix class for Rust"
 repository = "https://github.com/numeric-rust/numeric"
@@ -11,12 +11,12 @@ keywords = ["numeric", "tensor", "matrix", "vector", "hdf5"]
 license = "MIT"

 [dependencies]
-blas = "0.9.1"
-lapack = "0.8.1"
-num = "0.1.29"
-rand = "0.3.12"
-libc = "0.2.4"
-hdf5-sys = "0.3.2"
+blas = "0.22.0"
+lapack = "0.19.0"
+num = "0.4.0"
+rand = "0.8.3"
+libc = "0.2.94"
+hdf5-sys = "0.7.1"

 [[test]]
 name = "numeric"
diff --git a/src/io/mod.rs b/src/io/mod.rs
index dab1084..562a216 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -43,11 +43,11 @@ extern crate std;
 use libc::{c_char, c_void};
 use std::path::Path;
-use hdf5_sys as ffi;
-
+// use hdf5_sys as ffi;
+use hdf5_sys::{h5d, h5t, h5p, h5f, h5e, h5s, h5i};
 use tensor::Tensor;

-extern fn error_handler(_: ffi::hid_t, _: *const c_void) {
+extern fn error_handler(_: h5i::hid_t, _: *const c_void) {
     // Suppress errors. We will rely on return statuses alone.
 }
@@ -71,30 +71,30 @@ macro_rules! add_save {
         let group = "data";

         unsafe {
-            let filename_cstr = try!(::std::ffi::CString::new(filename));
-            let group_cstr = try!(::std::ffi::CString::new(group));
+            let filename_cstr = ::std::ffi::CString::new(filename)?;
+            let group_cstr = ::std::ffi::CString::new(group)?;

-            //ffi::H5Eset_auto2(0, error_handler, 0 as *const c_void);
+            //h5e::H5Eset_auto2(0, error_handler, 0 as *const c_void);

-            let file = ffi::H5Fcreate(filename_cstr.as_ptr() as *const c_char,
-                                      ffi::H5F_ACC_TRUNC, ffi::H5P_DEFAULT, ffi::H5P_DEFAULT);
+            let file = h5f::H5Fcreate(filename_cstr.as_ptr() as *const c_char,
+                                      h5f::H5F_ACC_TRUNC, h5p::H5P_DEFAULT, h5p::H5P_DEFAULT);

             let mut shape: Vec<u64> = Vec::new();
             for s in self.shape().iter() {
                 shape.push(*s as u64);
             }

-            let space = ffi::H5Screate_simple(shape.len() as i32, shape.as_ptr(),
+            let space = h5s::H5Screate_simple(shape.len() as i32, shape.as_ptr(),
                                               std::ptr::null());

-            let dset = ffi::H5Dcreate2(file, group_cstr.as_ptr() as *const c_char,
+            let dset = h5d::H5Dcreate2(file, group_cstr.as_ptr() as *const c_char,
                                        $h5type, space,
-                                       ffi::H5P_DEFAULT,
-                                       ffi::H5P_DEFAULT,
-                                       ffi::H5P_DEFAULT);
+                                       h5p::H5P_DEFAULT,
+                                       h5p::H5P_DEFAULT,
+                                       h5p::H5P_DEFAULT);

-            let status = ffi::H5Dwrite(dset, $h5type, ffi::H5S_ALL, ffi::H5S_ALL,
-                                       ffi::H5P_DEFAULT, self.as_ptr() as * const c_void);
+            let status = h5d::H5Dwrite(dset, $h5type, h5s::H5S_ALL, h5s::H5S_ALL,
+                                       h5p::H5P_DEFAULT, self.as_ptr() as * const c_void);

             if status < 0 {
                 let msg = format!("Failed to write '{}': {:?}", group, path);
@@ -103,8 +103,8 @@ macro_rules! add_save {
             }

-            ffi::H5Dclose(dset);
-            ffi::H5Fclose(file);
+            h5d::H5Dclose(dset);
+            h5f::H5Fclose(file);
         }
         Ok(())
     }
@@ -112,16 +112,16 @@ macro_rules! add_save {
     )
 }

-add_save!(u8, ffi::H5T_NATIVE_UINT8);
-add_save!(u16, ffi::H5T_NATIVE_UINT16);
-add_save!(u32, ffi::H5T_NATIVE_UINT32);
-add_save!(u64, ffi::H5T_NATIVE_UINT64);
-add_save!(i8, ffi::H5T_NATIVE_INT8);
-add_save!(i16, ffi::H5T_NATIVE_INT16);
-add_save!(i32, ffi::H5T_NATIVE_INT32);
-add_save!(i64, ffi::H5T_NATIVE_INT64);
-add_save!(f32, ffi::H5T_NATIVE_FLOAT);
-add_save!(f64, ffi::H5T_NATIVE_DOUBLE);
+add_save!(u8, h5t::H5T_NATIVE_UINT8);
+add_save!(u16, h5t::H5T_NATIVE_UINT16);
+add_save!(u32, h5t::H5T_NATIVE_UINT32);
+add_save!(u64, h5t::H5T_NATIVE_UINT64);
+add_save!(i8, h5t::H5T_NATIVE_INT8);
+add_save!(i16, h5t::H5T_NATIVE_INT16);
+add_save!(i32, h5t::H5T_NATIVE_INT32);
+add_save!(i64, h5t::H5T_NATIVE_INT64);
+add_save!(f32, h5t::H5T_NATIVE_FLOAT);
+add_save!(f64, h5t::H5T_NATIVE_DOUBLE);


 macro_rules! add_load {
@@ -137,13 +137,13 @@ macro_rules! add_load {
             },
         };
         unsafe {
-            let filename_cstr = try!(::std::ffi::CString::new(filename));
-            let group_cstr = try!(::std::ffi::CString::new(group));
+            let filename_cstr = ::std::ffi::CString::new(filename)?;
+            let group_cstr = ::std::ffi::CString::new(group)?;

-            ffi::H5Eset_auto2(0, error_handler, 0 as *const c_void);
+            h5e::H5Eset_auto2(0, error_handler, 0 as *const c_void);

-            let file = ffi::H5Fopen(filename_cstr.as_ptr() as *const c_char,
-                                    ffi::H5F_ACC_RDONLY, ffi::H5P_DEFAULT);
+            let file = h5f::H5Fopen(filename_cstr.as_ptr() as *const c_char,
+                                    h5f::H5F_ACC_RDONLY, h5p::H5P_DEFAULT);

             if file < 0 {
                 let msg = format!("File not found: {:?}", path);
@@ -151,8 +151,8 @@ macro_rules! add_load {
                 return Err(err);
             }

-            let dset = ffi::H5Dopen2(file, group_cstr.as_ptr() as *const c_char,
-                                     ffi::H5P_DEFAULT);
+            let dset = h5d::H5Dopen2(file, group_cstr.as_ptr() as *const c_char,
+                                     h5p::H5P_DEFAULT);

             if dset < 0 {
                 let msg = format!("Group '{}' not found: {}", group, filename);
@@ -160,15 +160,15 @@ macro_rules! add_load {
                 return Err(err);
             }

-            let datatype = ffi::H5Dget_type(dset);
+            let datatype = h5d::H5Dget_type(dset);

-            let space = ffi::H5Dget_space(dset);
-            let ndims = ffi::H5Sget_simple_extent_ndims(space);
+            let space = h5d::H5Dget_space(dset);
+            let ndims = h5s::H5Sget_simple_extent_ndims(space);

-            let mut shape: Tensor<ffi::hsize_t> = Tensor::zeros(&[ndims as usize]);
+            let mut shape: Tensor<u64> = Tensor::zeros(&[ndims as usize]);

-            if ffi::H5Sget_simple_extent_dims(space, shape.as_mut_ptr(),
-                                              0 as *mut ffi::hsize_t) != ndims {
+            if h5s::H5Sget_simple_extent_dims(space, shape.as_mut_ptr(),
+                                              0 as *mut h5d::hsize_t) != ndims {
                 let msg = format!("Could not read shape of tesor: {}", filename);
                 let err = std::io::Error::new(std::io::ErrorKind::InvalidData, msg);
                 return Err(err);
@@ -179,65 +179,65 @@ macro_rules! add_load {
             let unsigned_shape = &unsigned_tensor.data();

             let data: Tensor<$t> = {
-                if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT8) == 1 {
+                if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_UINT8) == 1 {
                     let mut native_data: Tensor<u8> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT8, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_UINT8, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT8) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_INT8) == 1 {
                     let mut native_data: Tensor<i8> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_INT8, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_INT8, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT16) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_UINT16) == 1 {
                     let mut native_data: Tensor<u16> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT16, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_UINT16, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT16) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_INT16) == 1 {
                     let mut native_data: Tensor<i16> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_INT16, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_INT16, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT32) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_UINT32) == 1 {
                     let mut native_data: Tensor<u32> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT32, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_UINT32, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT32) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_INT32) == 1 {
                     let mut native_data: Tensor<i32> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_INT32, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_INT32, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT64) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_UINT64) == 1 {
                     let mut native_data: Tensor<u64> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT64, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_UINT64, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT64) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_INT64) == 1 {
                     let mut native_data: Tensor<i64> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_INT64, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_INT64, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_FLOAT) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_FLOAT) == 1 {
                     let mut native_data: Tensor<f32> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_FLOAT, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_FLOAT, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
-                } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_DOUBLE) == 1 {
+                } else if h5t::H5Tequal(datatype, h5t::H5T_NATIVE_DOUBLE) == 1 {
                     let mut native_data: Tensor<f64> = Tensor::empty(&unsigned_shape[..]);
                     // Finally load the actual data
-                    ffi::H5Dread(dset, ffi::H5T_NATIVE_DOUBLE, ffi::H5S_ALL, ffi::H5S_ALL,
-                                 ffi::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
+                    h5d::H5Dread(dset, h5t::H5T_NATIVE_DOUBLE, h5s::H5S_ALL, h5s::H5S_ALL,
+                                 h5p::H5P_DEFAULT, native_data.as_mut_ptr() as *mut c_void);
                     native_data.convert::<$t>()
                 } else {
                     let msg = format!("Unable to convert '{}' to {}: {}",
@@ -247,9 +247,9 @@ macro_rules! add_load {
                 }
             };

-            ffi::H5Tclose(datatype);
-            ffi::H5Dclose(dset);
-            ffi::H5Fclose(file);
+            h5t::H5Tclose(datatype);
+            h5d::H5Dclose(dset);
+            h5f::H5Fclose(file);

             Ok(data)
         }
diff --git a/src/random/mod.rs b/src/random/mod.rs
index dacb43f..bbe2ba3 100644
--- a/src/random/mod.rs
+++ b/src/random/mod.rs
@@ -16,8 +16,9 @@
 //! // 0.702227 0.346673 0.737954
 //! // [Tensor of shape 3x3]
 //! ```
-use rand::{Rng, SeedableRng, StdRng};
-use rand::distributions::range::SampleRange;
+use rand::{Rng, SeedableRng};
+use rand::rngs::StdRng;
+use rand::distributions::uniform::SampleRange;
 use num::traits::Float;
 use std::f64;
@@ -40,7 +41,7 @@ impl RandomState {
     /// Generates a tensor by independently drawing samples from a uniform distribution in the
     /// range [`low`, `high`). This is appropriate for integer types as well.
     pub fn uniform<T>(&mut self, low: T, high: T, shape: &[usize]) -> Tensor<T>
-            where T: NumericTrait + SampleRange {
+            where T: NumericTrait + SampleRange<T> {
         let mut t = Tensor::zeros(shape);
         {
             let n = t.size();
@@ -54,7 +55,7 @@ impl RandomState {

     /// Generates a tensor by independently drawing samples from a standard normal.
     pub fn normal<T>(&mut self, shape: &[usize]) -> Tensor<T>
-            where T: NumericTrait + SampleRange + Float {
+            where T: NumericTrait + SampleRange<T> + Float {
         let u1 = self.uniform(T::zero(), T::one(), shape);
         let u2 = self.uniform(T::zero(), T::one(), shape);
diff --git a/src/tensor/dot.rs b/src/tensor/dot.rs
index da95fcb..971aa6a 100644
--- a/src/tensor/dot.rs
+++ b/src/tensor/dot.rs
@@ -25,8 +25,8 @@ macro_rules! add_impl {
             } else {
                 let t1 = self.canonize();
                 let t2 = rhs.canonize();
-                blas::$gemv(b'T', t1.shape[1], t1.shape[0], 1.0, &t1.data,
-                            t1.shape[1], &t2.data, 1, 0.0, data, 1);
+                blas::$gemv(b'T', t1.shape[1] as i32, t1.shape[0] as i32, 1.0, &t1.data,
+                            t1.shape[1] as i32, &t2.data, 1, 0.0, data, 1);
             }
         }
         t3
@@ -47,8 +47,8 @@ macro_rules! add_impl {
             } else {
                 let t1 = self.canonize();
                 let t2 = rhs.canonize();
-                blas::$gemv(b'N', t2.shape[1], t2.shape[0], 1.0, &t2.data,
-                            t2.shape[1], &t1.data, 1, 0.0, data, 1);
+                blas::$gemv(b'N', t2.shape[1] as i32, t2.shape[0] as i32, 1.0, &t2.data,
+                            t2.shape[1] as i32, &t1.data, 1, 0.0, data, 1);
             }
         }
         t3
@@ -72,9 +72,9 @@ macro_rules! add_impl {
             let t1 = self.canonize();
             let t2 = rhs.canonize();
             let mut data = t3.slice_mut();
-            blas::$gemm(b'N', b'N', t2.shape[1], t1.shape[0], t2.shape[0], 1.0,
-                        &t2.data, t2.shape[1], &t1.data, t2.shape[0], 0.0,
-                        data, t2.shape[1]);
+            blas::$gemm(b'N', b'N', t2.shape[1] as i32, t1.shape[0] as i32, t2.shape[0] as i32, 1.0,
+                        &t2.data, t2.shape[1] as i32, &t1.data, t2.shape[0] as i32, 0.0,
+                        data, t2.shape[1] as i32);
         }
         t3
     } else if self.ndim() == 1 && rhs.ndim() == 1 { // scalar product
@@ -88,7 +88,7 @@ macro_rules! add_impl {
         } else {
             let t1 = self.canonize();
             let t2 = rhs.canonize();
-            v = blas::$dot(t1.size(), &t1.data, 1, &t2.data, 1);
+            v = blas::$dot(t1.size() as i32, &t1.data, 1, &t2.data, 1);
         }
         Tensor::scalar(v)
     } else {