diff --git a/jans-cedarling/cedarling/src/data/config.rs b/jans-cedarling/cedarling/src/data/config.rs index 72e70c6140e..2765994ff12 100644 --- a/jans-cedarling/cedarling/src/data/config.rs +++ b/jans-cedarling/cedarling/src/data/config.rs @@ -7,7 +7,7 @@ use std::time::Duration; /// Configuration for the DataStore component. /// -/// Controls storage limits, TTL behavior, and capacity management. +/// Controls storage limits, TTL behavior, capacity management, and metrics. /// /// ## TTL Semantics /// @@ -47,6 +47,8 @@ pub struct DataStoreConfig { /// Maximum allowed TTL. /// `None` means no upper limit on TTL values (uses 10 years). pub max_ttl: Option, + /// Enable metrics tracking (access counts, timestamps) + pub enable_metrics: bool, } impl Default for DataStoreConfig { @@ -56,10 +58,43 @@ impl Default for DataStoreConfig { max_entry_size: 1_048_576, // 1MB default_ttl: None, max_ttl: Some(Duration::from_secs(3600)), // 1 hour + enable_metrics: true, } } } +/// Error returned when DataStoreConfig validation fails. +#[derive(Debug, thiserror::Error)] +pub enum ConfigValidationError { + /// default_ttl exceeds max_ttl + #[error("default_ttl ({default:?}) exceeds max_ttl ({max:?})")] + DefaultTtlExceedsMax { + /// The default TTL value that exceeds the maximum + default: Duration, + /// The maximum TTL value + max: Duration, + }, +} + +impl DataStoreConfig { + /// Validate the configuration for consistency. 
+ /// + /// # Errors + /// + /// Returns `ConfigValidationError` if: + /// - `default_ttl` exceeds `max_ttl` (when both are Some) + pub fn validate(&self) -> Result<(), ConfigValidationError> { + // Check if default_ttl exceeds max_ttl + if let (Some(default), Some(max)) = (self.default_ttl, self.max_ttl) { + if default > max { + return Err(ConfigValidationError::DefaultTtlExceedsMax { default, max }); + } + } + + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; @@ -71,5 +106,74 @@ mod tests { assert_eq!(config.max_entry_size, 1_048_576); assert_eq!(config.default_ttl, None); assert_eq!(config.max_ttl, Some(Duration::from_secs(3600))); + assert!(config.enable_metrics); + } + + #[test] + fn test_valid_config() { + let config = DataStoreConfig { + default_ttl: Some(Duration::from_secs(300)), + max_ttl: Some(Duration::from_secs(3600)), + ..Default::default() + }; + assert!( + matches!(config.validate(), Ok(_)), + "expected DataStoreConfig::validate() to succeed when default_ttl is less than max_ttl" + ); + } + + #[test] + fn test_default_ttl_exceeds_max() { + let config = DataStoreConfig { + default_ttl: Some(Duration::from_secs(7200)), // 2 hours + max_ttl: Some(Duration::from_secs(3600)), // 1 hour + ..Default::default() + }; + assert!( + matches!( + config.validate(), + Err(ConfigValidationError::DefaultTtlExceedsMax { .. 
}) + ), + "expected DataStoreConfig::validate() to return ConfigValidationError when default_ttl exceeds max_ttl" + ); + } + + #[test] + fn test_none_ttls_are_valid() { + let config = DataStoreConfig { + default_ttl: None, + max_ttl: None, + ..Default::default() + }; + assert!( + matches!(config.validate(), Ok(_)), + "expected DataStoreConfig::validate() to succeed when both TTL values are None" + ); + } + + #[test] + fn test_only_default_ttl_is_valid() { + let config = DataStoreConfig { + default_ttl: Some(Duration::from_secs(300)), + max_ttl: None, + ..Default::default() + }; + assert!( + matches!(config.validate(), Ok(_)), + "expected DataStoreConfig::validate() to succeed when only default_ttl is set" + ); + } + + #[test] + fn test_only_max_ttl_is_valid() { + let config = DataStoreConfig { + default_ttl: None, + max_ttl: Some(Duration::from_secs(3600)), + ..Default::default() + }; + assert!( + matches!(config.validate(), Ok(_)), + "expected DataStoreConfig::validate() to succeed when only max_ttl is set" + ); } } diff --git a/jans-cedarling/cedarling/src/data/entry.rs b/jans-cedarling/cedarling/src/data/entry.rs new file mode 100644 index 00000000000..0ec122ff0cc --- /dev/null +++ b/jans-cedarling/cedarling/src/data/entry.rs @@ -0,0 +1,277 @@ +// This software is available under the Apache-2.0 license. +// See https://www.apache.org/licenses/LICENSE-2.0.txt for full text. +// +// Copyright (c) 2024, Gluu, Inc. 
+ + use super::store::std_duration_to_chrono_duration; + use chrono::{DateTime, Utc}; + use serde::{Deserialize, Serialize}; + use serde_json::Value; + use std::time::Duration as StdDuration; + + /// Helper module for serializing DateTime + mod datetime { + use chrono::{DateTime, Utc}; + use serde::{Deserialize, Deserializer, Serializer}; + + pub(super) fn serialize<S>(date: &DateTime<Utc>, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&date.to_rfc3339()) + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result<DateTime<Utc>, D::Error> + where + D: Deserializer<'de>, + { + let s: String = String::deserialize(deserializer)?; + DateTime::parse_from_rfc3339(&s) + .map(|dt| dt.with_timezone(&Utc)) + .map_err(serde::de::Error::custom) + } + } + + /// Helper module for serializing Optional DateTime + mod datetime_option { + use chrono::{DateTime, Utc}; + use serde::{Deserialize, Deserializer, Serializer}; + + pub(super) fn serialize<S>( + date: &Option<DateTime<Utc>>, + serializer: S, + ) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + match date { + Some(dt) => serializer.serialize_some(&dt.to_rfc3339()), + None => serializer.serialize_none(), + } + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result<Option<DateTime<Utc>>, D::Error> + where + D: Deserializer<'de>, + { + let opt: Option<String> = Option::deserialize(deserializer)?; + match opt { + Some(s) => DateTime::parse_from_rfc3339(&s) + .map(|dt| Some(dt.with_timezone(&Utc))) + .map_err(serde::de::Error::custom), + None => Ok(None), + } + } + } + + /// Represents the type of a Cedar value based on JSON structure. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] + #[serde(rename_all = "lowercase")] + pub enum CedarType { + /// String type + String, + /// Long (integer) type + Long, + /// Boolean type + Bool, + /// Set (array) type + Set, + /// Record (object) type + Record, + /// Entity reference type + Entity, + } + + impl CedarType { + /// Infer the Cedar type from a JSON value. 
+ pub fn from_value(value: &Value) -> Self { + match value { + Value::String(_) => Self::String, + Value::Number(n) => { + if n.is_i64() || n.is_u64() { + Self::Long + } else { + // Decimals are not directly supported in basic Cedar types + // but we'll treat them as Long for now + Self::Long + } + }, + Value::Bool(_) => Self::Bool, + Value::Array(_) => Self::Set, + Value::Object(obj) => { + // Check if it's an entity reference with explicit marker + // Entity references must have exactly "type" and "id" fields, + // and the "type" value must be a string (to avoid misclassifying + // normal records that happen to have these fields) + if obj.len() == 2 + && obj.contains_key("type") + && obj.contains_key("id") + && obj.get("type").map_or(false, |v| v.is_string()) + && obj.get("id").map_or(false, |v| v.is_string()) + { + Self::Entity + } else { + Self::Record + } + }, + Value::Null => { + // Null is not a valid Cedar type, but we'll default to String + Self::String + }, + } + } + } + + /// A data entry in the DataStore with value and metadata. + /// + /// This structure wraps the actual value with metadata including creation time, + /// expiration time, access count, and type information. + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] + pub struct DataEntry { + /// The key for this entry + pub key: String, + /// The actual value stored + pub value: Value, + /// The inferred Cedar type of the value + pub data_type: CedarType, + /// Timestamp when this entry was created + #[serde(with = "datetime")] + pub created_at: DateTime<Utc>, + /// Timestamp when this entry expires (if TTL is set) + #[serde(with = "datetime_option")] + pub expires_at: Option<DateTime<Utc>>, + /// Number of times this entry has been accessed + pub access_count: u64, + } + + impl DataEntry { + /// Create a new DataEntry with the given key and value. + /// + /// The `created_at` timestamp is set to the current time, + /// and `expires_at` is calculated from the optional TTL. 
+ /// Uses saturating conversion for TTL to prevent overflow. + pub fn new(key: String, value: Value, ttl: Option<StdDuration>) -> Self { + let created_at = Utc::now(); + let expires_at = ttl.map(|duration| { + // Use saturating conversion to prevent overflow + let chrono_duration = std_duration_to_chrono_duration(duration); + created_at + chrono_duration + }); + + Self { + key, + data_type: CedarType::from_value(&value), + value, + created_at, + expires_at, + access_count: 0, + } + } + + /// Increment the access count for this entry. + pub fn increment_access(&mut self) { + self.access_count = self.access_count.saturating_add(1); + } + + /// Check if this entry has expired. + pub fn is_expired(&self) -> bool { + if let Some(expires_at) = self.expires_at { + Utc::now() > expires_at + } else { + false + } + } + } + + #[cfg(test)] + mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_cedar_type_from_value() { + assert_eq!(CedarType::from_value(&json!("test")), CedarType::String); + assert_eq!(CedarType::from_value(&json!(42)), CedarType::Long); + assert_eq!(CedarType::from_value(&json!(true)), CedarType::Bool); + assert_eq!(CedarType::from_value(&json!([1, 2, 3])), CedarType::Set); + assert_eq!(CedarType::from_value(&json!({"a": 1})), CedarType::Record); + assert_eq!( + CedarType::from_value(&json!({"type": "User", "id": "123"})), + CedarType::Entity + ); + } + + #[test] + fn test_data_entry_new() { + let entry = DataEntry::new("key1".to_string(), json!("value1"), None); + assert_eq!(entry.key, "key1"); + assert_eq!(entry.value, json!("value1")); + assert_eq!(entry.data_type, CedarType::String); + assert_eq!(entry.access_count, 0); + assert!(entry.expires_at.is_none()); + } + + #[test] + fn test_data_entry_with_ttl() { + let entry = DataEntry::new( + "key1".to_string(), + json!("value1"), + Some(StdDuration::from_secs(60)), + ); + assert!(entry.expires_at.is_some()); + assert!(entry.expires_at.unwrap() > entry.created_at); + } + + #[test] + fn 
test_increment_access() { + let mut entry = DataEntry::new("key1".to_string(), json!("value1"), None); + assert_eq!(entry.access_count, 0); + entry.increment_access(); + assert_eq!(entry.access_count, 1); + entry.increment_access(); + assert_eq!(entry.access_count, 2); + } + + #[test] + #[cfg(not(target_arch = "wasm32"))] + fn test_is_expired() { + let entry = DataEntry::new( + "key1".to_string(), + json!("value1"), + Some(StdDuration::from_millis(100)), + ); + assert!(!entry.is_expired()); + + // Wait for expiration + std::thread::sleep(StdDuration::from_millis(150)); + assert!(entry.is_expired()); + } + + #[test] + fn test_serialization() { + let entry = DataEntry::new( + "key1".to_string(), + json!("value1"), + Some(StdDuration::from_secs(3600)), + ); + let serialized = serde_json::to_string(&entry).expect("should serialize"); + let deserialized: DataEntry = + serde_json::from_str(&serialized).expect("should deserialize"); + assert_eq!(entry.key, deserialized.key); + assert_eq!(entry.value, deserialized.value); + assert_eq!(entry.data_type, deserialized.data_type); + assert_eq!(entry.access_count, deserialized.access_count); + + // Verify datetime fields survive round-trip using RFC3339 representation + assert_eq!( + entry.created_at.to_rfc3339(), + deserialized.created_at.to_rfc3339(), + "created_at should survive serde_json round-trip" + ); + assert_eq!( + entry.expires_at.map(|dt| dt.to_rfc3339()), + deserialized.expires_at.map(|dt| dt.to_rfc3339()), + "expires_at should survive serde_json round-trip" + ); + } +} diff --git a/jans-cedarling/cedarling/src/data/error.rs b/jans-cedarling/cedarling/src/data/error.rs index f5c8d981309..a1813d7cdf1 100644 --- a/jans-cedarling/cedarling/src/data/error.rs +++ b/jans-cedarling/cedarling/src/data/error.rs @@ -13,20 +13,36 @@ pub enum DataError { InvalidKey, /// Key not found in store - #[error("key not found: {0}")] - KeyNotFound(String), + #[error("key not found: {key}")] + KeyNotFound { + /// The key that was not 
found + key: String, + }, /// Storage limit exceeded #[error("storage limit exceeded: max {max} entries")] - StorageLimitExceeded { max: usize }, + StorageLimitExceeded { + /// Maximum allowed entries + max: usize, + }, /// TTL exceeds maximum allowed - #[error("TTL exceeds maximum: requested {requested:?}, max {max:?}")] - TTLExceeded { requested: Duration, max: Duration }, + #[error("TTL ({requested:?}) exceeds max ({max:?})")] + TTLExceeded { + /// The requested TTL + requested: Duration, + /// The maximum allowed TTL + max: Duration, + }, /// Value too large - #[error("value too large: {size} bytes exceeds max {max} bytes")] - ValueTooLarge { size: usize, max: usize }, + #[error("value size {size} bytes exceeds max {max} bytes")] + ValueTooLarge { + /// Actual size in bytes + size: usize, + /// Maximum allowed size in bytes + max: usize, + }, /// Serialization error #[error("serialization error: {0}")] diff --git a/jans-cedarling/cedarling/src/data/mod.rs b/jans-cedarling/cedarling/src/data/mod.rs index a1e963cac9c..20ffcbdcf8e 100644 --- a/jans-cedarling/cedarling/src/data/mod.rs +++ b/jans-cedarling/cedarling/src/data/mod.rs @@ -8,9 +8,11 @@ //! and thread-safe concurrent access. 
mod config; +mod entry; mod error; mod store; -pub use config::DataStoreConfig; +pub use config::{ConfigValidationError, DataStoreConfig}; +pub use entry::{CedarType, DataEntry}; pub use error::DataError; pub use store::DataStore; diff --git a/jans-cedarling/cedarling/src/data/store.rs b/jans-cedarling/cedarling/src/data/store.rs index 9bd77486b79..5676f1f9c78 100644 --- a/jans-cedarling/cedarling/src/data/store.rs +++ b/jans-cedarling/cedarling/src/data/store.rs @@ -11,7 +11,8 @@ use chrono::Duration as ChronoDuration; use serde_json::Value; use sparkv::{Config as SparKVConfig, Error as SparKVError, SparKV}; -use super::config::DataStoreConfig; +use super::config::{ConfigValidationError, DataStoreConfig}; +use super::entry::DataEntry; use super::error::DataError; const RWLOCK_EXPECT_MESSAGE: &str = "DataStore storage lock should not be poisoned"; @@ -32,7 +33,7 @@ const INFINITE_TTL_SECS: i64 = 315_360_000; // 10 years in seconds /// - `config.max_ttl = None` means no upper limit on TTL values (10 years max) /// - When both `ttl` parameter and `config.default_ttl` are `None`, entries use the infinite TTL pub struct DataStore { - storage: RwLock>, + storage: RwLock>, config: DataStoreConfig, } @@ -43,33 +44,40 @@ impl DataStore { /// /// - If `config.max_ttl` is `None`, uses 10 years (effectively infinite) /// - If `config.default_ttl` is `None`, uses 10 years (effectively infinite) - pub fn new(config: DataStoreConfig) -> Self { + /// + /// # Errors + /// + /// Returns `ConfigValidationError` if the configuration is invalid. 
+ pub fn new(config: DataStoreConfig) -> Result { + // Validate configuration before creating the store + config.validate()?; + let sparkv_config = SparKVConfig { max_items: config.max_entries, max_item_size: config.max_entry_size, max_ttl: config .max_ttl - .map(|d| std_duration_to_chrono_duration(d)) + .map(std_duration_to_chrono_duration) .unwrap_or_else(|| ChronoDuration::seconds(INFINITE_TTL_SECS)), default_ttl: config .default_ttl - .map(|d| std_duration_to_chrono_duration(d)) + .map(std_duration_to_chrono_duration) .unwrap_or_else(|| ChronoDuration::seconds(INFINITE_TTL_SECS)), auto_clear_expired: true, earliest_expiration_eviction: false, }; - // Use JSON string length as size calculator for accurate size checking - let size_calculator: Option usize> = - Some(|v| serde_json::to_string(v).map(|s| s.len()).unwrap_or(0)); + // Calculate size based on the serialized DataEntry + let size_calculator: Option usize> = + Some(|entry| serde_json::to_string(entry).map(|s| s.len()).unwrap_or(0)); - Self { + Ok(Self { storage: RwLock::new(SparKV::with_config_and_sizer( sparkv_config, size_calculator, )), config, - } + }) } /// Push a value into the store with an optional TTL. @@ -92,63 +100,53 @@ impl DataStore { return Err(DataError::InvalidKey); } - // Check value size before storing - let value_size = serde_json::to_string(&value) + // Create DataEntry with metadata + let entry = DataEntry::new(key.to_string(), value, ttl); + + // Check entry size before storing (including metadata) + let entry_size = serde_json::to_string(&entry) .map_err(DataError::from)? 
.len(); - if self.config.max_entry_size > 0 && value_size > self.config.max_entry_size { + if self.config.max_entry_size > 0 && entry_size > self.config.max_entry_size { return Err(DataError::ValueTooLarge { - size: value_size, + size: entry_size, max: self.config.max_entry_size, }); } - // Determine the effective TTL to use - // Priority: explicit ttl > config.default_ttl > infinite (10 years) - let requested_ttl = ttl - .or(self.config.default_ttl) - .unwrap_or(StdDuration::from_secs(INFINITE_TTL_SECS as u64)); - - // If an explicit TTL was provided, validate it against max_ttl - if ttl.is_some() { + // Validate explicit TTL against max_ttl before calculating effective TTL + if let Some(explicit_ttl) = ttl { if let Some(max_ttl) = self.config.max_ttl { - if requested_ttl > max_ttl { + if explicit_ttl > max_ttl { return Err(DataError::TTLExceeded { - requested: requested_ttl, + requested: explicit_ttl, max: max_ttl, }); } } } - // Cap the effective TTL at max_ttl if set - let effective_ttl = if let Some(max_ttl) = self.config.max_ttl { - requested_ttl.min(max_ttl) - } else { - requested_ttl - }; - - // Convert to chrono::Duration - let chrono_ttl = std_duration_to_chrono_duration(effective_ttl); + // Calculate effective TTL using the helper function + let chrono_ttl = get_effective_ttl(ttl, self.config.default_ttl, self.config.max_ttl); let mut storage = self.storage.write().expect(RWLOCK_EXPECT_MESSAGE); // Use empty index keys since we don't need indexing for data store storage - .set_with_ttl(&key, value, chrono_ttl, &[]) + .set_with_ttl(key, entry, chrono_ttl, &[]) .map_err(|e| match e { SparKVError::CapacityExceeded => DataError::StorageLimitExceeded { max: self.config.max_entries, }, SparKVError::ItemSizeExceeded => DataError::ValueTooLarge { - size: value_size, + size: entry_size, max: self.config.max_entry_size, }, SparKVError::TTLTooLong => { // This shouldn't happen since we validated above, but handle it anyway DataError::TTLExceeded { - requested: 
effective_ttl, + requested: ttl.unwrap_or_default(), max: self .config .max_ttl @@ -163,10 +161,61 @@ impl DataStore { /// Get a value from the store by key. /// /// Returns `None` if the key doesn't exist or the entry has expired. - /// Uses read lock for concurrent access. + /// If metrics are enabled, increments the access count for the entry. pub fn get(&self, key: &str) -> Option { - let storage = self.storage.read().expect(RWLOCK_EXPECT_MESSAGE); - storage.get(key).cloned() + self.get_entry(key).map(|entry| entry.value) + } + + /// Get a data entry with full metadata by key. + /// + /// Returns `None` if the key doesn't exist or the entry has expired. + /// If metrics are enabled, increments the access count for the entry. + /// Uses read lock initially for better concurrency, upgrading to write lock only when metrics are enabled. + pub fn get_entry(&self, key: &str) -> Option { + // First, try with read lock for better concurrency + let entry = { + let storage = self.storage.read().expect(RWLOCK_EXPECT_MESSAGE); + storage.get(key).cloned() + }; + + let mut entry = entry?; + + // Check if entry has expired + if let Some(expires_at) = entry.expires_at { + if chrono::Utc::now() > expires_at { + // Entry is expired, optionally remove it from storage + let mut storage = self.storage.write().expect(RWLOCK_EXPECT_MESSAGE); + storage.pop(key); + return None; + } + } + + // Only acquire write lock if metrics are enabled + if self.config.enable_metrics { + entry.increment_access(); + + // Calculate remaining TTL to preserve expiration + let remaining_ttl = if let Some(expires_at) = entry.expires_at { + let now = chrono::Utc::now(); + expires_at + .signed_duration_since(now) + .to_std() + .ok() + .map(std_duration_to_chrono_duration) + .unwrap_or_else(|| { + get_effective_ttl(None, self.config.default_ttl, self.config.max_ttl) + }) + } else { + // No expiration, use effective TTL + get_effective_ttl(None, self.config.default_ttl, self.config.max_ttl) + }; + + // 
Acquire write lock to update the entry with incremented access count + let mut storage = self.storage.write().expect(RWLOCK_EXPECT_MESSAGE); + let _ = storage.set_with_ttl(key, entry.clone(), remaining_ttl, &[]); + } + + Some(entry) } /// Remove a value from the store by key. @@ -202,12 +251,12 @@ impl DataStore { /// Get all active (non-expired) entries as a HashMap. /// /// This is used for context injection during authorization. - /// Uses read lock for concurrent access. + /// Returns only the values, not the metadata. pub fn get_all(&self) -> HashMap { let storage = self.storage.read().expect(RWLOCK_EXPECT_MESSAGE); storage .iter() - .map(|(k, v)| (k.clone(), v.clone())) + .map(|(k, entry)| (k.clone(), entry.value.clone())) .collect() } } @@ -216,7 +265,7 @@ impl DataStore { /// /// Uses saturating conversion to prevent overflow for very large durations. /// Durations exceeding `i64::MAX` seconds will be capped at a safe maximum. -fn std_duration_to_chrono_duration(d: StdDuration) -> ChronoDuration { +pub(super) fn std_duration_to_chrono_duration(d: StdDuration) -> ChronoDuration { let secs = d.as_secs(); let nanos = d.subsec_nanos(); @@ -229,6 +278,38 @@ fn std_duration_to_chrono_duration(d: StdDuration) -> ChronoDuration { ChronoDuration::seconds(secs_capped as i64) + ChronoDuration::nanoseconds(nanos as i64) } +/// Get the effective TTL to use, respecting max_ttl constraints. +/// +/// # TTL Resolution Logic +/// +/// 1. If `ttl` is provided explicitly, use it (subject to `max_ttl` cap) +/// 2. Otherwise, use `default_ttl` from config (subject to `max_ttl` cap) +/// 3. If both are None, use effectively infinite duration (10 years) +/// 4. Always respect `max_ttl` if set, capping the result +/// +/// This ensures that the effective TTL always respects `max_ttl` constraints, +/// even when using default or infinite TTLs. 
+fn get_effective_ttl( + ttl: Option, + default_ttl: Option, + max_ttl: Option, +) -> ChronoDuration { + // Determine the requested TTL + let requested_ttl = ttl.or(default_ttl); + + // If no TTL is specified, use effectively infinite duration (10 years) + let effective = requested_ttl.unwrap_or(StdDuration::from_secs(INFINITE_TTL_SECS as u64)); + + // Respect max_ttl if set, capping the result + let capped = if let Some(max) = max_ttl { + effective.min(max) + } else { + effective + }; + + std_duration_to_chrono_duration(capped) +} + #[cfg(test)] mod tests { use super::*; @@ -238,7 +319,7 @@ mod tests { use std::time::Duration as StdDuration; fn create_test_store() -> DataStore { - DataStore::new(DataStoreConfig::default()) + DataStore::new(DataStoreConfig::default()).expect("should create store") } #[test] @@ -409,7 +490,7 @@ mod tests { max_entries: 2, ..Default::default() }; - let store = DataStore::new(config); + let store = DataStore::new(config).expect("should create store"); store .push("key1", json!("value1"), None) @@ -429,10 +510,10 @@ mod tests { #[test] fn test_max_entry_size() { let config = DataStoreConfig { - max_entry_size: 10, + max_entry_size: 150, ..Default::default() }; - let store = DataStore::new(config); + let store = DataStore::new(config).expect("should create store"); // Small value should work store @@ -440,7 +521,9 @@ mod tests { .expect("failed to push small value for max_entry_size test"); // Large value should fail - let large_value = json!("this is a very long string that exceeds the limit"); + let large_value = json!( + "this is a very long string that exceeds the limit and it needs to be even longer to exceed 150 bytes including metadata" + ); let result = store.push("key2", large_value, None); assert!(matches!(result, Err(DataError::ValueTooLarge { .. 
}))); } @@ -451,7 +534,7 @@ mod tests { max_ttl: Some(StdDuration::from_secs(60)), ..Default::default() }; - let store = DataStore::new(config); + let store = DataStore::new(config).expect("should create store"); // TTL within limit should work store @@ -470,7 +553,7 @@ mod tests { default_ttl: Some(StdDuration::from_millis(100)), ..Default::default() }; - let store = DataStore::new(config); + let store = DataStore::new(config).expect("should create store"); // Push without explicit TTL should use default store @@ -606,4 +689,137 @@ mod tests { // Verify some entries were removed assert!(store.count() < 20); } + + #[test] + fn test_get_entry_with_metadata() { + let store = create_test_store(); + + store + .push("key1", json!("value1"), Some(StdDuration::from_secs(60))) + .expect("failed to push value"); + + let entry = store.get_entry("key1").expect("entry should exist"); + assert_eq!(entry.key, "key1"); + assert_eq!(entry.value, json!("value1")); + assert_eq!(entry.data_type, crate::CedarType::String); + assert_eq!(entry.access_count, 1); // Incremented by get_entry + assert!(entry.expires_at.is_some()); + } + + #[test] + fn test_metrics_tracking() { + let config = DataStoreConfig { + enable_metrics: true, + ..Default::default() + }; + let store = DataStore::new(config).expect("should create store"); + + store + .push("key1", json!("value1"), None) + .expect("failed to push value"); + + // First access + let entry1 = store.get_entry("key1").expect("entry should exist"); + assert_eq!(entry1.access_count, 1); + + // Second access + let entry2 = store.get_entry("key1").expect("entry should exist"); + assert_eq!(entry2.access_count, 2); + + // Third access + let entry3 = store.get_entry("key1").expect("entry should exist"); + assert_eq!(entry3.access_count, 3); + } + + #[test] + fn test_metrics_disabled() { + let config = DataStoreConfig { + enable_metrics: false, + ..Default::default() + }; + let store = DataStore::new(config).expect("should create store"); + + store 
+ .push("key1", json!("value1"), None) + .expect("failed to push value"); + + // Access multiple times + let entry1 = store.get_entry("key1").expect("entry should exist"); + assert_eq!(entry1.access_count, 0); // Not incremented + + let entry2 = store.get_entry("key1").expect("entry should exist"); + assert_eq!(entry2.access_count, 0); // Still not incremented + } + + #[test] + fn test_cedar_type_inference() { + let store = create_test_store(); + + store + .push("string", json!("test"), None) + .expect("failed to push string"); + store + .push("number", json!(42), None) + .expect("failed to push number"); + store + .push("bool", json!(true), None) + .expect("failed to push bool"); + store + .push("array", json!([1, 2, 3]), None) + .expect("failed to push array"); + store + .push("object", json!({"key": "value"}), None) + .expect("failed to push object"); + store + .push("entity", json!({"type": "User", "id": "123"}), None) + .expect("failed to push entity"); + + use crate::CedarType; + assert_eq!( + store.get_entry("string").unwrap().data_type, + CedarType::String + ); + assert_eq!( + store.get_entry("number").unwrap().data_type, + CedarType::Long + ); + assert_eq!(store.get_entry("bool").unwrap().data_type, CedarType::Bool); + assert_eq!(store.get_entry("array").unwrap().data_type, CedarType::Set); + assert_eq!( + store.get_entry("object").unwrap().data_type, + CedarType::Record + ); + assert_eq!( + store.get_entry("entity").unwrap().data_type, + CedarType::Entity + ); + } + + #[test] + fn test_config_validation() { + // Valid config + let valid_config = DataStoreConfig { + default_ttl: Some(StdDuration::from_secs(300)), + max_ttl: Some(StdDuration::from_secs(3600)), + ..Default::default() + }; + assert!( + matches!(DataStore::new(valid_config), Ok(_)), + "expected DataStore::new() to succeed with valid DataStoreConfig" + ); + + // Invalid config: default_ttl > max_ttl + let invalid_config = DataStoreConfig { + default_ttl: Some(StdDuration::from_secs(7200)), + 
max_ttl: Some(StdDuration::from_secs(3600)), + ..Default::default() + }; + assert!( + matches!( + DataStore::new(invalid_config), + Err(ConfigValidationError::DefaultTtlExceedsMax { .. }) + ), + "expected DataStore::new() to return ConfigValidationError when default_ttl exceeds max_ttl" + ); + } } diff --git a/jans-cedarling/cedarling/src/lib.rs b/jans-cedarling/cedarling/src/lib.rs index 540d7a07eda..63a2113670b 100644 --- a/jans-cedarling/cedarling/src/lib.rs +++ b/jans-cedarling/cedarling/src/lib.rs @@ -35,6 +35,7 @@ mod tests; use std::sync::Arc; pub use crate::common::json_rules::JsonRule; +pub use crate::data::{CedarType, ConfigValidationError, DataEntry, DataStoreConfig}; pub use crate::init::policy_store::{PolicyStoreLoadError, load_policy_store}; use crate::log::BaseLogEntry; #[cfg(test)]