use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    hash::Hash,
    sync::Arc,
    time::{Duration, Instant},
};
use tokio::sync::RwLock;

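/// Configuration for a `Cache`: capacity, default TTL, cleanup cadence, and eviction policy.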
#[derive(Debug, Clone)]
pub struct CacheConfig {
    pub max_entries: usize,
    pub default_ttl: Duration,
    pub cleanup_interval: Duration,
    pub enable_lru: bool,
}

impl Default for CacheConfig {
    fn default() -> Self {
        Self {
            max_entries: 1000,
            default_ttl: Duration::from_secs(300),
            cleanup_interval: Duration::from_secs(60),
            enable_lru: true,
        }
    }
}

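/// A stored value together with the expiry and access metadata used for TTL and LRU decisions.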
#[derive(Debug, Clone)]
struct CacheEntry<V> {
    value: V,
    expires_at: Instant,
    last_accessed: Instant,
    access_count: u64,
}

impl<V> CacheEntry<V> {
    fn new(value: V, ttl: Duration) -> Self {
        let now = Instant::now();
        Self {
            value,
            expires_at: now + ttl,
            last_accessed: now,
            access_count: 1,
        }
    }

    fn is_expired(&self) -> bool {
        Instant::now() > self.expires_at
    }

    fn access(&mut self) -> &V {
        self.last_accessed = Instant::now();
        self.access_count += 1;
        &self.value
    }
}

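/// An asynchronous, thread-safe cache with per-entry TTLs and optional LRU eviction.
///
/// Entries and statistics live behind `Arc<RwLock<..>>`, so handles that share the same
/// `Arc`s (see `start_cleanup_task`) observe the same state.
///
/// A minimal usage sketch, assuming a Tokio runtime with this module in scope:
///
/// ```ignore
/// let cache: Cache<String, String> = Cache::new(CacheConfig::default());
/// cache.insert("height".to_string(), "123".to_string()).await;
/// assert_eq!(cache.get(&"height".to_string()).await.as_deref(), Some("123"));
/// ```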
pub struct Cache<K, V> {
    config: CacheConfig,
    entries: Arc<RwLock<HashMap<K, CacheEntry<V>>>>,
    stats: Arc<RwLock<CacheStats>>,
}

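/// Counters describing cache activity since creation.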
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct CacheStats {
    pub hits: u64,
    pub misses: u64,
    pub evictions: u64,
    pub expired_removals: u64,
    pub current_size: usize,
    pub max_size_reached: u64,
}

impl CacheStats {
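    /// Fraction of lookups that were hits; returns 0.0 when no lookups have occurred.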
    pub fn hit_rate(&self) -> f64 {
        if self.hits + self.misses == 0 {
            0.0
        } else {
            self.hits as f64 / (self.hits + self.misses) as f64
        }
    }
}

impl<K, V> Cache<K, V>
where
    K: Hash + Eq + Clone + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
{
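    /// Creates an empty cache with the given configuration.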
    pub fn new(config: CacheConfig) -> Self {
        Self {
            config,
            entries: Arc::new(RwLock::new(HashMap::new())),
            stats: Arc::new(RwLock::new(CacheStats::default())),
        }
    }

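    /// Looks up `key`. An expired entry is removed and counted as a miss;
    /// a live entry is counted as a hit and has its LRU metadata refreshed.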
    pub async fn get(&self, key: &K) -> Option<V> {
        let mut entries = self.entries.write().await;
        let mut stats = self.stats.write().await;

        if let Some(entry) = entries.get_mut(key) {
            if entry.is_expired() {
                entries.remove(key);
                stats.expired_removals += 1;
                stats.misses += 1;
                stats.current_size = entries.len();
                None
            } else {
                stats.hits += 1;
                Some(entry.access().clone())
            }
        } else {
            stats.misses += 1;
            None
        }
    }

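    /// Inserts `value` under `key` with the configured default TTL.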
    pub async fn insert(&self, key: K, value: V) {
        self.insert_with_ttl(key, value, self.config.default_ttl).await;
    }

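    /// Inserts `value` under `key` with an explicit TTL. If the cache is full and the
    /// key is new, one existing entry is evicted first: the least recently used entry
    /// when LRU is enabled, otherwise an arbitrary one.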
    pub async fn insert_with_ttl(&self, key: K, value: V, ttl: Duration) {
        let mut entries = self.entries.write().await;
        let mut stats = self.stats.write().await;

        if entries.len() >= self.config.max_entries && !entries.contains_key(&key) {
            if self.config.enable_lru {
                self.evict_lru(&mut entries, &mut stats);
            } else if let Some(first_key) = entries.keys().next().cloned() {
                entries.remove(&first_key);
                stats.evictions += 1;
            }
            stats.max_size_reached += 1;
        }

        let entry = CacheEntry::new(value, ttl);
        entries.insert(key, entry);
        stats.current_size = entries.len();
    }

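    /// Removes `key`, returning its value if it was present (expired or not).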
    pub async fn remove(&self, key: &K) -> Option<V> {
        let mut entries = self.entries.write().await;
        let mut stats = self.stats.write().await;

        let result = entries.remove(key).map(|entry| entry.value);
        stats.current_size = entries.len();
        result
    }

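    /// Removes every entry. Hit/miss and eviction counters are left untouched.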
    pub async fn clear(&self) {
        let mut entries = self.entries.write().await;
        let mut stats = self.stats.write().await;

        entries.clear();
        stats.current_size = 0;
    }

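    /// Returns a snapshot of the current statistics.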
    pub async fn stats(&self) -> CacheStats {
        let stats = self.stats.read().await;
        CacheStats {
            hits: stats.hits,
            misses: stats.misses,
            evictions: stats.evictions,
            expired_removals: stats.expired_removals,
            current_size: stats.current_size,
            max_size_reached: stats.max_size_reached,
        }
    }

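    /// Removes every expired entry in a single pass and updates the statistics.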
    pub async fn cleanup_expired(&self) {
        let mut entries = self.entries.write().await;
        let mut stats = self.stats.write().await;

        let initial_size = entries.len();
        entries.retain(|_, entry| !entry.is_expired());
        let removed = initial_size - entries.len();

        stats.expired_removals += removed as u64;
        stats.current_size = entries.len();
    }

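    /// Number of entries currently stored, including expired entries that have not
    /// yet been removed by `get` or `cleanup_expired`.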
    pub async fn size(&self) -> usize {
        let entries = self.entries.read().await;
        entries.len()
    }

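    /// Whether `key` is present; expired entries still count until they are removed.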
    pub async fn contains_key(&self, key: &K) -> bool {
        let entries = self.entries.read().await;
        entries.contains_key(key)
    }

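    /// Removes the least recently accessed entry. Called by `insert_with_ttl` with
    /// both write guards already held.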
    fn evict_lru(&self, entries: &mut HashMap<K, CacheEntry<V>>, stats: &mut CacheStats) {
        if let Some((lru_key, _)) = entries
            .iter()
            .min_by_key(|(_, entry)| entry.last_accessed)
            .map(|(k, v)| (k.clone(), v.last_accessed))
        {
            entries.remove(&lru_key);
            stats.evictions += 1;
        }
    }

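    /// Spawns a background task that calls `cleanup_expired` every `cleanup_interval`.
    /// The task shares the entry and stats maps through `Arc` clones and runs until it
    /// is aborted via the returned `JoinHandle` or the runtime shuts down.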
    pub fn start_cleanup_task(&self) -> tokio::task::JoinHandle<()> {
        let cache = Cache {
            config: self.config.clone(),
            entries: Arc::clone(&self.entries),
            stats: Arc::clone(&self.stats),
        };

        tokio::spawn(async move {
            let mut interval = tokio::time::interval(cache.config.cleanup_interval);
            loop {
                interval.tick().await;
                cache.cleanup_expired().await;
            }
        })
    }
}

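/// A cache for JSON-RPC responses, keyed by namespaced strings such as `block:<id>`,
/// `tx:<hash>`, `contract:<hash>`, and `balance:<address>`.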
pub type RpcCache = Cache<String, serde_json::Value>;

impl RpcCache {
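    /// Creates a cache tuned for RPC responses: 5000 entries, a 30-second default TTL,
    /// and LRU eviction.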
    pub fn new_rpc_cache() -> Self {
        let config = CacheConfig {
            max_entries: 5000,
            default_ttl: Duration::from_secs(30),
            cleanup_interval: Duration::from_secs(60),
            enable_lru: true,
        };
        Self::new(config)
    }

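    /// Caches a block under `block:<identifier>` for one hour.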
    pub async fn cache_block(&self, identifier: String, block: serde_json::Value) {
        self.insert_with_ttl(
            format!("block:{}", identifier),
            block,
            Duration::from_secs(3600),
        )
        .await;
    }

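    /// Caches a transaction under `tx:<tx_hash>` for one hour.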
    pub async fn cache_transaction(&self, tx_hash: String, transaction: serde_json::Value) {
        self.insert_with_ttl(
            format!("tx:{}", tx_hash),
            transaction,
            Duration::from_secs(3600),
        )
        .await;
    }

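    /// Caches contract state under `contract:<contract_hash>` for one minute.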
    pub async fn cache_contract_state(&self, contract_hash: String, state: serde_json::Value) {
        self.insert_with_ttl(
            format!("contract:{}", contract_hash),
            state,
            Duration::from_secs(60),
        )
        .await;
    }

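    /// Caches an account balance under `balance:<address>` for ten seconds.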
    pub async fn cache_balance(&self, address: String, balance: serde_json::Value) {
        self.insert_with_ttl(
            format!("balance:{}", address),
            balance,
            Duration::from_secs(10),
        )
        .await;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::{sleep, Duration};

    #[tokio::test]
    async fn test_cache_basic_operations() {
        let cache = Cache::new(CacheConfig::default());

        cache.insert("key1".to_string(), "value1".to_string()).await;
        assert_eq!(cache.get(&"key1".to_string()).await, Some("value1".to_string()));

        assert_eq!(cache.get(&"nonexistent".to_string()).await, None);

        assert_eq!(cache.remove(&"key1".to_string()).await, Some("value1".to_string()));
        assert_eq!(cache.get(&"key1".to_string()).await, None);
    }

    #[tokio::test]
    async fn test_cache_expiration() {
        let config = CacheConfig {
            default_ttl: Duration::from_millis(100),
            ..Default::default()
        };
        let cache = Cache::new(config);

        cache.insert("key1".to_string(), "value1".to_string()).await;
        assert_eq!(cache.get(&"key1".to_string()).await, Some("value1".to_string()));

        sleep(Duration::from_millis(150)).await;
        assert_eq!(cache.get(&"key1".to_string()).await, None);
    }

    #[tokio::test]
    async fn test_cache_stats() {
        let cache = Cache::new(CacheConfig::default());

        cache.insert("key1".to_string(), "value1".to_string()).await;
        cache.get(&"key1".to_string()).await;
        cache.get(&"nonexistent".to_string()).await;

        let stats = cache.stats().await;
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 1);
        assert_eq!(stats.hit_rate(), 0.5);
    }

    #[tokio::test]
    async fn test_rpc_cache() {
        let cache = RpcCache::new_rpc_cache();

        let block_data = serde_json::json!({
            "hash": "0x1234",
            "index": 100
        });

        cache.cache_block("100".to_string(), block_data.clone()).await;
        assert_eq!(cache.get(&"block:100".to_string()).await, Some(block_data));
    }
}