super_cache 1.0.0 copy "super_cache: ^1.0.0" to clipboard
super_cache: ^1.0.0 copied to clipboard

A production-grade, mobile-first LRU caching engine for Flutter. Zero external dependencies. Includes TTL, observability, memory pressure awareness, layered orchestration, and Clean Architecture integration.

example/main.dart

// ignore_for_file: avoid_print
import 'dart:async';

import 'package:super_cache/super_cache.dart';

/// super_cache core walkthrough
///
/// This example simulates a product-catalog app to show each feature in order:
///
///   1. MemoryCache basics — put, get, LRU eviction
///   2. TTL modes — absolute expiry vs sliding expiry
///   3. Metrics — hits, misses, evictions, hit rate
///   4. CacheOrchestrator — L1 + L2 layered caching with auto-promotion
///   5. CacheRepositoryMixin — cache-aside with stampede protection
///   6. Group invalidation — removeWhere for logout / user switch
///
/// Run with:
///   dart run example/main.dart
void main() async {
  // Each demo is paired with its banner title and executed strictly in
  // order, since later sections build on concepts from earlier ones.
  final demos = <(String, Future<void> Function())>[
    ('1. MemoryCache basics', _demo1Basics),
    ('2. TTL modes', _demo2Ttl),
    ('3. Metrics', _demo3Metrics),
    ('4. CacheOrchestrator (L1 → L2)', _demo4Orchestrator),
    ('5. CacheRepositoryMixin — stampede protection', _demo5Mixin),
    ('6. Group invalidation with removeWhere', _demo6RemoveWhere),
  ];

  for (final (title, demo) in demos) {
    await _section(title, demo);
  }

  print('\nDone!');
}

// ---------------------------------------------------------------------------
// 1. MemoryCache basics
//
// MemoryCache uses a HashMap for O(1) lookup and a doubly-linked list for O(1)
// LRU eviction. When maxEntries is reached the least-recently-used entry is
// dropped automatically.
// ---------------------------------------------------------------------------

/// Demonstrates put/get and automatic LRU eviction on a tiny cache.
Future<void> _demo1Basics() async {
  // Capacity of 3 is deliberately tiny so eviction is observable at once.
  final store = MemoryCache<String, String>(maxEntries: 3);

  store
    ..put('product:1', 'Mechanical Keyboard')
    ..put('product:2', 'USB-C Hub')
    ..put('product:3', 'Laptop Stand');

  // Touching 1 and 2 leaves product:3 as the least-recently-used entry.
  print('get product:1 → ${store.get("product:1")}'); // Mechanical Keyboard
  print('get product:2 → ${store.get("product:2")}'); // USB-C Hub

  // A 4th insert exceeds capacity; product:3 (never read) is dropped.
  store.put('product:4', 'Webcam');
  print('get product:3 → ${store.get("product:3")}'); // null — evicted
  print('get product:4 → ${store.get("product:4")}'); // Webcam

  // Re-putting an existing key replaces its value and makes it MRU.
  store.put('product:1', 'Mechanical Keyboard v2');
  print('after overwrite → ${store.get("product:1")}'); // Mechanical Keyboard v2

  await store.dispose();
}

// ---------------------------------------------------------------------------
// 2. TTL modes
//
// TTLMode.absolute  — expires at insertedAt + ttl, no matter how often accessed.
//                     Good for price data ("valid for 10 minutes").
//
// TTLMode.sliding   — TTL resets on every successful get. The entry lives
//                     as long as it is being used. Good for session tokens.
// ---------------------------------------------------------------------------

/// Contrasts absolute TTL (fixed deadline) with sliding TTL (idle timeout).
Future<void> _demo2Ttl() async {
  // Absolute mode: the entry dies 300 ms after insertion regardless of use.
  final priceCache = MemoryCache<String, String>(
    maxEntries: 200,
    defaultTTL: const Duration(milliseconds: 300),
  );

  priceCache.put('price:widget', '\$19.99');
  print('before expiry  → ${priceCache.get("price:widget")}'); // $19.99

  // Reading the entry does not extend its lifetime in absolute mode.
  await Future<void>.delayed(const Duration(milliseconds: 200));
  print('at 200ms       → ${priceCache.get("price:widget")}'); // $19.99 (not yet)

  await Future<void>.delayed(const Duration(milliseconds: 150));
  print('at 350ms       → ${priceCache.get("price:widget")}'); // null — expired

  await priceCache.dispose();

  // Sliding mode: every successful read restarts the 300 ms countdown.
  final sessionCache = MemoryCache<String, String>(
    maxEntries: 200,
    defaultTTL: const Duration(milliseconds: 300),
    ttlMode: TTLMode.sliding,
  );

  sessionCache.put('session', 'user_alice');

  // Two reads, each inside the window — the clock resets both times.
  await Future<void>.delayed(const Duration(milliseconds: 200));
  print('sliding at 200ms → ${sessionCache.get("session")}'); // user_alice (clock reset)

  await Future<void>.delayed(const Duration(milliseconds: 200));
  print('sliding at 400ms → ${sessionCache.get("session")}'); // user_alice (clock reset)

  // Now go idle past the full window; the entry finally expires.
  await Future<void>.delayed(const Duration(milliseconds: 350));
  print('sliding at 750ms → ${sessionCache.get("session")}'); // null — idle too long

  await sessionCache.dispose();
}

// ---------------------------------------------------------------------------
// 3. Metrics
//
// Every cache exposes a synchronous CacheMetrics snapshot via .metrics and a
// broadcast stream via .metricsStream (fires on each background sweep).
// ---------------------------------------------------------------------------

/// Shows the synchronous CacheMetrics snapshot after forcing an eviction,
/// two hits, and one miss.
Future<void> _demo3Metrics() async {
  // Capacity of 5 so a 6th insert is guaranteed to evict.
  final store = MemoryCache<String, String>(maxEntries: 5);

  // Fill exactly to capacity, then overflow by one.
  for (var id = 1; id <= 5; id++) {
    store.put('product:$id', 'Item $id');
  }
  store.put('product:6', 'Item 6'); // evicts product:1 (inserted first, never accessed)

  // Generate two hits and one miss for the snapshot below.
  store.get('product:2'); // hit
  store.get('product:3'); // hit
  store.get('product:1'); // null — evicted

  final snapshot = store.metrics;
  print('entries   : ${snapshot.currentEntries}'); // 5
  print('hits      : ${snapshot.hits}');           // 2
  print('misses    : ${snapshot.misses}');         // 1
  print('evictions : ${snapshot.evictions}');      // 1
  print('hit rate  : ${(snapshot.hitRate * 100).toStringAsFixed(1)}%'); // 66.7%

  // In Flutter apps, wire metricsStream to your analytics:
  //   cache.metricsStream.listen((m) => analytics.record('hit_rate', m.hitRate));

  await store.dispose();
}

// ---------------------------------------------------------------------------
// 4. CacheOrchestrator — layered caching
//
// CacheOrchestrator sequences up to three layers (L1, L2, L3).
//
// On READ:  L1 miss → check L2 → first hit is returned AND promoted to L1
//           so the next access is always an L1 hit.
//
// On WRITE: value is written to every configured layer at once.
// ---------------------------------------------------------------------------

/// Walks through L1→L2 fall-through reads and auto-promotion back to L1.
Future<void> _demo4Orchestrator() async {
  final hot = MemoryCache<String, String>(maxEntries: 3); // hot, tiny
  final warm = MemoryCache<String, String>(maxEntries: 200); // warm, larger
  final layered = CacheOrchestrator<String, String>(l1: hot, l2: warm);

  // A write lands in every configured layer.
  await layered.put('product:1', 'Mechanical Keyboard');

  // Served straight from L1.
  print('read 1: ${await layered.get("product:1")}'); // Mechanical Keyboard

  // Fill L1 with three fresher entries so product:1 falls off its LRU end.
  await layered.put('product:2', 'USB-C Hub');
  await layered.put('product:3', 'Laptop Stand');
  await layered.put('product:4', 'Webcam'); // product:1 evicted from L1

  print('product:1 in L1? ${hot.get("product:1")}'); // null — evicted from L1
  print('product:1 in L2? ${warm.get("product:1")}'); // Mechanical Keyboard — still in L2

  // The orchestrator misses L1, hits L2, and re-promotes the value to L1.
  print('via orchestrator: ${await layered.get("product:1")}'); // Mechanical Keyboard
  print('now in L1 again?  ${hot.get("product:1")}');          // Mechanical Keyboard ✓

  await layered.dispose();
}

// ---------------------------------------------------------------------------
// 5. CacheRepositoryMixin — stampede protection
//
// If two callers request the same key while the cache is cold, only ONE
// network call is made. The second caller waits on the same Future.
//
//   caller A: MISS → start fetch ──────────────────► store → return to A
//   caller B: MISS → join A's Future ─────────────────────────► return to B
//                              (one network call, not two)
// ---------------------------------------------------------------------------

/// Proves that two concurrent cold reads of the same key trigger exactly
/// one fetch, and that subsequent reads are served from cache.
Future<void> _demo5Mixin() async {
  final repo = _ProductRepository();

  // Both requests start while the cache is cold — they share one Future.
  final firstRound = await Future.wait([
    repo.getProduct('product:42'),
    repo.getProduct('product:42'),
  ]);

  print('caller A got: ${firstRound[0]}'); // Widget Pro (id: product:42)
  print('caller B got: ${firstRound[1]}'); // Widget Pro (id: product:42)
  print('network calls: ${repo.networkCallCount}'); // 1 — not 2!

  // Warm cache now — the counter must not move.
  final secondRound = await Future.wait([
    repo.getProduct('product:42'),
    repo.getProduct('product:42'),
  ]);
  print('second round — network calls: ${repo.networkCallCount}'); // still 1
  assert(secondRound.every((r) => r != null));

  await repo.cache.dispose();
}

/// Cache-aside repository over a simulated network, used by demo 5.
///
/// [CacheRepositoryMixin.fetchWithCache] supplies the stampede protection:
/// concurrent misses for one key share a single in-flight fetch.
class _ProductRepository with CacheRepositoryMixin<String, String> {
  /// How many simulated network fetches have actually run.
  int networkCallCount = 0;

  @override
  final Cache<String, String> cache = MemoryCache(
    maxEntries: 200,
    defaultTTL: const Duration(minutes: 5),
  );

  /// Returns the product named by [id], fetching over the "network" only
  /// when the cache has no live entry for it.
  Future<String?> getProduct(String id) {
    return fetchWithCache(
      id,
      onMiss: () async {
        networkCallCount++;
        print('  → network fetch for $id (call #$networkCallCount)');
        // Simulate 80 ms network latency.
        await Future<void>.delayed(const Duration(milliseconds: 80));
        return 'Widget Pro (id: $id)';
      },
    );
  }
}

// ---------------------------------------------------------------------------
// 6. Group invalidation with removeWhere
//
// removeWhere lets you clear a logical group of keys in one call.
// Common use-cases: user logout, feature-flag refresh, namespace invalidation.
// ---------------------------------------------------------------------------

/// Uses removeWhere to wipe one user's namespace while leaving everything
/// else (another user, global config) untouched.
Future<void> _demo6RemoveWhere() async {
  final store = MemoryCache<String, String>(maxEntries: 200);

  // Seed two user namespaces plus one global entry.
  const seed = {
    'user:alice:profile': 'Alice Smith',
    'user:alice:cart': 'item_1,item_2',
    'user:alice:wishlist': 'item_5',
    'user:bob:profile': 'Bob Jones',
    'user:bob:cart': 'item_3',
    'config:theme': 'dark', // global — must NOT be removed
  };
  seed.forEach(store.put);

  print('before logout: ${store.metrics.currentEntries} entries');

  // Alice logs out: drop every key under her prefix in one call.
  store.removeWhere((key, _) => key.startsWith('user:alice:'));

  print('after  logout: ${store.metrics.currentEntries} entries');
  print('alice:profile → ${store.get("user:alice:profile")}'); // null
  print('alice:cart    → ${store.get("user:alice:cart")}');    // null
  print('bob:profile   → ${store.get("user:bob:profile")}');   // Bob Jones ✓
  print('config:theme  → ${store.get("config:theme")}');       // dark ✓

  await store.dispose();
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

/// Prints a banner for [title] between two horizontal rules, then awaits
/// the [demo] body.
Future<void> _section(String title, Future<void> Function() demo) async {
  final rule = '─' * 55;
  print('\n$rule');
  print('  $title');
  print(rule);
  await demo();
}
1
like
150
points
124
downloads

Publisher

verified publisher: jihedmrouki.com

Weekly Downloads

A production-grade, mobile-first LRU caching engine for Flutter. Zero external dependencies. Includes TTL, observability, memory pressure awareness, layered orchestration, and Clean Architecture integration.

Repository (GitHub)
View/report issues

Topics

#cache #caching #lru #performance #memory

Documentation

API reference

License

MIT (license)

Dependencies

flutter

More

Packages that depend on super_cache