Examples¶
For more details, see our user's guide.
Size-based eviction¶
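A minimal sketch of plain size-based eviction with the MaximumSize option; which entries survive depends on the eviction policy, so the check below only asserts the size bound (the 10/20 numbers are illustrative):
package main
import "github.com/maypok86/otter/v2"
func main() {
	// Create a cache that holds at most 10 entries
	cache := otter.Must(&otter.Options[int, int]{
		MaximumSize: 10,
	})
	// Insert twice as many entries as the cache can hold
	for i := 0; i < 20; i++ {
		cache.Set(i, i)
	}
	// Apply the eviction policy synchronously
	cache.CleanUp()
	// Count the surviving entries; the exact victims depend on the eviction policy,
	// but no more than MaximumSize entries may remain
	found := 0
	for i := 0; i < 20; i++ {
		if _, ok := cache.GetIfPresent(i); ok {
			found++
		}
	}
	if found > 10 {
		panic("cache exceeded its maximum size")
	}
}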
Weight-based eviction¶
package main
import "github.com/maypok86/otter/v2"
func main() {
// Initialize cache with panic on error (Must wraps New and panics if config is invalid)
cache := otter.Must(&otter.Options[int, int]{
MaximumWeight: 5, // Total weight capacity of the cache
// Weigher defines how to calculate weight of each entry
// In this case, we use the key itself as the weight value
// Cache will enforce: sum(weights) <= MaximumWeight
Weigher: func(key int, value int) uint32 {
return uint32(key) // Convert key to unsigned 32-bit weight
},
})
// Define test keys
k1 := 3 // Will be kept (weight 3)
k2 := 4 // Will be evicted (weight 3 + 4 = 7 > 5)
// Store values in cache
cache.Set(k1, k1) // Adds entry with weight = 3
cache.Set(k2, k2) // Adds entry with weight = 4 (total weight now exceeds limit)
// Force immediate processing of pending operations
// This applies the eviction policy synchronously
cache.CleanUp()
// Verify cache state after eviction
if _, ok := cache.GetIfPresent(k1); !ok {
panic("not found k1")
}
if _, ok := cache.GetIfPresent(k2); ok {
panic("found k2") // Should be evicted due to weight limit
}
}
Entry pinning¶
package main
import (
"github.com/maypok86/otter/v2"
)
func main() {
// Define cache configuration parameters
maximumSize := 10 // Maximum number of regular (non-pinned) items
pinnedKey := 4 // Special key that will be pinned (never evicted)
// Initialize cache with weight-based eviction
cache := otter.Must[int, int](&otter.Options[int, int]{
MaximumWeight: uint64(maximumSize), // Total weight capacity
// Custom weigher function determines entry weights
Weigher: func(key int, value int) uint32 {
if key == pinnedKey {
return 0 // Pinned entry has 0 weight (never counts against capacity)
}
return 1 // All other entries have weight 1
},
})
// Populate cache with test data
for i := 0; i < maximumSize; i++ {
cache.Set(i, i) // Add entries with keys 0-9
}
// Force eviction of all entries that can be evicted
// Setting maximum to 0 will remove all entries with weight > 0
cache.SetMaximum(0)
// Verify eviction behavior
if _, ok := cache.GetIfPresent(0); ok {
panic("0 shouldn't be found") // Regular entry should be evicted
}
if _, ok := cache.GetIfPresent(pinnedKey); !ok {
panic("4 should be found") // Pinned entry should remain
}
}
Time-based eviction¶
Expiration after creation¶
package main
import (
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create a cache with expiration time of 1 second after creation
// - Uses ExpiryCreating policy (timer starts at creation)
// - Must() will panic if configuration is invalid
cache := otter.Must(&otter.Options[int, int]{
ExpiryCalculator: otter.ExpiryCreating[int, int](time.Second),
})
// Test Phase 1: Initial creation and first expiration check
// --------------------------------------------------------
// Add entry with key=1, value=1 at time T=0
cache.Set(1, 1)
// Immediate check - should exist (not expired yet)
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found") // Entry should exist
}
// Wait 500ms (half the expiration duration)
time.Sleep(500 * time.Millisecond) // Now at T=500ms
// Check again - should still exist (500ms < 1s)
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found") // Entry should persist
}
// Test Phase 2: Update and final expiration check
// ----------------------------------------------
// Update the value at T=500ms - this DOESN'T RESET the expiration timer
cache.Set(1, 2)
// Wait another 500ms (total 1s since creation, 0.5s since update)
time.Sleep(500 * time.Millisecond) // Now at T=1000ms since creation
// Final check - should be expired now (1s since creation)
if _, ok := cache.GetIfPresent(1); ok {
panic("1 shouldn't be found") // Entry should be expired
}
}
Expiration after last write¶
package main
import (
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with expiration policy:
// - Entries expire 1 second after last WRITE (creation/update)
// - Uses ExpiryWriting policy (vs ExpiryCreating which counts from first creation)
// - Must() panics if configuration is invalid
cache := otter.Must(&otter.Options[int, int]{
ExpiryCalculator: otter.ExpiryWriting[int, int](time.Second),
})
// Phase 1: Initial creation and silent read
// ----------------------------------------
// Create entry at T=0
cache.Set(1, 1) // [Expiration timer starts]
// Normal read - updates access metadata
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found") // Should exist
}
// Wait 500ms (T=500ms)
time.Sleep(500 * time.Millisecond)
// Silent read - doesn't affect expiration or statistics
// Useful for monitoring without changing cache behavior
if _, ok := cache.GetEntryQuietly(1); !ok {
panic("1 should be found") // Should still exist
}
// Phase 2: Update and expiration validation
// ----------------------------------------
// Update at T=500ms - RESETS expiration timer
cache.Set(1, 2) // [Timer restarts]
// Wait 500ms (T=1000ms since creation)
time.Sleep(500 * time.Millisecond)
// Should still exist (500ms since update < 1s)
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found") // Should still exist
}
// Wait another 500ms (T=1500ms since creation, T=1000ms since last update)
time.Sleep(500 * time.Millisecond)
// Should now be expired (1000ms >= 1000ms since last write)
if _, ok := cache.GetIfPresent(1); ok {
panic("1 shouldn't be found") // Expired
}
}
Expiration after last access¶
package main
import (
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with expiration policy:
// - Entries expire 1 second after last ACCESS (read or write)
// - Uses ExpiryAccessing policy (timer resets on both reads and writes)
// - Must() panics if configuration is invalid
cache := otter.Must(&otter.Options[int, int]{
ExpiryCalculator: otter.ExpiryAccessing[int, int](time.Second),
})
// Phase 1: Initial creation and active access
// ----------------------------------------
// Create entry at T=0
cache.Set(1, 1) // [Expiration timer starts]
// First read at T=0 - resets expiration timer (active access)
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found") // Should exist
}
// Wait 500ms (T=500ms)
time.Sleep(500 * time.Millisecond)
// Second read at T=500ms - resets timer again
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found") // Should exist
}
// Phase 2: Silent read and expiration test
// ---------------------------------------
// Wait another 500ms (T=500ms since last access)
time.Sleep(500 * time.Millisecond)
// Silent read - doesn't affect expiration or statistics
// Useful for monitoring without changing cache behavior
if _, ok := cache.GetEntryQuietly(1); !ok {
panic("1 should be found") // Still exists (but timer not reset)
}
// Wait 500ms more (T=1000ms since last active access)
time.Sleep(500 * time.Millisecond)
// Final check at T=1500ms since creation - should be expired now
// (Last access was at T=500ms, 1000ms have passed)
if _, ok := cache.GetIfPresent(1); ok {
panic("1 shouldn't be found") // Should be expired
}
}
Custom ExpiryCalculator¶
package main
import (
"time"
"github.com/maypok86/otter/v2"
)
// Custom expiration policy implementation
type expiryCalculator struct{}
// ExpireAfterCreate sets expiration time for new entries (500ms)
func (ec *expiryCalculator) ExpireAfterCreate(_ otter.Entry[int, int]) time.Duration {
return 500 * time.Millisecond
}
// ExpireAfterUpdate sets expiration time after updates (300ms)
func (ec *expiryCalculator) ExpireAfterUpdate(_ otter.Entry[int, int], _ int) time.Duration {
return 300 * time.Millisecond
}
// ExpireAfterRead returns remaining expiration time for reads
func (ec *expiryCalculator) ExpireAfterRead(entry otter.Entry[int, int]) time.Duration {
return entry.ExpiresAfter() // Preserves current expiration
}
func main() {
// Create cache with our custom expiration policy
cache := otter.Must(&otter.Options[int, int]{
ExpiryCalculator: &expiryCalculator{},
})
// Phase 1: Entry creation with 500ms expiration
// --------------------------------------------
cache.Set(1, 1) // New entry - expires in 500ms
// Immediate check - should exist
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found")
}
// Wait 490ms (just before initial expiration)
time.Sleep(490 * time.Millisecond)
// Should still exist (490ms < 500ms)
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found")
}
// Phase 2: Entry update with 300ms expiration
// -------------------------------------------
cache.Set(1, 2) // Update - now expires in 300ms
// Wait 200ms (200ms < 300ms)
time.Sleep(200 * time.Millisecond)
// Should still exist (200ms < 300ms)
if _, ok := cache.GetIfPresent(1); !ok {
panic("1 should be found")
}
// Wait additional 100ms (total 300ms since update)
time.Sleep(100 * time.Millisecond)
// Should now be expired (300ms >= 300ms)
if _, ok := cache.GetIfPresent(1); ok {
panic("1 shouldn't be found")
}
}
Loading¶
Basic¶
package main
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create basic cache with default configuration
cache := otter.Must(&otter.Options[int, int]{})
var (
wg sync.WaitGroup // WaitGroup to synchronize goroutines
calls atomic.Int64 // Atomic counter to track loader calls
)
// Test parameters
goroutines := 1000 // Number of concurrent requests to simulate
ctx := context.Background() // Context for cache operations
key := 15 // Cache key to test
value := key + 10000 // Expected value (15 + 10000 = 10015)
// Define loader function that will be called on cache misses
loader := otter.LoaderFunc[int, int](func(ctx context.Context, key int) (int, error) {
calls.Add(1) // Increment call counter (atomic)
time.Sleep(time.Second) // Simulate expensive operation
return value, nil // Return the computed value
})
// Cache stampede simulation - 1000 concurrent requests for same key
wg.Add(goroutines)
for i := 0; i < goroutines; i++ {
go func() {
defer wg.Done()
// All goroutines try to get the same key simultaneously
// Otter will deduplicate loader calls automatically
v, err := cache.Get(ctx, key, loader)
if err != nil {
panic("err should be nil")
}
if v != key+10000 {
panic("incorrect value")
}
}()
}
wg.Wait() // Wait for all goroutines to complete
// Verify loader was called exactly once despite 1000 concurrent requests
if calls.Load() != 1 {
panic("The loader should have been called only once")
}
}
ErrNotFound¶
package main
import (
"context"
"errors"
"fmt"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create a new cache with default options
cache := otter.Must(&otter.Options[int, int]{})
key := 1 // The cache key we'll be testing
ctx := context.Background() // Context for the cache operation
// Attempt to get the key with a loader function that returns an error
value, err := cache.Get(ctx, key, otter.LoaderFunc[int, int](func(ctx context.Context, key int) (int, error) {
time.Sleep(200 * time.Millisecond) // Simulate a slow operation
// Return an error wrapped with otter.ErrNotFound
return 256, fmt.Errorf("lalala: %w", otter.ErrNotFound)
}))
// Validate the returned value and error
if value != 0 {
panic("incorrect value") // Should return zero value on error
}
if err == nil || !errors.Is(err, otter.ErrNotFound) {
panic("incorrect err") // Should preserve the ErrNotFound
}
// Verify the key wasn't stored in cache due to the error
if _, ok := cache.GetIfPresent(key); ok {
panic("1 shouldn't be found") // Failed loads shouldn't cache
}
}
Concurrent loading and invalidation¶
package main
import (
"context"
"sync"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with default options
cache := otter.Must[int, int](&otter.Options[int, int]{})
key := 10 // Test key
value := key + 100 // Expected value (110)
ctx := context.Background()
// Communication channels for controlling test flow:
done := make(chan struct{}) // Signals when loader starts
inv := make(chan struct{}) // Controls loader completion
// Loader function that simulates slow backend operation
loader := otter.LoaderFunc[int, int](func(ctx context.Context, key int) (int, error) {
done <- struct{}{} // Signal that loader has started
time.Sleep(200 * time.Millisecond) // Simulate slow operation
<-inv // Wait for invalidation signal
return value, nil // Return the computed value
})
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
// First goroutine tries to get the value
v, err := cache.Get(ctx, key, loader)
if err != nil {
panic("err should be nil")
}
if v != value {
panic("incorrect value")
}
}()
// Wait for loader to start (blocks until done receives signal)
<-done
// Concurrent operations:
// 1. Invalidate the key while it's being loaded
cache.Invalidate(key)
// 2. Allow loader to complete
inv <- struct{}{}
// Wait for Get operation to complete
wg.Wait()
// Verify the key was properly invalidated and not cached
if _, ok := cache.GetIfPresent(key); ok {
panic("key shouldn't be found")
}
}
Bulk¶
package main
import (
"context"
"fmt"
"math/rand/v2"
"sync"
"sync/atomic"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with default options
cache := otter.Must(&otter.Options[int, int]{})
var (
wg sync.WaitGroup // Synchronize goroutines
calls atomic.Int64 // Count bulk loader invocations
total atomic.Int64 // Track total keys requested
)
goroutines := 1000 // Number of concurrent clients
ctx := context.Background() // Context for operations
size := 100 // Number of unique keys
keys := make([]int, 0, size) // Pre-generate test keys (0-99)
for i := 0; i < size; i++ {
keys = append(keys, i)
}
// Bulk loader function - processes multiple keys at once
bulkLoader := otter.BulkLoaderFunc[int, int](func(ctx context.Context, keys []int) (map[int]int, error) {
total.Add(int64(len(keys))) // Track total keys processed
calls.Add(1) // Count loader invocations
time.Sleep(time.Second) // Simulate expensive bulk operation
// Generate results (key → key+100)
result := make(map[int]int, len(keys))
for _, k := range keys {
if k < 0 || k >= size {
panic("incorrect key") // Validate key range
}
result[k] = k + size // Compute value
}
return result, nil
})
// Simulate cache stampede with 1000 concurrent clients
wg.Add(goroutines)
for i := 0; i < goroutines; i++ {
go func() {
defer wg.Done()
// Each goroutine gets a shuffled copy of keys
copied := make([]int, size)
copy(copied, keys)
rand.Shuffle(len(copied), func(i, j int) {
copied[i], copied[j] = copied[j], copied[i]
})
// Bulk request keys
result, err := cache.BulkGet(ctx, copied, bulkLoader)
if err != nil {
panic("err should be nil")
}
// Verify all returned values
for k, v := range result {
if v != k+size {
panic("incorrect result")
}
}
}()
}
wg.Wait()
// Validation checks
if total.Load() != int64(size) {
panic("The cache must never load more than 'size' keys")
}
fmt.Println("Total loader calls:", calls.Load())
if calls.Load() > 5 {
panic("The loader have been called too many times")
}
}
Loading additional keys¶
package main
import (
"context"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with default configuration
cache := otter.Must(&otter.Options[int, int]{})
ctx := context.Background()
size := 100 // Value offset
keys := []int{0, 1} // Initial keys to request
additionalKey := 2 // Extra key that loader will add
// Bulk loader function that simulates a backend operation
bulkLoader := otter.BulkLoaderFunc[int, int](func(ctx context.Context, keys []int) (map[int]int, error) {
time.Sleep(200 * time.Millisecond) // Simulate processing delay
// Create result map with requested keys
result := make(map[int]int, len(keys))
for _, k := range keys {
result[k] = k + size // Store value as key + offset
}
// Add an extra key-value pair that wasn't requested
result[additionalKey] = additionalKey + size
return result, nil
})
// Perform bulk get operation for initial keys
result, err := cache.BulkGet(ctx, keys, bulkLoader)
if err != nil {
panic("err should be nil") // Should never error in this case
}
// Verify all returned values are correct
for k, v := range result {
if v != k+size {
panic("incorrect value") // Validate value calculation
}
// Check if each key is now cached
if _, ok := cache.GetIfPresent(k); !ok {
panic("key should be found") // Requested keys should be cached
}
}
// Verify the additional key was also cached
if _, ok := cache.GetIfPresent(additionalKey); !ok {
panic("key should be found") // Extra key should be cached too
}
}
Refresh¶
Get with refresh¶
package main
import (
"context"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with refresh policy:
// - Entries refresh 500ms after last WRITE (creation/update)
// - Uses RefreshWriting policy (timer resets on writes)
cache := otter.Must(&otter.Options[int, int]{
RefreshCalculator: otter.RefreshWriting[int, int](500 * time.Millisecond),
})
key := 1 // Test key
ctx := context.Background()
// Phase 1: Initial setup
// ----------------------
// Set initial value at T=0
cache.Set(key, key) // [Refresh timer starts]
// Wait until refresh should trigger (500ms)
time.Sleep(500 * time.Millisecond) // T=500ms
// Phase 2: Refresh handling
// ------------------------
// Define loader that will be called during refresh
loader := otter.LoaderFunc[int, int](func(ctx context.Context, k int) (int, error) {
if k != key {
panic("unexpected key") // Safety check
}
time.Sleep(200 * time.Millisecond) // Simulate slow refresh
return key + 1, nil // Return new value
})
// This Get operation occurs at T=500ms when refresh is due
// - Returns current value (1) immediately
// - Triggers async refresh in background
value, err := cache.Get(ctx, key, loader)
if err != nil {
panic(err)
}
if value != key { // Should get the old value while the refresh runs in the background
panic("Get should return the old value while the refresh is in progress")
}
// Phase 3: Verify refresh completion
// ---------------------------------
// Wait for refresh to complete (200ms + buffer)
time.Sleep(210 * time.Millisecond) // T=710ms
// Verify new value is now in cache
v, ok := cache.GetIfPresent(key)
if !ok {
panic("key should be found")
}
if v != key+1 { // Should see refreshed value
panic("refresh should be completed")
}
}
The same logic applies exactly to BulkGet operations, as in the sketch below.
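A minimal sketch of the same flow using BulkGet with a bulk loader; the timings mirror the example above, and the 500ms refresh threshold and the +1 values are illustrative:
package main
import (
	"context"
	"time"
	"github.com/maypok86/otter/v2"
)
func main() {
	// Entries become eligible for refresh 500ms after the last write
	cache := otter.Must(&otter.Options[int, int]{
		RefreshCalculator: otter.RefreshWriting[int, int](500 * time.Millisecond),
	})
	keys := []int{1, 2}
	ctx := context.Background()
	// Write the initial values at T=0
	for _, k := range keys {
		cache.Set(k, k)
	}
	// Wait until the entries are due for refresh
	time.Sleep(500 * time.Millisecond)
	// Bulk loader used for the background refresh
	bulkLoader := otter.BulkLoaderFunc[int, int](func(ctx context.Context, keys []int) (map[int]int, error) {
		time.Sleep(200 * time.Millisecond) // Simulate a slow reload
		result := make(map[int]int, len(keys))
		for _, k := range keys {
			result[k] = k + 1
		}
		return result, nil
	})
	// BulkGet returns the current (stale) values immediately
	// and triggers the refresh in the background
	result, err := cache.BulkGet(ctx, keys, bulkLoader)
	if err != nil {
		panic(err)
	}
	for _, k := range keys {
		if result[k] != k {
			panic("BulkGet should return the old values while refreshing")
		}
	}
	// After the refresh completes, the new values are visible
	time.Sleep(210 * time.Millisecond)
	for _, k := range keys {
		if v, ok := cache.GetIfPresent(k); !ok || v != k+1 {
			panic("refresh should be completed")
		}
	}
}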
Manual refresh¶
package main
import (
"context"
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with refresh policy:
// - Entries marked for refresh 500ms after last write
// - Uses RefreshWriting policy (timer based on writes, not reads)
cache := otter.Must(&otter.Options[int, int]{
RefreshCalculator: otter.RefreshWriting[int, int](500 * time.Millisecond),
})
key := 1 // Test key
ctx := context.Background()
// Phase 1: Initial setup
// ----------------------
// Set initial value at T=0
cache.Set(key, key) // [Refresh timer starts]
// Wait 200ms (before automatic refresh would trigger)
time.Sleep(200 * time.Millisecond) // T=200ms
// Phase 2: Manual refresh
// ----------------------
// Define loader for refresh operation
loader := otter.LoaderFunc[int, int](func(ctx context.Context, k int) (int, error) {
if k != key {
panic("unexpected key") // Validate key
}
time.Sleep(200 * time.Millisecond) // Simulate refresh delay
return key + 1, nil // Return new value
})
// Explicitly trigger refresh before automatic timeout
// Returns channel that will receive refresh result
done := cache.Refresh(ctx, key, loader) // T=200ms
// Phase 3: Verify behavior during refresh
// --------------------------------------
// Check cache state while refresh is in progress
v, ok := cache.GetIfPresent(key)
if !ok {
panic("key should be found") // Should still be available
}
if v != key { // Should still show old value during refresh
panic("incorrect value")
}
// Phase 4: Verify refresh completion
// ---------------------------------
// Wait for refresh to complete (blocks until done)
result := <-done // T=400ms
// Verify new value is in cache
v, ok = cache.GetIfPresent(key)
if !ok {
panic("key should be found") // Should persist after refresh
}
// Should match both:
// - Direct cache check
// - Result from refresh operation
if v != key+1 || v != result.Value {
panic("incorrect value")
}
}
The same logic applies exactly to BulkRefresh operations, as in the sketch below.
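A minimal sketch with BulkRefresh, assuming that, like Refresh, it returns a channel that delivers the results once the refresh completes; the sketch only waits on that channel and then checks the cache:
package main
import (
	"context"
	"time"
	"github.com/maypok86/otter/v2"
)
func main() {
	cache := otter.Must(&otter.Options[int, int]{
		RefreshCalculator: otter.RefreshWriting[int, int](500 * time.Millisecond),
	})
	keys := []int{1, 2}
	ctx := context.Background()
	// Write the initial values
	for _, k := range keys {
		cache.Set(k, k)
	}
	// Bulk loader used by the refresh
	bulkLoader := otter.BulkLoaderFunc[int, int](func(ctx context.Context, keys []int) (map[int]int, error) {
		time.Sleep(200 * time.Millisecond) // Simulate a slow reload
		result := make(map[int]int, len(keys))
		for _, k := range keys {
			result[k] = k + 1
		}
		return result, nil
	})
	// Explicitly refresh both keys before the automatic refresh would trigger.
	// Assumption: the returned channel is ready once the refresh has completed.
	done := cache.BulkRefresh(ctx, keys, bulkLoader)
	// Old values remain visible while the refresh is in progress
	for _, k := range keys {
		if v, ok := cache.GetIfPresent(k); !ok || v != k {
			panic("old values should still be cached during refresh")
		}
	}
	// Wait for the refresh to complete, then verify the new values
	<-done
	for _, k := range keys {
		if v, ok := cache.GetIfPresent(k); !ok || v != k+1 {
			panic("refresh should be completed")
		}
	}
}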
Statistics¶
stats.Counter¶
package main
import (
"github.com/maypok86/otter/v2"
"github.com/maypok86/otter/v2/stats"
)
func main() {
// Create a new statistics counter
counter := stats.NewCounter()
// Initialize cache with statistics recorder
cache := otter.Must(&otter.Options[int, int]{
StatsRecorder: counter, // Attach stats collector to cache
})
// Phase 1: Populate cache with test data
// -------------------------------------
// Insert 10 key-value pairs (0:0 through 9:9)
for i := 0; i < 10; i++ {
cache.Set(i, i) // Each Set is recorded in stats
}
// Phase 2: Test cache operations
// -----------------------------
// Successful gets for existing keys
for i := 0; i < 10; i++ {
cache.GetIfPresent(i) // These will count as hits
}
// Attempt to get non-existent key
cache.GetIfPresent(10) // This will count as a miss
// Phase 3: Verify statistics
// --------------------------
// Get atomic snapshot of current statistics
snapshot := counter.Snapshot()
// Validate hit count (should match successful gets)
if snapshot.Hits != 10 {
panic("incorrect number of hits")
}
// Validate miss count (should match failed get)
if snapshot.Misses != 1 {
panic("incorrect number of misses")
}
}
Event handlers¶
OnAtomicDeletion¶
package main
import (
"sync"
"github.com/maypok86/otter/v2"
)
func main() {
// Synchronization for concurrent access to eviction map
var mutex sync.Mutex
// Cache configuration
maximumSize := 10
// Map to track eviction reasons
m := make(map[otter.DeletionCause]int)
// Create cache with eviction callback
cache := otter.Must[int, int](&otter.Options[int, int]{
MaximumSize: maximumSize, // Cache capacity
OnAtomicDeletion: func(e otter.DeletionEvent[int, int]) {
// Only count evictions (not explicit deletions)
if e.WasEvicted() {
mutex.Lock()
m[e.Cause]++ // Increment counter for this eviction cause
mutex.Unlock()
}
},
})
// Phase 1: Fill cache to capacity
// ------------------------------
// Add entries 0-9 (total of 10)
for i := 0; i < maximumSize; i++ {
cache.Set(i, i)
}
// Phase 2: Force eviction of all entries
// -------------------------------------
// Reduce cache size to 0, forcing eviction of all entries
cache.SetMaximum(0) // Triggers OnAtomicDeletion for each evicted entry
// Phase 3: Validate eviction tracking
// -----------------------------------
// Verify we recorded exactly 10 overflow evictions
if len(m) != 1 || m[otter.CauseOverflow] != maximumSize {
panic("invalid OnAtomicDeletion call detected")
}
}
The same logic applies exactly to the OnDeletion handler, as in the sketch below.
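A minimal sketch of the same scenario with OnDeletion, assuming it takes the same DeletionEvent callback but may be invoked asynchronously, so the sketch waits briefly before checking the counters:
package main
import (
	"sync"
	"time"
	"github.com/maypok86/otter/v2"
)
func main() {
	var (
		mutex sync.Mutex
		m     = make(map[otter.DeletionCause]int)
	)
	// OnDeletion receives the same DeletionEvent as OnAtomicDeletion,
	// but the handler may run after the entry has already been removed
	cache := otter.Must[int, int](&otter.Options[int, int]{
		MaximumSize: 10,
		OnDeletion: func(e otter.DeletionEvent[int, int]) {
			mutex.Lock()
			m[e.Cause]++
			mutex.Unlock()
		},
	})
	for i := 0; i < 10; i++ {
		cache.Set(i, i)
	}
	// Evict everything, as in the example above
	cache.SetMaximum(0)
	// Give the asynchronous handler a moment to run before checking the counters
	time.Sleep(100 * time.Millisecond)
	mutex.Lock()
	defer mutex.Unlock()
	if m[otter.CauseOverflow] != 10 {
		panic("expected 10 overflow deletions")
	}
}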
Iteration¶
InvalidateByFunc¶
package main
import "github.com/maypok86/otter/v2"
func main() {
// Create a new cache with default options
cache := otter.Must(&otter.Options[int, int]{})
// Phase 1: Initialize cache with sample data
// ----------------------------------------
// Populate cache with keys 0-9 and corresponding values
for i := 0; i < 10; i++ {
cache.Set(i, i) // Stores key-value pairs (0:0, 1:1, ..., 9:9)
}
// Phase 2: Iterate with concurrent modification
// --------------------------------------------
// The All() method returns an iterator over cache entries
// Otter supports modifications during iteration
for key := range cache.All() {
if key%2 == 0 {
continue // Skip even keys
}
// Delete odd keys while iterating!
cache.Invalidate(key)
}
// Phase 3: Verify results
// -----------------------
// Check that all odd keys were removed
for i := 0; i < 10; i++ {
if _, ok := cache.GetIfPresent(i); ok && i%2 == 1 {
panic("odd key found")
}
}
}
Extension¶
package main
import (
"time"
"github.com/maypok86/otter/v2"
)
func main() {
// Create cache with custom configuration:
// - Maximum weight capacity: 10
// - Custom weigher function (9 * key)
// - Default expiration: 1000 days (but will be overridden)
cache := otter.Must[int, int](&otter.Options[int, int]{
MaximumWeight: 10,
Weigher: func(key int, value int) uint32 {
return 9 * uint32(key) // Weight calculation (key=1 → weight=9)
},
ExpiryCalculator: otter.ExpiryWriting[int, int](1000 * 24 * time.Hour),
})
key := 1 // Test key
// Phase 1: Test expiration override
// --------------------------------
// Add entry with default expiration (1000 days)
cache.Set(key, key)
// Override expiration to 1 second
cache.SetExpiresAfter(key, time.Second)
// Wait for expiration
time.Sleep(time.Second)
// Verify entry expired
if _, ok := cache.GetIfPresent(key); ok {
panic("1 shouldn't be found") // Should be expired
}
// Phase 2: Test weight calculation
// --------------------------------
// Re-add the entry
cache.Set(key, key)
// Verify entry properties
if entry, ok := cache.GetEntry(key); !ok || entry.Weight != 9 {
panic("incorrect entry") // Should have weight=9 (9*1)
}
// Check total cache weight
if cache.WeightedSize() != 9 {
panic("incorrect weightedSize")
}
// Phase 3: Test dynamic resizing
// -----------------------------
// Reduce maximum weight to 5 (current weight=9 exceeds this)
cache.SetMaximum(5) // Triggers immediate eviction
// Verify entry was evicted due to size constraint
if _, ok := cache.GetIfPresent(key); ok {
panic("1 shouldn't be found")
}
// Verify cache is now empty
if cache.WeightedSize() != 0 {
panic("incorrect weightedSize")
}
// Verify new maximum was set
if cache.GetMaximum() != 5 {
panic("incorrect maximum")
}
}
"Real-world" examples¶
Wrapper¶
package main
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/jmoiron/sqlx"
"github.com/maypok86/otter/v2"
"github.com/maypok86/otter/v2/stats"
)
// User represents a user entity in the system
type User struct {
ID int64 `db:"id"` // Unique identifier
Email string `db:"email"` // User's email address
CreatedAt time.Time `db:"created_at"` // Timestamp when user was created
UpdatedAt time.Time `db:"updated_at"` // Timestamp when user was last updated
}
// Repo provides direct database access to user data
type Repo struct {
db *sqlx.DB // Database connection handle
}
// NewRepo creates a new repository instance
func NewRepo(db *sqlx.DB) *Repo {
return &Repo{db: db}
}
// GetByID retrieves a user by ID from the database
func (r *Repo) GetByID(ctx context.Context, id int64) (User, error) {
const query = "SELECT * FROM users WHERE id = $1" // SQL query with parameter binding
var user User
// Execute query and map result to User struct
if err := r.db.GetContext(ctx, &user, query, id); err != nil {
return User{}, fmt.Errorf("get user from db: %w", err) // Wrap error with context
}
return user, nil
}
// CachedRepo provides cached access to user data
type CachedRepo struct {
cache *otter.Cache[int64, User] // Cache instance storing User objects
loader otter.Loader[int64, User] // Loading function for cache misses
}
// NewCachedRepo creates a new cached repository with Otter cache
func NewCachedRepo(repo *Repo) *CachedRepo {
// Loader function that gets called on cache misses
loader := func(ctx context.Context, key int64) (User, error) {
user, err := repo.GetByID(ctx, key)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
// Convert "not found" DB error to cache-specific error
return User{}, otter.ErrNotFound
}
return User{}, err
}
return user, nil
}
// Initialize and configure Otter cache with:
cache := otter.Must(&otter.Options[int64, User]{
MaximumSize: 10_000, // Maximum cache capacity
ExpiryCalculator: otter.ExpiryWriting[int64, User](time.Hour), // Entry TTL (time-to-live)
RefreshCalculator: otter.RefreshWriting[int64, User](50 * time.Minute), // Refresh interval
StatsRecorder: stats.NewCounter(), // Cache statistics collector
})
return &CachedRepo{
cache: cache,
loader: otter.LoaderFunc[int64, User](loader), // Convert loader to Otter-compatible type
}
}
// GetByID retrieves a user by ID, using cache when possible
func (cr *CachedRepo) GetByID(ctx context.Context, id int64) (User, error) {
// Get from cache, calling loader on cache miss
return cr.cache.Get(ctx, id, cr.loader)
}