From 4af8ea7f86c69e5ab33748f517da828c5bf629ff Mon Sep 17 00:00:00 2001 From: Sergey Melekhin Date: Sun, 4 Jan 2026 18:25:43 +0700 Subject: [PATCH 1/3] add onEvict callback for TTLCache --- map_ttl.go | 31 ++++++++--- ttl_test.go | 152 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 176 insertions(+), 7 deletions(-) diff --git a/map_ttl.go b/map_ttl.go index c81480b..83ffaff 100644 --- a/map_ttl.go +++ b/map_ttl.go @@ -23,16 +23,19 @@ func zero[T any]() T { return z } +type onEvictFunc[K comparable, V any] func(key K, value V) + // MapTTLCache is the thread-safe map-based cache with TTL cache invalidation support. // MapTTLCache uses double linked list to maintain FIFO order of inserted values. type MapTTLCache[K comparable, V any] struct { - data map[K]ttlRec[K, V] - mux sync.RWMutex - ttl time.Duration - now func() time.Time - tail K - head K - zero K + data map[K]ttlRec[K, V] + mux sync.RWMutex + ttl time.Duration + now func() time.Time + onEvict onEvictFunc[K, V] + tail K + head K + zero K } // NewMapTTLCache creates MapTTLCache instance and spawns background @@ -70,6 +73,16 @@ func NewMapTTLCache[K comparable, V any]( return &c } +// OnEvict sets a callback function that will be called when an entry is evicted from the cache +// due to TTL expiration. The callback receives the key and value of the evicted entry. +// Note that the eviction callback is not called for Del operation. +// The callback function should not perform any long-running operations or call other funcitons on the cache (will deadlock). 
+func (c *MapTTLCache[K, V]) OnEvict(f onEvictFunc[K, V]) { + c.mux.Lock() + c.onEvict = f + c.mux.Unlock() +} + func (c *MapTTLCache[K, V]) Set(key K, value V) { c.mux.Lock() defer c.mux.Unlock() @@ -164,6 +177,10 @@ func (c *MapTTLCache[K, V]) cleanup() error { c.head = rec.next delete(c.data, key) + if c.onEvict != nil { + c.onEvict(key, rec.value) + } + if key == c.tail { c.tail = c.zero return nil diff --git a/ttl_test.go b/ttl_test.go index ecd4c91..4f964e1 100644 --- a/ttl_test.go +++ b/ttl_test.go @@ -332,3 +332,155 @@ func TestSetIfPresentResetsTTL(t *testing.T) { t.Errorf("value was not updated by SetIfPresent, expected %v, but got %v", "value2", v) } } + +func TestOnEvict(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + c := NewMapTTLCache[string, string](ctx, time.Millisecond, time.Millisecond*5) + + evicted := make(map[string]string) + var mu sync.Mutex + + c.OnEvict(func(key string, value string) { + mu.Lock() + evicted[key] = value + mu.Unlock() + }) + + c.Set("key1", "value1") + c.Set("key2", "value2") + c.Set("key3", "value3") + + // Wait for cleanup to run + time.Sleep(time.Millisecond * 10) + + mu.Lock() + defer mu.Unlock() + + if len(evicted) != 3 { + t.Errorf("expected 3 evictions, got %d", len(evicted)) + } + + expected := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + for k, v := range expected { + if evicted[k] != v { + t.Errorf("expected evicted[%q] = %q, got %q", k, v, evicted[k]) + } + } +} + +func TestOnEvictNotCalledForDel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + c := NewMapTTLCache[string, string](ctx, time.Second, time.Second) + + evicted := make(map[string]string) + var mu sync.Mutex + + c.OnEvict(func(key string, value string) { + mu.Lock() + evicted[key] = value + mu.Unlock() + }) + + c.Set("key1", "value1") + c.Set("key2", "value2") + + // Delete key1 explicitly + if err := c.Del("key1"); err 
!= nil { + t.Errorf("unexpected error in Del: %v", err) + } + + mu.Lock() + if len(evicted) != 0 { + t.Errorf("expected no evictions from Del, got %d", len(evicted)) + } + mu.Unlock() + + // Wait for TTL expiration and cleanup + time.Sleep(time.Millisecond * 1500) + + mu.Lock() + defer mu.Unlock() + + if len(evicted) != 1 { + t.Errorf("expected 1 eviction from TTL, got %d", len(evicted)) + } + + if evicted["key2"] != "value2" { + t.Errorf("expected evicted[key2] = value2, got %q", evicted["key2"]) + } + + if _, ok := evicted["key1"]; ok { + t.Errorf("key1 should not be in evicted map as it was deleted with Del()") + } +} + +func TestOnEvictPartialCleanup(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + c := NewMapTTLCache[string, string](ctx, time.Millisecond*50, time.Millisecond*10) + + evicted := make(map[string]string) + var mu sync.Mutex + + c.OnEvict(func(key string, value string) { + mu.Lock() + evicted[key] = value + mu.Unlock() + }) + + // Add first batch + c.Set("key1", "value1") + c.Set("key2", "value2") + + // Wait a bit + time.Sleep(time.Millisecond * 30) + + // Add second batch + c.Set("key3", "value3") + c.Set("key4", "value4") + + // Wait for first batch to expire and cleanup to run + time.Sleep(time.Millisecond * 40) + + mu.Lock() + if len(evicted) != 2 { + t.Errorf("expected 2 evictions, got %d", len(evicted)) + } + + if evicted["key1"] != "value1" { + t.Errorf("expected evicted[key1] = value1, got %q", evicted["key1"]) + } + + if evicted["key2"] != "value2" { + t.Errorf("expected evicted[key2] = value2, got %q", evicted["key2"]) + } + + if _, ok := evicted["key3"]; ok { + t.Errorf("key3 should not be evicted yet") + } + + if _, ok := evicted["key4"]; ok { + t.Errorf("key4 should not be evicted yet") + } + mu.Unlock() + + // Wait for second batch to expire + time.Sleep(time.Millisecond * 40) + + mu.Lock() + defer mu.Unlock() + + if len(evicted) != 4 { + t.Errorf("expected 4 evictions total, got %d", 
len(evicted)) + } +} From 2f02c8f07f317eced6debdf9749ae7a01b0230fe Mon Sep 17 00:00:00 2001 From: Sergey Melekhin Date: Fri, 9 Jan 2026 21:33:31 +0700 Subject: [PATCH 2/3] more ergonomic eviction at more allocations price --- map_ttl.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/map_ttl.go b/map_ttl.go index 83ffaff..1aa1451 100644 --- a/map_ttl.go +++ b/map_ttl.go @@ -76,7 +76,6 @@ func NewMapTTLCache[K comparable, V any]( // OnEvict sets a callback function that will be called when an entry is evicted from the cache // due to TTL expiration. The callback receives the key and value of the evicted entry. // Note that the eviction callback is not called for Del operation. -// The callback function should not perform any long-running operations or call other funcitons on the cache (will deadlock). func (c *MapTTLCache[K, V]) OnEvict(f onEvictFunc[K, V]) { c.mux.Lock() c.onEvict = f @@ -158,10 +157,22 @@ func (c *MapTTLCache[K, V]) Del(key K) error { return nil } -// cleanup removes outdated records. +// cleanup removes outdated records +// and calls eviction callbacks. func (c *MapTTLCache[K, V]) cleanup() error { + var ( + evicted map[K]V + onEvict onEvictFunc[K, V] + ) + c.mux.Lock() - defer c.mux.Unlock() + + // Preallocate a small map for evicted records + // if eviction callback is set. + if c.onEvict != nil { + onEvict = c.onEvict + evicted = make(map[K]V, 16) + } key := c.head for { @@ -177,13 +188,13 @@ func (c *MapTTLCache[K, V]) cleanup() error { c.head = rec.next delete(c.data, key) - if c.onEvict != nil { - c.onEvict(key, rec.value) + if onEvict != nil { + evicted[key] = rec.value } if key == c.tail { c.tail = c.zero - return nil + break } next, ok := c.data[rec.next] @@ -193,6 +204,12 @@ func (c *MapTTLCache[K, V]) cleanup() error { } key = rec.next } + c.mux.Unlock() + + // Call eviction callbacks outside of the lock. 
+ for k, v := range evicted { + onEvict(k, v) + } return nil } From 6d7ef70c278803a74e1250d341d81a7bb8ea9c76 Mon Sep 17 00:00:00 2001 From: Sergey Melekhin Date: Sat, 10 Jan 2026 09:55:58 +0700 Subject: [PATCH 3/3] remove sleep in tests --- map_ttl.go | 1 + ttl_test.go | 68 +++++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 54 insertions(+), 15 deletions(-) diff --git a/map_ttl.go b/map_ttl.go index 1aa1451..1b082ad 100644 --- a/map_ttl.go +++ b/map_ttl.go @@ -31,6 +31,7 @@ type MapTTLCache[K comparable, V any] struct { data map[K]ttlRec[K, V] mux sync.RWMutex ttl time.Duration + // TODO: replace with testing/synctest now func() time.Time onEvict onEvictFunc[K, V] tail K diff --git a/ttl_test.go b/ttl_test.go index 4f964e1..331439e 100644 --- a/ttl_test.go +++ b/ttl_test.go @@ -337,7 +337,8 @@ func TestOnEvict(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c := NewMapTTLCache[string, string](ctx, time.Millisecond, time.Millisecond*5) + c := NewMapTTLCache[string, string](ctx, time.Second, time.Hour) + ts := time.Now() evicted := make(map[string]string) var mu sync.Mutex @@ -352,8 +353,15 @@ func TestOnEvict(t *testing.T) { c.Set("key2", "value2") c.Set("key3", "value3") - // Wait for cleanup to run - time.Sleep(time.Millisecond * 10) + // Override now to simulate time passing + c.mux.Lock() + c.now = func() time.Time { return ts.Add(2 * time.Second) } + c.mux.Unlock() + + // Manually trigger cleanup + if err := c.cleanup(); err != nil { + t.Errorf("unexpected error in cleanup: %v", err) + } mu.Lock() defer mu.Unlock() @@ -379,7 +387,8 @@ func TestOnEvictNotCalledForDel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c := NewMapTTLCache[string, string](ctx, time.Second, time.Second) + c := NewMapTTLCache[string, string](ctx, time.Second, time.Hour) + ts := time.Now() evicted := make(map[string]string) var mu sync.Mutex @@ -404,8 +413,15 @@ func 
TestOnEvictNotCalledForDel(t *testing.T) { } mu.Unlock() - // Wait for TTL expiration and cleanup - time.Sleep(time.Millisecond * 1500) + // Override now to simulate TTL expiration + c.mux.Lock() + c.now = func() time.Time { return ts.Add(2 * time.Second) } + c.mux.Unlock() + + // Manually trigger cleanup + if err := c.cleanup(); err != nil { + t.Errorf("unexpected error in cleanup: %v", err) + } mu.Lock() defer mu.Unlock() @@ -427,7 +443,8 @@ func TestOnEvictPartialCleanup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c := NewMapTTLCache[string, string](ctx, time.Millisecond*50, time.Millisecond*10) + c := NewMapTTLCache[string, string](ctx, time.Second, time.Hour) + ts := time.Now() evicted := make(map[string]string) var mu sync.Mutex @@ -438,19 +455,33 @@ func TestOnEvictPartialCleanup(t *testing.T) { mu.Unlock() }) - // Add first batch + // Override now to time T for first batch + c.mux.Lock() + c.now = func() time.Time { return ts } + c.mux.Unlock() + + // Add first batch at time T c.Set("key1", "value1") c.Set("key2", "value2") - // Wait a bit - time.Sleep(time.Millisecond * 30) + // Override now to time T+700ms for second batch + c.mux.Lock() + c.now = func() time.Time { return ts.Add(700 * time.Millisecond) } + c.mux.Unlock() - // Add second batch + // Add second batch at time T+700ms c.Set("key3", "value3") c.Set("key4", "value4") - // Wait for first batch to expire and cleanup to run - time.Sleep(time.Millisecond * 40) + // Override now to T+1.5s - first batch expires but second doesn't + c.mux.Lock() + c.now = func() time.Time { return ts.Add(1500 * time.Millisecond) } + c.mux.Unlock() + + // Manually trigger cleanup - should evict first batch only + if err := c.cleanup(); err != nil { + t.Errorf("unexpected error in cleanup: %v", err) + } mu.Lock() if len(evicted) != 2 { @@ -474,8 +505,15 @@ func TestOnEvictPartialCleanup(t *testing.T) { } mu.Unlock() - // Wait for second batch to expire - 
time.Sleep(time.Millisecond * 40) + // Override now to T+2s - second batch now expires + c.mux.Lock() + c.now = func() time.Time { return ts.Add(2 * time.Second) } + c.mux.Unlock() + + // Manually trigger cleanup - should evict second batch + if err := c.cleanup(); err != nil { + t.Errorf("unexpected error in cleanup: %v", err) + } mu.Lock() defer mu.Unlock()