Ristretto

Go / Cache Library / In-Memory Cache / High Performance / Concurrent

GitHub Overview

hypermodeinc/ristretto

A high performance memory-bound Go cache

Stars6,521
Watchers71
Forks425
Created:February 12, 2019
Language:Go
License:Apache License 2.0

Topics

cache, go, golang, library, performance

Star History

hypermodeinc/ristretto Star History
Data as of: 10/22/2025, 08:07 AM

Cache Library

Ristretto

Overview

Ristretto is a high-performance, in-memory cache library for Go. It avoids contention, delivers excellent throughput and hit ratios, and is designed as an ideal caching solution for heavily concurrent workloads.

Details

Ristretto is a fast, concurrent cache library built with a focus on performance and correctness for Go. It was born from the need for a contention-free cache in Dgraph. It features high hit ratios with unique admission/eviction policy pairing, fast throughput using various techniques for managing contention, and full concurrency that allows using as many goroutines as needed with little throughput degradation. It uses SampledLFU for eviction (on par with exact LRU with better performance on Search and Database traces) and TinyLFU for admission (extra performance with little memory overhead - 12 bits per counter). Cost-based eviction allows any large new item deemed valuable to evict multiple smaller items. Optional performance metrics for throughput, hit ratios, and other stats are available with a 10% throughput performance overhead when enabled. Throughput performance is attributed to a mix of batching and eventual consistency, while hit ratio performance is mostly due to excellent admission policy and SampledLFU eviction policy. It's optimized for contention resistance and performs really well under heavy concurrent load, providing consistently high hit ratios.

Pros and Cons

Pros

  • High Performance: Excellent throughput and low latency
  • High Hit Ratios: Optimized cache efficiency with TinyLFU and SampledLFU
  • Fully Concurrent: Maintains high performance regardless of goroutine count
  • Contention Resistant: Consistent performance under heavy concurrent load
  • Flexible Configuration: Cost-based eviction and metrics features
  • Memory Efficient: Minimal memory overhead
  • Type Safe: Go generics support (v2.x)

Cons

  • Go Only: Available only for Go language
  • Metrics Overhead: 10% performance degradation when metrics are enabled
  • Configuration Complexity: Many configuration parameters for optimization
  • Memory Bound: Memory-based cache without persistence
  • Learning Curve: Time required to master advanced features

Key Links

Code Examples

Installation and Basic Configuration

# Installation in Go modules environment
go get github.com/dgraph-io/ristretto
package main

import (
    "fmt"
    "time"
    
    "github.com/dgraph-io/ristretto"
)

func main() {
    // Create basic cache
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1e7,     // number of keys to track frequency of (10M)
        MaxCost:     1 << 30, // maximum cost of cache (1GB)
        BufferItems: 64,      // number of keys per Get buffer
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()
    
    // Basic operations
    cache.Set("key", "value", 1)
    
    // Get value
    value, found := cache.Get("key")
    if found {
        fmt.Println("Cache hit:", value)
    }
    
    // Clear cache
    cache.Clear()
}

Type-Safe Generics Usage (v2.x)

package main

import (
    "fmt"
    "github.com/dgraph-io/ristretto/v2"
)

type User struct {
    ID   int
    Name string
    Email string
}

func main() {
    // Create type-safe cache
    cache, err := ristretto.NewCache[string, *User](&ristretto.Config[string, *User]{
        NumCounters: 1000,
        MaxCost:     100,
        BufferItems: 64,
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()
    
    // Cache user data
    user := &User{
        ID:   1,
        Name: "Alice",
        Email: "[email protected]",
    }
    
    // Set with cost consideration
    cache.Set("user:1", user, 1)
    
    // Type-safe retrieval
    if cachedUser, found := cache.Get("user:1"); found {
        fmt.Printf("User: %+v\n", cachedUser)
    }
    
    // Delete
    cache.Del("user:1")
}

Cost-Based Eviction

package main

import (
    "fmt"
    "github.com/dgraph-io/ristretto"
)

func main() {
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1000,
        MaxCost:     100, // Maximum total cost of 100
        BufferItems: 64,
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()
    
    // Store items with different costs
    cache.Set("small-item", "data1", 1)   // Cost 1
    cache.Set("medium-item", "data2", 5)  // Cost 5
    cache.Set("large-item", "data3", 20)  // Cost 20
    
    // Large item may evict smaller items
    cache.Set("huge-item", "data4", 80)   // Cost 80
    
    // Check existence of each item
    items := []string{"small-item", "medium-item", "large-item", "huge-item"}
    
    for _, key := range items {
        if _, found := cache.Get(key); found {
            fmt.Printf("%s: exists in cache\n", key)
        } else {
            fmt.Printf("%s: evicted\n", key)
        }
    }
}

TTL (Time To Live) Configuration

package main

import (
    "fmt"
    "time"
    
    "github.com/dgraph-io/ristretto"
)

func main() {
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1000,
        MaxCost:     100,
        BufferItems: 64,
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()
    
    // Set items with TTL
    cache.SetWithTTL("session:123", "session-data", 1, 5*time.Second)
    cache.SetWithTTL("temp-data", "temporary", 1, 2*time.Second)
    
    // Check immediately
    if value, found := cache.Get("session:123"); found {
        fmt.Printf("Session: %v\n", value)
    }
    
    // Wait 3 seconds
    time.Sleep(3 * time.Second)
    
    if _, found := cache.Get("temp-data"); !found {
        fmt.Println("temp-data expired and removed")
    }
    
    if value, found := cache.Get("session:123"); found {
        fmt.Printf("Session (after 3s): %v\n", value)
    }
    
    // Wait another 3 seconds (total 6 seconds)
    time.Sleep(3 * time.Second)
    
    if _, found := cache.Get("session:123"); !found {
        fmt.Println("session:123 expired and removed")
    }
}

Metrics Functionality

package main

import (
    "fmt"
    "github.com/dgraph-io/ristretto"
)

func main() {
    // Cache with metrics enabled
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1000,
        MaxCost:     100,
        BufferItems: 64,
        Metrics:     true, // Enable metrics
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()
    
    // Insert test data
    for i := 0; i < 100; i++ {
        key := fmt.Sprintf("key:%d", i)
        cache.Set(key, fmt.Sprintf("value:%d", i), 1)
    }
    
    // Test cache hits/misses
    hitCount := 0
    missCount := 0
    
    // Access existing keys
    for i := 0; i < 50; i++ {
        key := fmt.Sprintf("key:%d", i)
        if _, found := cache.Get(key); found {
            hitCount++
        } else {
            missCount++
        }
    }
    
    // Access non-existing keys
    for i := 100; i < 150; i++ {
        key := fmt.Sprintf("key:%d", i)
        if _, found := cache.Get(key); found {
            hitCount++
        } else {
            missCount++
        }
    }
    
    // Get metrics
    metrics := cache.Metrics
    fmt.Printf("Hits: %d\n", hitCount)
    fmt.Printf("Misses: %d\n", missCount)
    fmt.Printf("Cache metrics: %+v\n", metrics)
}

Concurrent Usage

package main

import (
    "fmt"
    "sync"
    "time"
    
    "github.com/dgraph-io/ristretto"
)

func main() {
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 10000,
        MaxCost:     1000,
        BufferItems: 64,
        Metrics:     true,
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()
    
    var wg sync.WaitGroup
    numGoroutines := 100
    operationsPerGoroutine := 1000
    
    // Concurrent writes with multiple goroutines
    wg.Add(numGoroutines)
    for i := 0; i < numGoroutines; i++ {
        go func(goroutineID int) {
            defer wg.Done()
            
            for j := 0; j < operationsPerGoroutine; j++ {
                key := fmt.Sprintf("goroutine:%d:key:%d", goroutineID, j)
                value := fmt.Sprintf("value:%d:%d", goroutineID, j)
                cache.Set(key, value, 1)
            }
        }(i)
    }
    
    // Concurrent reads
    wg.Add(numGoroutines)
    for i := 0; i < numGoroutines; i++ {
        go func(goroutineID int) {
            defer wg.Done()
            
            for j := 0; j < operationsPerGoroutine; j++ {
                key := fmt.Sprintf("goroutine:%d:key:%d", goroutineID, j)
                if value, found := cache.Get(key); found {
                    _ = value // Use the value
                }
            }
        }(i)
    }
    
    wg.Wait()
    
    fmt.Printf("Concurrent processing completed - %d goroutines, %d operations each\n", 
               numGoroutines, operationsPerGoroutine)
    fmt.Printf("Metrics: %+v\n", cache.Metrics)
}

Web Application Usage Example

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strconv"
    "time"
    
    "github.com/dgraph-io/ristretto"
)

type UserService struct {
    cache *ristretto.Cache
}

type User struct {
    ID    int    `json:"id"`
    Name  string `json:"name"`
    Email string `json:"email"`
}

func NewUserService() *UserService {
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1e7,
        MaxCost:     1 << 30,
        BufferItems: 64,
        Metrics:     true,
    })
    if err != nil {
        panic(err)
    }
    
    return &UserService{cache: cache}
}

func (us *UserService) GetUser(id int) (*User, error) {
    cacheKey := fmt.Sprintf("user:%d", id)
    
    // Try to get from cache
    if cachedUser, found := us.cache.Get(cacheKey); found {
        if user, ok := cachedUser.(*User); ok {
            return user, nil
        }
    }
    
    // Fetch from database (simulated)
    user := &User{
        ID:    id,
        Name:  fmt.Sprintf("User %d", id),
        Email: fmt.Sprintf("user%[email protected]", id),
    }
    
    // Store in cache with 1 hour TTL
    us.cache.SetWithTTL(cacheKey, user, 1, time.Hour)
    
    return user, nil
}

func (us *UserService) InvalidateUser(id int) {
    cacheKey := fmt.Sprintf("user:%d", id)
    us.cache.Del(cacheKey)
}

func (us *UserService) GetCacheMetrics() interface{} {
    return us.cache.Metrics
}

func main() {
    userService := NewUserService()
    
    http.HandleFunc("/user/", func(w http.ResponseWriter, r *http.Request) {
        idStr := r.URL.Path[len("/user/"):]
        id, err := strconv.Atoi(idStr)
        if err != nil {
            http.Error(w, "Invalid user ID", http.StatusBadRequest)
            return
        }
        
        user, err := userService.GetUser(id)
        if err != nil {
            http.Error(w, "User not found", http.StatusNotFound)
            return
        }
        
        w.Header().Set("Content-Type", "application/json")
        json.NewEncoder(w).Encode(user)
    })
    
    http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
        metrics := userService.GetCacheMetrics()
        w.Header().Set("Content-Type", "application/json")
        json.NewEncoder(w).Encode(metrics)
    })
    
    fmt.Println("Server started on port 8080")
    http.ListenAndServe(":8080", nil)
}

Database Query Caching

package main

import (
    "fmt"
    "time"
    
    "github.com/dgraph-io/ristretto"
)

type QueryCache struct {
    cache *ristretto.Cache
}

func NewQueryCache() *QueryCache {
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1e7,
        MaxCost:     1 << 30,
        BufferItems: 64,
    })
    if err != nil {
        panic(err)
    }
    
    return &QueryCache{cache: cache}
}

func (qc *QueryCache) Execute(query string, ttl time.Duration) (interface{}, error) {
    // Use query hash as key
    cacheKey := fmt.Sprintf("query:%s", query)
    
    // Get from cache
    if result, found := qc.cache.Get(cacheKey); found {
        fmt.Println("Cache hit:", query)
        return result, nil
    }
    
    // Simulate database query
    fmt.Println("Executing database query:", query)
    time.Sleep(100 * time.Millisecond) // Simulate DB latency
    
    // Simulate result
    result := map[string]interface{}{
        "query":     query,
        "timestamp": time.Now(),
        "data":      []string{"row1", "row2", "row3"},
    }
    
    // Store in cache
    qc.cache.SetWithTTL(cacheKey, result, 1, ttl)
    
    return result, nil
}

func main() {
    queryCache := NewQueryCache()
    
    queries := []string{
        "SELECT * FROM users WHERE active = true",
        "SELECT COUNT(*) FROM orders WHERE date > '2023-01-01'",
        "SELECT * FROM products WHERE category = 'electronics'",
    }
    
    // Execute each query multiple times
    for i := 0; i < 3; i++ {
        fmt.Printf("\n=== Round %d ===\n", i+1)
        
        for _, query := range queries {
            result, err := queryCache.Execute(query, 5*time.Minute)
            if err != nil {
                fmt.Printf("Error: %v\n", err)
                continue
            }
            
            fmt.Printf("Result: %v\n", result)
        }
        
        // Wait a bit
        time.Sleep(1 * time.Second)
    }
}

Custom Eviction Policy

package main

import (
    "fmt"
    "github.com/dgraph-io/ristretto"
)

// calculateCost reports the eviction cost to charge for a cached value:
// the byte length for strings and byte slices, a flat 10 for generic
// maps, and 1 for everything else.
func calculateCost(value interface{}) int64 {
    if s, ok := value.(string); ok {
        return int64(len(s))
    }
    if b, ok := value.([]byte); ok {
        return int64(len(b))
    }
    if _, ok := value.(map[string]interface{}); ok {
        return 10 // Fixed cost for maps
    }
    return 1 // Default
}

func main() {
    cache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: 1000,
        MaxCost:     1000, // 1000 bytes limit
        BufferItems: 64,
        Metrics:     true,
    })
    if err != nil {
        panic(err)
    }
    defer cache.Close()

    // Store data of different sizes
    smallData := "small"
    mediumData := "this is a medium sized string"
    largeData := "this is a very large string that takes up much more space than the others"

    // Set cost based on actual data size
    cache.Set("small", smallData, calculateCost(smallData))
    cache.Set("medium", mediumData, calculateCost(mediumData))
    cache.Set("large", largeData, calculateCost(largeData))

    // Map data
    mapData := map[string]interface{}{
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
    }
    cache.Set("map", mapData, calculateCost(mapData))

    // Sets are buffered and applied asynchronously; wait so the
    // existence checks below see the settled cache state.
    cache.Wait()

    // Check cache status
    items := []string{"small", "medium", "large", "map"}
    for _, key := range items {
        if value, found := cache.Get(key); found {
            cost := calculateCost(value)
            fmt.Printf("%s (cost: %d): exists in cache\n", key, cost)
        } else {
            fmt.Printf("%s: evicted\n", key)
        }
    }

    fmt.Printf("\nCache metrics: %+v\n", cache.Metrics)
}