package service

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	cfg "github.com/korap/korap-mcp/config"
	"github.com/korap/korap-mcp/logger"
	"github.com/maypok86/otter"
	"github.com/rs/zerolog"
)

// CacheEntry represents a cached API response
type CacheEntry struct {
	Data      []byte        `json:"data"`
	Timestamp time.Time     `json:"timestamp"`
	TTL       time.Duration `json:"ttl"`
}

// IsExpired checks if the cache entry has expired
func (ce *CacheEntry) IsExpired() bool {
	return time.Since(ce.Timestamp) > ce.TTL
}

// Cache represents the response cache system
type Cache struct {
	cache  *otter.Cache[string, *CacheEntry]
	logger zerolog.Logger
	config CacheConfig
}

// CacheConfig configures the cache behavior
type CacheConfig struct {
	// Enabled controls whether caching is active
	Enabled bool
	// DefaultTTL is the default time-to-live for cache entries
	DefaultTTL time.Duration
	// SearchTTL is the TTL for search results
	SearchTTL time.Duration
	// MetadataTTL is the TTL for metadata and corpus information
	MetadataTTL time.Duration
	// MaxSize is the maximum number of cache entries
	MaxSize int
}

// DefaultCacheConfig returns a default cache configuration
func DefaultCacheConfig() CacheConfig {
	return CacheConfig{
		Enabled:     true,
		DefaultTTL:  5 * time.Minute,
		SearchTTL:   2 * time.Minute,  // Search results may change more frequently
		MetadataTTL: 15 * time.Minute, // Metadata is more stable
		MaxSize:     1000,
	}
}

// NewCache creates a new cache instance
func NewCache(config CacheConfig) (*Cache, error) {
	// Create default logging config for cache
	logConfig := &cfg.LoggingConfig{
		Level:  "info",
		Format: "text",
	}

	if !config.Enabled {
		return &Cache{
			cache:  nil,
			logger: logger.GetLogger(logConfig),
			config: config,
		}, nil
	}

	// Create otter cache with specified capacity
	cache, err := otter.MustBuilder[string, *CacheEntry](config.MaxSize).
		CollectStats().
		WithTTL(config.DefaultTTL).
		Build()
	if err != nil {
		return nil, fmt.Errorf("failed to create cache: %w", err)
	}

	return &Cache{
		cache:  &cache,
		logger: logger.GetLogger(logConfig),
		config: config,
	}, nil
}
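
// Illustrative usage (a sketch, not part of the package API): construct a cache
// from the default configuration and release its resources when done. Error
// handling is abbreviated.
//
//	cache, err := NewCache(DefaultCacheConfig())
//	if err != nil {
//		return fmt.Errorf("cache setup failed: %w", err)
//	}
//	defer cache.Close()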

// generateCacheKey creates a unique cache key for a request
func (c *Cache) generateCacheKey(method, endpoint string, params map[string]any) string {
	// Create a deterministic key by combining method, endpoint, and parameters
	var keyParts []string
	keyParts = append(keyParts, method, endpoint)

	// Add the parameters to ensure deterministic cache keys.
	// Note: json.Marshal sorts map keys lexicographically, so the JSON output
	// is deterministic regardless of map iteration order. The marshal error is
	// deliberately ignored; params are expected to be JSON-serializable.
	if params != nil {
		paramsJSON, _ := json.Marshal(params)
		keyParts = append(keyParts, string(paramsJSON))
	}

	key := strings.Join(keyParts, "|")

	// Hash the key to keep it at a reasonable length and avoid exposing raw
	// query content in the key; MD5 is used only for key derivation here,
	// not for security.
	hash := md5.Sum([]byte(key))
	return hex.EncodeToString(hash[:])
}
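
// A brief illustration of the key derivation (the endpoint and parameters are
// hypothetical): identical inputs always map to the same key, so the key can
// be shared between Get and Set.
//
//	k1 := c.generateCacheKey("GET", "/search", map[string]any{"q": "Baum", "ql": "poliqarp"})
//	k2 := c.generateCacheKey("GET", "/search", map[string]any{"ql": "poliqarp", "q": "Baum"})
//	// k1 == k2, because json.Marshal emits map keys in sorted order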

// Get retrieves a cached response
func (c *Cache) Get(ctx context.Context, key string) ([]byte, bool) {
	if !c.config.Enabled || c.cache == nil {
		return nil, false
	}

	entry, found := (*c.cache).Get(key)
	if !found {
		c.logger.Debug().Str("key", key).Msg("Cache miss")
		return nil, false
	}

	// Check if entry has expired
	if entry.IsExpired() {
		c.logger.Debug().Str("key", key).Msg("Cache entry expired")
		(*c.cache).Delete(key)
		return nil, false
	}

	c.logger.Debug().Str("key", key).Msg("Cache hit")
	return entry.Data, true
}

// Set stores a response in the cache
func (c *Cache) Set(ctx context.Context, key string, data []byte, ttl time.Duration) {
	if !c.config.Enabled || c.cache == nil {
		return
	}

	entry := &CacheEntry{
		Data:      data,
		Timestamp: time.Now(),
		TTL:       ttl,
	}

	(*c.cache).Set(key, entry)
	c.logger.Debug().Str("key", key).Dur("ttl", ttl).Msg("Cache entry stored")
}

// Delete removes an entry from the cache
func (c *Cache) Delete(ctx context.Context, key string) {
	if !c.config.Enabled || c.cache == nil {
		return
	}

	(*c.cache).Delete(key)
	c.logger.Debug().Str("key", key).Msg("Cache entry deleted")
}

// Clear removes all entries from the cache
func (c *Cache) Clear() {
	if !c.config.Enabled || c.cache == nil {
		return
	}

	(*c.cache).Clear()
	c.logger.Debug().Msg("Cache cleared")
}

// Stats returns cache statistics
func (c *Cache) Stats() map[string]any {
	if !c.config.Enabled || c.cache == nil {
		return map[string]any{
			"enabled": false,
		}
	}

	stats := (*c.cache).Stats()
	return map[string]any{
		"enabled":      true,
		"size":         (*c.cache).Size(),
		"hits":         stats.Hits(),
		"misses":       stats.Misses(),
		"hit_ratio":    stats.Ratio(),
		"evictions":    stats.EvictedCount(),
		"max_size":     c.config.MaxSize,
		"default_ttl":  c.config.DefaultTTL.String(),
		"search_ttl":   c.config.SearchTTL.String(),
		"metadata_ttl": c.config.MetadataTTL.String(),
	}
}

// GetTTLForEndpoint returns the appropriate TTL for a given endpoint
func (c *Cache) GetTTLForEndpoint(endpoint string) time.Duration {
	endpoint = strings.ToLower(endpoint)

	// Search endpoints get a shorter TTL
	if strings.Contains(endpoint, "search") || strings.Contains(endpoint, "query") {
		return c.config.SearchTTL
	}

	// Metadata and corpus endpoints get a longer TTL
	if strings.Contains(endpoint, "corpus") || strings.Contains(endpoint, "metadata") ||
		strings.Contains(endpoint, "statistics") || strings.Contains(endpoint, "info") {
		return c.config.MetadataTTL
	}

	// Default TTL for other endpoints
	return c.config.DefaultTTL
}
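
// Putting it together (an illustrative sketch; the endpoint, parameters, and
// fetch helper are hypothetical, not part of this package): check the cache
// first, and on a miss store the fresh response using the endpoint-specific TTL.
//
//	key := c.generateCacheKey("GET", "/search", map[string]any{"q": "Baum"})
//	if data, ok := c.Get(ctx, key); ok {
//		return data, nil // served from cache
//	}
//	data, err := fetchFromAPI(ctx, "/search") // hypothetical fetch helper
//	if err != nil {
//		return nil, err
//	}
//	c.Set(ctx, key, data, c.GetTTLForEndpoint("/search"))
//	return data, nil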

// Close closes the cache and cleans up resources
func (c *Cache) Close() error {
	if c.cache != nil {
		(*c.cache).Clear()
		(*c.cache).Close()
	}
	return nil
}