Caching
Overview
Caching improves application performance by storing frequently accessed data in memory or fast storage systems. The framework supports multiple caching strategies and storage backends.
In-Memory Caching
Simple Cache Implementation
type MemoryCache struct {
data map[string]CacheItem
mutex sync.RWMutex
}
type CacheItem struct {
Value interface{}
ExpiresAt time.Time
}
func NewMemoryCache() *MemoryCache {
cache := &MemoryCache{
data: make(map[string]CacheItem),
}
// Start cleanup goroutine
go cache.cleanup()
return cache
}
func (c *MemoryCache) Set(key string, value interface{}, ttl time.Duration) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.data[key] = CacheItem{
Value: value,
ExpiresAt: time.Now().Add(ttl),
}
}
func (c *MemoryCache) Get(key string) (interface{}, bool) {
c.mutex.RLock()
defer c.mutex.RUnlock()
item, exists := c.data[key]
if !exists || time.Now().After(item.ExpiresAt) {
return nil, false
}
return item.Value, true
}
func (c *MemoryCache) cleanup() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for range ticker.C {
c.mutex.Lock()
now := time.Now()
for key, item := range c.data {
if now.After(item.ExpiresAt) {
delete(c.data, key)
}
}
c.mutex.Unlock()
}
}
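The tag-based invalidation example further down deletes individual keys, which the minimal cache above does not support yet. A small sketch of a Delete method following the same locking pattern:

    func (c *MemoryCache) Delete(key string) {
        c.mutex.Lock()
        defer c.mutex.Unlock()
        // Remove the entry regardless of whether it has expired yet
        delete(c.data, key)
    }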
Using Cache in Services
type CachedGardenFetcher struct {
fetcher *srv.GardenFetcher
cache *MemoryCache
}
func NewCachedGardenFetcher(fetcher *srv.GardenFetcher) *CachedGardenFetcher {
return &CachedGardenFetcher{
fetcher: fetcher,
cache: NewMemoryCache(),
}
}
func (c *CachedGardenFetcher) FindOneById(ctx context.Context, id string, mod *fetcher.FetcherMod) (*mdl.Garden, bool, error) {
cacheKey := fmt.Sprintf("garden:%s", id)
// Check cache first
if cached, exists := c.cache.Get(cacheKey); exists {
return cached.(*mdl.Garden), true, nil
}
// Fetch from database
garden, exists, err := c.fetcher.FindOneById(ctx, id, mod)
if err != nil || !exists {
return garden, exists, err
}
// Cache the result
c.cache.Set(cacheKey, garden, 15*time.Minute)
return garden, exists, nil
}
Query Result Caching
Caching Database Queries
type QueryCache struct {
cache *MemoryCache
}
func (qc *QueryCache) CacheQuery(query string, args []interface{}, ttl time.Duration, fetcher func() (interface{}, error)) (interface{}, error) {
// Create cache key from query and args
key := qc.generateCacheKey(query, args)
// Check cache
if result, exists := qc.cache.Get(key); exists {
return result, nil
}
// Execute query
result, err := fetcher()
if err != nil {
return nil, err
}
// Cache result
qc.cache.Set(key, result, ttl)
return result, nil
}
func (qc *QueryCache) generateCacheKey(query string, args []interface{}) string {
h := sha256.New()
h.Write([]byte(query))
for _, arg := range args {
// Separate arguments so that ("ab") and ("a", "b") do not hash to the same key
h.Write([]byte{0})
h.Write([]byte(fmt.Sprintf("%v", arg)))
}
return fmt.Sprintf("query:%x", h.Sum(nil))
}
// Usage in Fetcher
func (f *GardenFetcher) FindAllCached(ctx context.Context, mod *fetcher.FetcherMod) ([]*mdl.Garden, error) {
result, err := f.queryCache.CacheQuery(
"SELECT * FROM gardens",
[]interface{}{},
10*time.Minute,
func() (interface{}, error) {
return f.FindAll(ctx, mod)
},
)
if err != nil {
return nil, err
}
return result.([]*mdl.Garden), nil
}
Cache Invalidation
Tag-Based Invalidation
type TaggedCache struct {
cache *MemoryCache
tags map[string][]string // tag -> list of keys
mutex sync.RWMutex
}
func (tc *TaggedCache) SetWithTags(key string, value interface{}, ttl time.Duration, tags []string) {
tc.cache.Set(key, value, ttl)
tc.mutex.Lock()
defer tc.mutex.Unlock()
for _, tag := range tags {
if tc.tags[tag] == nil {
tc.tags[tag] = make([]string, 0)
}
tc.tags[tag] = append(tc.tags[tag], key)
}
}
func (tc *TaggedCache) InvalidateTag(tag string) {
tc.mutex.Lock()
defer tc.mutex.Unlock()
keys, exists := tc.tags[tag]
if !exists {
return
}
for _, key := range keys {
tc.cache.Delete(key)
}
delete(tc.tags, tag)
}
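SetWithTags writes into the tags map, so a TaggedCache needs its maps initialized before use, and the usage example below also reads through the tagged cache. A minimal constructor and read path, reusing the in-memory cache from earlier (the usage example assumes the fetcher's cache field holds a *TaggedCache):

    func NewTaggedCache() *TaggedCache {
        return &TaggedCache{
            cache: NewMemoryCache(),
            tags:  make(map[string][]string),
        }
    }

    func (tc *TaggedCache) Get(key string) (interface{}, bool) {
        // Reads go straight to the underlying cache; tags only matter for invalidation
        return tc.cache.Get(key)
    }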
// Usage
func (f *CachedGardenFetcher) FindOneById(ctx context.Context, id string, mod *fetcher.FetcherMod) (*mdl.Garden, bool, error) {
cacheKey := fmt.Sprintf("garden:%s", id)
tags := []string{"gardens", fmt.Sprintf("garden:%s", id)}
if cached, exists := f.cache.Get(cacheKey); exists {
return cached.(*mdl.Garden), true, nil
}
garden, exists, err := f.fetcher.FindOneById(ctx, id, mod)
if err != nil || !exists {
return garden, exists, err
}
f.cache.SetWithTags(cacheKey, garden, 15*time.Minute, tags)
return garden, exists, nil
}
// Invalidate when garden is updated
func (h *GardenHandler) Update(ctx context.Context, garden *mdl.Garden, mod *handler.HandlerMod) (*mdl.Garden, error) {
updatedGarden, err := h.baseHandler.Update(ctx, garden, mod)
if err != nil {
return nil, err
}
// Invalidate cache
h.cache.InvalidateTag(fmt.Sprintf("garden:%s", garden.Id))
h.cache.InvalidateTag("gardens")
return updatedGarden, nil
}
HTTP Response Caching
ETag Support
func ETagMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
// Only cache GET requests
if c.Request.Method != "GET" {
c.Next()
return
}
// Create response writer that captures output
w := &responseWriter{
ResponseWriter: c.Writer,
body: &bytes.Buffer{},
}
c.Writer = w
c.Next()
// Generate ETag from response body
etag := generateETag(w.body.Bytes())
c.Header("ETag", etag)
// Check If-None-Match header
if c.GetHeader("If-None-Match") == etag {
c.Status(304)
return
}
// Write the buffered body to the underlying response writer
w.ResponseWriter.Write(w.body.Bytes())
}
}
type responseWriter struct {
gin.ResponseWriter
body *bytes.Buffer
}
func (w *responseWriter) Write(data []byte) (int, error) {
w.body.Write(data)
return len(data), nil
}
func generateETag(data []byte) string {
h := sha256.Sum256(data)
return fmt.Sprintf(`"%x"`, h[:8])
}
Cache-Control Headers
func CacheControlMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
path := c.Request.URL.Path
switch {
case strings.HasPrefix(path, "/assets/"):
// Static assets - cache for 1 year
c.Header("Cache-Control", "public, max-age=31536000, immutable")
case strings.HasPrefix(path, "/api/"):
// API responses - no cache by default
c.Header("Cache-Control", "no-cache, no-store, must-revalidate")
c.Header("Pragma", "no-cache")
c.Header("Expires", "0")
case strings.HasSuffix(path, ".html"):
// HTML pages - cache for 5 minutes
c.Header("Cache-Control", "public, max-age=300")
default:
// Default - cache for 1 minute
c.Header("Cache-Control", "public, max-age=60")
}
c.Next()
}
}
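Assuming a standard Gin router, the two middlewares from this section could be registered like this; applying Cache-Control before ETag handling is one reasonable ordering, not a requirement:

    router := gin.Default()
    // Set caching headers for every response, then apply ETag handling to GET requests
    router.Use(CacheControlMiddleware())
    router.Use(ETagMiddleware())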
Redis Caching
Redis Cache Implementation
type RedisCache struct {
client redis.Cmdable
}
func NewRedisCache(client redis.Cmdable) *RedisCache {
return &RedisCache{client: client}
}
func (rc *RedisCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
data, err := json.Marshal(value)
if err != nil {
return err
}
return rc.client.Set(ctx, key, data, ttl).Err()
}
func (rc *RedisCache) Get(ctx context.Context, key string, dest interface{}) error {
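// go-redis reports a missing key as the sentinel error redis.Nil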
data, err := rc.client.Get(ctx, key).Result()
if err != nil {
return err
}
return json.Unmarshal([]byte(data), dest)
}
func (rc *RedisCache) Delete(ctx context.Context, keys ...string) error {
return rc.client.Del(ctx, keys...).Err()
}
func (rc *RedisCache) InvalidatePattern(ctx context.Context, pattern string) error {
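// Note: KEYS scans the whole keyspace and blocks Redis; prefer SCAN for large production datasets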
keys, err := rc.client.Keys(ctx, pattern).Result()
if err != nil {
return err
}
if len(keys) > 0 {
return rc.client.Del(ctx, keys...).Err()
}
return nil
}
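Wiring this up requires a Redis client. A minimal sketch assuming the github.com/redis/go-redis/v9 package and a local Redis instance (address and database number are placeholders):

    client := redis.NewClient(&redis.Options{
        Addr: "localhost:6379", // placeholder address
        DB:   0,
    })
    cache := NewRedisCache(client)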
Distributed Cache with Redis
type DistributedGardenFetcher struct {
fetcher *srv.GardenFetcher
cache *RedisCache
}
func (f *DistributedGardenFetcher) FindOneById(ctx context.Context, id string, mod *fetcher.FetcherMod) (*mdl.Garden, bool, error) {
cacheKey := fmt.Sprintf("garden:%s", id)
// Try cache first
var garden mdl.Garden
err := f.cache.Get(ctx, cacheKey, &garden)
if err == nil {
return &garden, true, nil
}
// Cache miss (redis.Nil) or Redis error - fall back to the database
fetchedGarden, exists, err := f.fetcher.FindOneById(ctx, id, mod)
if err != nil || !exists {
return fetchedGarden, exists, err
}
// Cache the result asynchronously so the response is not delayed; the write error is deliberately ignored
go func() {
_ = f.cache.Set(context.Background(), cacheKey, fetchedGarden, 15*time.Minute)
}()
return fetchedGarden, exists, nil
}
Cache Warming
Background Cache Warming
type CacheWarmer struct {
gardenFetcher *CachedGardenFetcher
plantFetcher *CachedPlantFetcher
logger *log.Logger
}
func (cw *CacheWarmer) WarmCache(ctx context.Context) error {
cw.logger.Println("Starting cache warming...")
// Warm most accessed gardens
err := cw.warmPopularGardens(ctx)
if err != nil {
return err
}
// Warm recent plants
err = cw.warmRecentPlants(ctx)
if err != nil {
return err
}
cw.logger.Println("Cache warming completed")
return nil
}
func (cw *CacheWarmer) warmPopularGardens(ctx context.Context) error {
// Get list of popular garden IDs (from analytics, etc.)
popularIds := []string{"garden1", "garden2", "garden3"}
for _, id := range popularIds {
_, _, err := cw.gardenFetcher.FindOneById(ctx, id, nil)
if err != nil {
cw.logger.Printf("Failed to warm garden %s: %v", id, err)
}
}
return nil
}
// Schedule cache warming
func (app *App) StartCacheWarmer() {
warmer := &CacheWarmer{
gardenFetcher: app.container.CachedGardenFetcher,
plantFetcher: app.container.CachedPlantFetcher,
logger: app.container.Logger,
}
ticker := time.NewTicker(1 * time.Hour)
go func() {
for range ticker.C {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
if err := warmer.WarmCache(ctx); err != nil {
warmer.logger.Printf("cache warming failed: %v", err)
}
cancel()
}
}()
}
Cache Patterns
Write-Through Cache
func (h *CachedGardenHandler) Create(ctx context.Context, garden *mdl.Garden, mod *handler.HandlerMod) (*mdl.Garden, error) {
// Write to database first
createdGarden, err := h.handler.Create(ctx, garden, mod)
if err != nil {
return nil, err
}
// Then write to cache
cacheKey := fmt.Sprintf("garden:%s", createdGarden.Id)
h.cache.Set(cacheKey, createdGarden, 15*time.Minute)
return createdGarden, nil
}
Write-Behind Cache
type WriteBehindCache struct {
cache *MemoryCache
writeQueue chan CacheWrite
handler *srv.GardenHandler
}
type CacheWrite struct {
Key string
Garden *mdl.Garden
}
func (wbc *WriteBehindCache) Set(key string, garden *mdl.Garden, ttl time.Duration) {
// Write to cache immediately
wbc.cache.Set(key, garden, ttl)
// Queue for background database write
select {
case wbc.writeQueue <- CacheWrite{Key: key, Garden: garden}:
default:
// Queue is full - drop the write and log so the loss is visible
log.Printf("write-behind queue full, dropping write for key %s", key)
}
}
func (wbc *WriteBehindCache) processWrites() {
for write := range wbc.writeQueue {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err := wbc.handler.Update(ctx, write.Garden, nil)
if err != nil {
log.Printf("Failed to write garden %s to database: %v", write.Garden.Id, err)
}
cancel()
}
}
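Queued writes only reach the database while processWrites is running. A constructor that sets up a buffered queue and starts the worker could look like the following; the queue size of 100 is an arbitrary illustration:

    func NewWriteBehindCache(handler *srv.GardenHandler) *WriteBehindCache {
        wbc := &WriteBehindCache{
            cache:      NewMemoryCache(),
            writeQueue: make(chan CacheWrite, 100), // buffer size chosen for illustration
            handler:    handler,
        }
        // Drain the queue in the background for the lifetime of the cache
        go wbc.processWrites()
        return wbc
    }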
Monitoring and Metrics
Cache Metrics
type CacheMetrics struct {
hits int64
misses int64
sets int64
deletes int64
errors int64
}
func (cm *CacheMetrics) RecordHit() {
atomic.AddInt64(&cm.hits, 1)
}
func (cm *CacheMetrics) RecordMiss() {
atomic.AddInt64(&cm.misses, 1)
}
func (cm *CacheMetrics) HitRate() float64 {
hits := atomic.LoadInt64(&cm.hits)
misses := atomic.LoadInt64(&cm.misses)
total := hits + misses
if total == 0 {
return 0
}
return float64(hits) / float64(total)
}
// Usage in a cache wrapper that records metrics
type MetricsCache struct {
cache *MemoryCache
metrics *CacheMetrics
}
func (c *MetricsCache) Get(key string) (interface{}, bool) {
value, exists := c.cache.Get(key)
if exists {
c.metrics.RecordHit()
} else {
c.metrics.RecordMiss()
}
return value, exists
}
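To make these counters observable, the hit rate can be exposed over HTTP. A hypothetical Gin handler, where the route and response fields are illustrative rather than part of the framework:

    func (c *MetricsCache) StatsHandler() gin.HandlerFunc {
        return func(ctx *gin.Context) {
            // Report raw counters plus the derived hit rate
            ctx.JSON(200, gin.H{
                "hits":     atomic.LoadInt64(&c.metrics.hits),
                "misses":   atomic.LoadInt64(&c.metrics.misses),
                "hit_rate": c.metrics.HitRate(),
            })
        }
    }

It could then be mounted on an internal route, for example router.GET("/internal/cache/stats", metricsCache.StatsHandler()).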
Caching is essential for building performant web applications. Choose the right caching strategy based on your data access patterns and consistency requirements.