logo

API Response Caching

Cache API responses to reduce network requests and improve response times.

Basic Pattern

import { MemoryCache } from '@humanspeak/memory-cache'

/**
 * Envelope stored in the cache: the deserialized response payload plus
 * the timestamp (ms since epoch) at which it was written.
 */
interface ApiResponse<T> {
    data: T
    cachedAt: number
}

// Module-level cache shared by all fetchWithCache calls. Holds at most 500
// entries; each entry expires 5 minutes after being set.
// NOTE(review): eviction order when maxSize is exceeded is presumably LRU —
// confirm against the @humanspeak/memory-cache documentation.
const apiCache = new MemoryCache<ApiResponse<unknown>>({
    maxSize: 500,
    ttl: 5 * 60 * 1000  // 5 minutes
})

/**
 * Fetch a JSON API response, serving from the shared cache when a live
 * entry exists (freshness is enforced by the cache's TTL).
 *
 * @param url - Request URL; used verbatim as the cache key, so include
 *              query parameters to avoid collisions.
 * @returns The parsed JSON body, cast to the caller-supplied type `T`.
 * @throws Error when the response status is not in the 2xx range —
 *         error responses are never cached.
 */
async function fetchWithCache<T>(url: string): Promise<T> {
    // Check cache first
    const cached = apiCache.get(url)
    if (cached) {
        console.log('Cache hit:', url)
        return cached.data as T
    }

    // Fetch and cache
    console.log('Cache miss:', url)
    const response = await fetch(url)

    // Don't cache (or attempt to parse) error responses: a transient 5xx
    // would otherwise be served to callers for the entire TTL window.
    if (!response.ok) {
        throw new Error(`Request failed: ${response.status} ${response.statusText} (${url})`)
    }

    const data = (await response.json()) as T

    apiCache.set(url, {
        data,
        cachedAt: Date.now()
    })

    return data
}

// Usage
// NOTE(review): `User` is not declared in this snippet — assumed to be
// imported from the application's model types.
const users = await fetchWithCache<User[]>('/api/users')
const user = await fetchWithCache<User>('/api/users/123')
import { MemoryCache } from '@humanspeak/memory-cache'

/**
 * Envelope stored in the cache: the deserialized response payload plus
 * the timestamp (ms since epoch) at which it was written.
 */
interface ApiResponse<T> {
    data: T
    cachedAt: number
}

// Module-level cache shared by all fetchWithCache calls. Holds at most 500
// entries; each entry expires 5 minutes after being set.
// NOTE(review): eviction order when maxSize is exceeded is presumably LRU —
// confirm against the @humanspeak/memory-cache documentation.
const apiCache = new MemoryCache<ApiResponse<unknown>>({
    maxSize: 500,
    ttl: 5 * 60 * 1000  // 5 minutes
})

/**
 * Fetch a JSON API response, serving from the shared cache when a live
 * entry exists (freshness is enforced by the cache's TTL).
 *
 * @param url - Request URL; used verbatim as the cache key, so include
 *              query parameters to avoid collisions.
 * @returns The parsed JSON body, cast to the caller-supplied type `T`.
 * @throws Error when the response status is not in the 2xx range —
 *         error responses are never cached.
 */
async function fetchWithCache<T>(url: string): Promise<T> {
    // Check cache first
    const cached = apiCache.get(url)
    if (cached) {
        console.log('Cache hit:', url)
        return cached.data as T
    }

    // Fetch and cache
    console.log('Cache miss:', url)
    const response = await fetch(url)

    // Don't cache (or attempt to parse) error responses: a transient 5xx
    // would otherwise be served to callers for the entire TTL window.
    if (!response.ok) {
        throw new Error(`Request failed: ${response.status} ${response.statusText} (${url})`)
    }

    const data = (await response.json()) as T

    apiCache.set(url, {
        data,
        cachedAt: Date.now()
    })

    return data
}

// Usage
// NOTE(review): `User` is not declared in this snippet — assumed to be
// imported from the application's model types.
const users = await fetchWithCache<User[]>('/api/users')
const user = await fetchWithCache<User>('/api/users/123')

With Hooks for Monitoring

import { MemoryCache } from '@humanspeak/memory-cache'

// Same cache configuration as the basic pattern, with lifecycle hooks
// added for observability.
// NOTE(review): `metrics` is not declared in this snippet — assumed to be
// an application-level metrics client with a counter-style increment API.
const apiCache = new MemoryCache<ApiResponse<unknown>>({
    maxSize: 500,
    ttl: 5 * 60 * 1000,
    hooks: {
        // Invoked on a successful cache lookup.
        onHit: ({ key }) => {
            metrics.increment('api_cache.hit')
            console.log(`API cache hit: ${key}`)
        },
        // `reason` describes why the lookup missed (e.g. absent vs
        // expired — confirm values against the library docs).
        onMiss: ({ key, reason }) => {
            metrics.increment('api_cache.miss')
            console.log(`API cache miss: ${key} (${reason})`)
        },
        // Invoked after a value is written to the cache.
        onSet: ({ key }) => {
            console.log(`API response cached: ${key}`)
        }
    }
})
import { MemoryCache } from '@humanspeak/memory-cache'

// Same cache configuration as the basic pattern, with lifecycle hooks
// added for observability.
// NOTE(review): `metrics` is not declared in this snippet — assumed to be
// an application-level metrics client with a counter-style increment API.
const apiCache = new MemoryCache<ApiResponse<unknown>>({
    maxSize: 500,
    ttl: 5 * 60 * 1000,
    hooks: {
        // Invoked on a successful cache lookup.
        onHit: ({ key }) => {
            metrics.increment('api_cache.hit')
            console.log(`API cache hit: ${key}`)
        },
        // `reason` describes why the lookup missed (e.g. absent vs
        // expired — confirm values against the library docs).
        onMiss: ({ key, reason }) => {
            metrics.increment('api_cache.miss')
            console.log(`API cache miss: ${key} (${reason})`)
        },
        // Invoked after a value is written to the cache.
        onSet: ({ key }) => {
            console.log(`API response cached: ${key}`)
        }
    }
})

Key Considerations

  • TTL: Choose based on how often data changes (1-5 minutes typical)
  • Max Size: Consider memory constraints and number of unique endpoints
  • Cache Key: Use the full URL including query parameters
  • Error Handling: Don’t cache error responses — check `response.ok` before writing to the cache, otherwise a transient failure is served for the full TTL