feat: production hardening — slog, rate limiting, integration tests, seed data (Phase 4)
- Structured logging: replace log.* with log/slog JSON output across backend - Request logger middleware: logs method, path, status, duration for all non-health requests - Rate limiting: token bucket (5 req/min, burst 10) on AI endpoints (/api/ai/*) - Integration tests: full critical path test (auth -> create case -> add deadline -> dashboard) - Seed demo data: 1 tenant, 5 cases with deadlines/appointments/parties/events - docker-compose.yml: add all required env vars (DATABASE_URL, SUPABASE_*, ANTHROPIC_API_KEY) - .env.example: document all env vars including DATABASE_URL and CalDAV note
This commit is contained in:
98
backend/internal/middleware/ratelimit.go
Normal file
98
backend/internal/middleware/ratelimit.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package middleware
|
||||
|
||||
import (
	"log/slog"
	"net/http"
	"strings"
	"sync"
	"time"
)
|
||||
|
||||
// TokenBucket implements a simple per-IP token bucket rate limiter.
// Each client key gets an independent bucket that refills continuously
// at rate tokens per second up to a cap of burst. The zero value is not
// usable; construct with NewTokenBucket. Safe for concurrent use.
type TokenBucket struct {
	mu      sync.Mutex         // guards buckets and every bucket's fields
	buckets map[string]*bucket // per-client state, keyed by client IP
	rate    float64            // tokens per second (refill speed)
	burst   int                // max tokens (instantaneous request burst allowed)
}
|
||||
|
||||
// bucket holds the token state for a single client key.
type bucket struct {
	tokens   float64   // currently available tokens; fractional between refills
	lastTime time.Time // timestamp of the last refill calculation
}
|
||||
|
||||
// NewTokenBucket creates a rate limiter allowing rate requests per second with burst capacity.
|
||||
func NewTokenBucket(rate float64, burst int) *TokenBucket {
|
||||
tb := &TokenBucket{
|
||||
buckets: make(map[string]*bucket),
|
||||
rate: rate,
|
||||
burst: burst,
|
||||
}
|
||||
// Periodically clean up stale buckets
|
||||
go tb.cleanup()
|
||||
return tb
|
||||
}
|
||||
|
||||
func (tb *TokenBucket) allow(key string) bool {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
|
||||
b, ok := tb.buckets[key]
|
||||
if !ok {
|
||||
b = &bucket{tokens: float64(tb.burst), lastTime: time.Now()}
|
||||
tb.buckets[key] = b
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
elapsed := now.Sub(b.lastTime).Seconds()
|
||||
b.tokens += elapsed * tb.rate
|
||||
if b.tokens > float64(tb.burst) {
|
||||
b.tokens = float64(tb.burst)
|
||||
}
|
||||
b.lastTime = now
|
||||
|
||||
if b.tokens < 1 {
|
||||
return false
|
||||
}
|
||||
b.tokens--
|
||||
return true
|
||||
}
|
||||
|
||||
func (tb *TokenBucket) cleanup() {
|
||||
ticker := time.NewTicker(5 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
tb.mu.Lock()
|
||||
cutoff := time.Now().Add(-10 * time.Minute)
|
||||
for key, b := range tb.buckets {
|
||||
if b.lastTime.Before(cutoff) {
|
||||
delete(tb.buckets, key)
|
||||
}
|
||||
}
|
||||
tb.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Limit wraps an http.Handler with rate limiting.
|
||||
func (tb *TokenBucket) Limit(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ip := r.Header.Get("X-Forwarded-For")
|
||||
if ip == "" {
|
||||
ip = r.RemoteAddr
|
||||
}
|
||||
if !tb.allow(ip) {
|
||||
slog.Warn("rate limit exceeded", "ip", ip, "path", r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Retry-After", "10")
|
||||
w.WriteHeader(http.StatusTooManyRequests)
|
||||
w.Write([]byte(`{"error":"rate limit exceeded, try again later"}`))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// LimitFunc wraps an http.HandlerFunc with rate limiting.
|
||||
func (tb *TokenBucket) LimitFunc(next http.HandlerFunc) http.HandlerFunc {
|
||||
limited := tb.Limit(http.HandlerFunc(next))
|
||||
return limited.ServeHTTP
|
||||
}
|
||||
70
backend/internal/middleware/ratelimit_test.go
Normal file
70
backend/internal/middleware/ratelimit_test.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTokenBucket_AllowsBurst(t *testing.T) {
|
||||
tb := NewTokenBucket(1.0, 5) // 1/sec, burst 5
|
||||
|
||||
handler := tb.LimitFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Should allow burst of 5 requests
|
||||
for i := 0; i < 5; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("request %d: expected 200, got %d", i+1, w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// 6th request should be rate limited
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusTooManyRequests {
|
||||
t.Fatalf("request 6: expected 429, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTokenBucket_DifferentIPs(t *testing.T) {
|
||||
tb := NewTokenBucket(1.0, 2) // 1/sec, burst 2
|
||||
|
||||
handler := tb.LimitFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Exhaust IP1's bucket
|
||||
for i := 0; i < 2; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.Header.Set("X-Forwarded-For", "1.2.3.4")
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("ip1 request %d: expected 200, got %d", i+1, w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// IP1 should now be limited
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.Header.Set("X-Forwarded-For", "1.2.3.4")
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusTooManyRequests {
|
||||
t.Fatalf("ip1 request 3: expected 429, got %d", w.Code)
|
||||
}
|
||||
|
||||
// IP2 should still work
|
||||
req = httptest.NewRequest("GET", "/test", nil)
|
||||
req.Header.Set("X-Forwarded-For", "5.6.7.8")
|
||||
w = httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("ip2 request 1: expected 200, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user