Intermediate Level Nginx : Part 3¶
19: WebSocket Support in Nginx¶
What are WebSockets?¶
WebSockets provide full-duplex, bidirectional communication between client and server over a single TCP connection.
Use Cases: - Real-time chat applications - Live notifications - Collaborative editing - Gaming applications - Stock tickers / live data feeds - IoT device communication
Benefits: - Low latency - Reduced overhead (no HTTP headers per message) - Server can push data to clients - Persistent connection
How WebSockets Work¶
- Client initiates HTTP request with Upgrade header
- Server responds with 101 Switching Protocols
- Connection upgrades to WebSocket protocol
- Bidirectional communication begins
Basic WebSocket Configuration¶
http {
# WebSocket upgrade mapping
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80;
server_name example.com;
location /ws {
proxy_pass http://localhost:3000;
# WebSocket headers
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
}
}
}
Complete WebSocket Configuration¶
http {
# Map for WebSocket upgrade
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream websocket_backend {
server localhost:3000;
# Or multiple servers for load balancing
# server localhost:3001;
# server localhost:3002;
}
server {
listen 80;
server_name example.com;
location /ws {
proxy_pass http://websocket_backend;
# HTTP version must be 1.1
proxy_http_version 1.1;
# WebSocket upgrade headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# Standard proxy headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Timeouts for long-lived connections
proxy_read_timeout 3600s;
proxy_send_timeout 3600s;
# Buffering must be off for WebSockets
proxy_buffering off;
}
}
}
WebSocket with SSL/TLS (WSS)¶
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 443 ssl http2;
server_name example.com;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
# Regular HTTP traffic
location / {
root /var/www/example.com;
try_files $uri $uri/ =404;
}
# Secure WebSocket endpoint (wss://)
location /ws {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
# Important: Long timeouts for persistent connections.
# (proxy_connect_timeout only covers the TCP handshake and cannot
# usually exceed 75 seconds — long values belong on send/read.)
proxy_connect_timeout 75s;
proxy_send_timeout 7d;
proxy_read_timeout 7d;
proxy_buffering off;
}
}
}
Load Balancing WebSockets¶
IP Hash (Session Persistence)¶
upstream websocket_backend {
# Use ip_hash for sticky sessions
ip_hash;
server backend1.example.com:3000;
server backend2.example.com:3000;
server backend3.example.com:3000;
}
server {
listen 80;
server_name example.com;
location /ws {
proxy_pass http://websocket_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
}
}
Hash by Custom Header¶
upstream websocket_backend {
# Hash by user ID or session token
hash $http_x_user_id consistent;
server backend1.example.com:3000;
server backend2.example.com:3000;
server backend3.example.com:3000;
}
server {
listen 80;
server_name example.com;
location /ws {
proxy_pass http://websocket_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-User-ID $http_x_user_id;
}
}
Least Connections¶
upstream websocket_backend {
least_conn;
server backend1.example.com:3000 max_fails=3 fail_timeout=30s;
server backend2.example.com:3000 max_fails=3 fail_timeout=30s;
server backend3.example.com:3000 max_fails=3 fail_timeout=30s;
}
server {
listen 80;
server_name example.com;
location /ws {
proxy_pass http://websocket_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# Retry on failure
proxy_next_upstream error timeout http_502 http_503;
}
}
Multiple WebSocket Endpoints¶
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Different backends for different services
upstream chat_backend {
server localhost:3000;
}
upstream notifications_backend {
server localhost:3001;
}
upstream game_backend {
ip_hash;
server localhost:3002;
server localhost:3003;
}
server {
listen 80;
server_name example.com;
# Chat WebSocket
location /ws/chat {
proxy_pass http://chat_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_read_timeout 3600s;
}
# Notifications WebSocket
location /ws/notifications {
proxy_pass http://notifications_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_read_timeout 3600s;
}
# Game WebSocket (with sticky sessions)
location /ws/game {
proxy_pass http://game_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_read_timeout 7200s;
}
}
}
WebSocket with Authentication¶
Query Parameter Authentication¶
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Validate token (simplified example)
map $arg_token $valid_token {
default 0;
"secret123" 1;
"secret456" 1;
}
server {
listen 80;
server_name example.com;
location /ws {
# Check token
if ($valid_token = 0) {
return 401;
}
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
# Pass token to backend
proxy_set_header X-Auth-Token $arg_token;
}
}
}
Header-Based Authentication¶
server {
listen 80;
server_name example.com;
location /ws {
# Validate Authorization header
if ($http_authorization = "") {
return 401 '{"error":"Missing authorization"}';
}
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
# Pass auth header to backend
proxy_set_header Authorization $http_authorization;
}
}
Cookie-Based Authentication¶
server {
listen 80;
server_name example.com;
location /ws {
# Check for session cookie
if ($cookie_session_id = "") {
return 401 '{"error":"Not authenticated"}';
}
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header Cookie $http_cookie;
}
}
Rate Limiting WebSocket Connections¶
# Limit WebSocket connections per IP
limit_conn_zone $binary_remote_addr zone=ws_conn:10m;
# Limit connection rate
limit_req_zone $binary_remote_addr zone=ws_req:10m rate=10r/m;
server {
listen 80;
server_name example.com;
location /ws {
# Limit new connections
limit_req zone=ws_req burst=5;
limit_conn ws_conn 5;
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_read_timeout 3600s;
}
}
Monitoring WebSocket Connections¶
Custom Log Format¶
log_format websocket '$remote_addr - [$time_local] "$request" '
'$status $body_bytes_sent '
'upgrade=$http_upgrade '
'connection=$connection_upgrade '
'request_time=$request_time '
'upstream=$upstream_addr';
server {
listen 80;
server_name example.com;
access_log /var/log/nginx/websocket.log websocket;
location /ws {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
Check Active Connections¶
# View WebSocket connections in access log
grep "upgrade=websocket" /var/log/nginx/access.log
# Count active WebSocket connections (ss is the modern replacement for netstat)
ss -tn state established | grep ':3000' | wc -l
# Monitor in real-time
watch "ss -tn state established | grep ':3000' | wc -l"
Complete Production Configuration¶
http {
# WebSocket upgrade mapping
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Rate limiting
limit_conn_zone $binary_remote_addr zone=ws_conn:10m;
limit_req_zone $binary_remote_addr zone=ws_req:10m rate=10r/m;
# Backend servers
upstream websocket_backend {
ip_hash;
server backend1.example.com:3000 max_fails=3 fail_timeout=30s;
server backend2.example.com:3000 max_fails=3 fail_timeout=30s;
server backend3.example.com:3000 backup;
}
# Custom log format
log_format ws_log '$remote_addr - [$time_local] "$request" '
'$status $body_bytes_sent '
'upgrade=$http_upgrade '
'backend=$upstream_addr '
'request_time=$request_time '
'upstream_time=$upstream_response_time';
server {
listen 443 ssl http2;
server_name example.com;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
# Regular content
location / {
root /var/www/example.com;
index index.html;
try_files $uri $uri/ =404;
}
# WebSocket endpoint
location /ws {
access_log /var/log/nginx/websocket.log ws_log;
# Rate limiting
limit_req zone=ws_req burst=5 nodelay;
limit_conn ws_conn 10;
limit_req_status 429;
# Authentication check (example)
if ($http_authorization = "") {
return 401 '{"error":"Unauthorized"}';
}
# Proxy to backend
proxy_pass http://websocket_backend;
# HTTP version
proxy_http_version 1.1;
# WebSocket headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# Standard headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
# Authentication
proxy_set_header Authorization $http_authorization;
# Timeouts (24 hours for persistent connections; the connect
# timeout only covers the TCP handshake and is capped at ~75s)
proxy_connect_timeout 75s;
proxy_send_timeout 1d;
proxy_read_timeout 1d;
# Disable buffering
proxy_buffering off;
# Retry logic
proxy_next_upstream error timeout http_502 http_503;
proxy_next_upstream_tries 2;
}
# Health check endpoint
location /health {
access_log off;
return 200 "OK\n";
add_header Content-Type text/plain;
}
}
}
Testing WebSocket Connection¶
Using wscat (WebSocket client)¶
# Install wscat
npm install -g wscat
# Connect to WebSocket
wscat -c ws://example.com/ws
# Connect with SSL
wscat -c wss://example.com/ws
# With authentication
wscat -c "wss://example.com/ws?token=secret123"
# With headers
wscat -c wss://example.com/ws -H "Authorization: Bearer token123"
Using curl¶
# Test WebSocket upgrade
curl -i -N \
-H "Connection: Upgrade" \
-H "Upgrade: websocket" \
-H "Sec-WebSocket-Version: 13" \
-H "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==" \
http://example.com/ws
# Should return:
# HTTP/1.1 101 Switching Protocols
# Upgrade: websocket
# Connection: Upgrade
JavaScript Client¶
// Basic WebSocket client
const ws = new WebSocket('wss://example.com/ws');
ws.onopen = () => {
console.log('Connected');
ws.send('Hello Server!');
};
ws.onmessage = (event) => {
console.log('Received:', event.data);
};
ws.onerror = (error) => {
console.error('WebSocket error:', error);
};
ws.onclose = () => {
console.log('Disconnected');
};
// With authentication
const wsAuth = new WebSocket('wss://example.com/ws?token=secret123');
// Or with subprotocols
const wsProto = new WebSocket('wss://example.com/ws', ['protocol1', 'protocol2']);
Node.js Server Example¶
// Simple WebSocket server with ws library
const WebSocket = require('ws');
const wss = new WebSocket.Server({ port: 3000 });
wss.on('connection', (ws, req) => {
console.log('Client connected:', req.socket.remoteAddress); // req.connection is deprecated
// Get headers passed from Nginx
console.log('Real IP:', req.headers['x-real-ip']);
console.log('Auth:', req.headers['authorization']);
ws.on('message', (message) => {
console.log('Received:', message);
// Echo back
ws.send(`Echo: ${message}`);
});
ws.on('close', () => {
console.log('Client disconnected');
});
ws.on('error', (error) => {
console.error('WebSocket error:', error);
});
// Send periodic updates
const interval = setInterval(() => {
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ time: new Date() }));
}
}, 5000);
ws.on('close', () => clearInterval(interval));
});
console.log('WebSocket server running on port 3000');
Common WebSocket Issues¶
Issue: 502 Bad Gateway¶
# Ensure backend is running
curl http://localhost:3000
# Check proxy_http_version
proxy_http_version 1.1; # Must be 1.1
# Check Upgrade headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# Check logs
tail -f /var/log/nginx/error.log
Issue: Connection Closes Prematurely¶
# Increase read/send timeouts (proxy_connect_timeout only covers the
# TCP handshake and cannot usually exceed 75 seconds, so raising it
# does not help with long-lived WebSocket connections)
proxy_send_timeout 1d;
proxy_read_timeout 1d;
# Disable buffering
proxy_buffering off;
# Check for intermediate proxies timeout
Issue: Load Balancing Not Working¶
# Use ip_hash for sticky sessions
upstream websocket_backend {
ip_hash;
server backend1:3000;
server backend2:3000;
}
# Or use consistent hashing
upstream websocket_backend {
hash $http_x_user_id consistent;
server backend1:3000;
server backend2:3000;
}
Issue: Headers Not Passed to Backend¶
# Ensure headers are set correctly
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Log headers for debugging
log_format debug '$remote_addr - $request - $http_upgrade';
access_log /var/log/nginx/debug.log debug;
WebSocket Security Best Practices¶
-
Always use WSS (WebSocket Secure)
-
Implement Authentication
-
Rate Limiting
-
Origin Validation
-
Set Reasonable Timeouts
-
Monitor Connections
Performance Tips
- Use ip_hash for sticky sessions
- Disable proxy_buffering
- Set appropriate timeouts
- Use HTTP/1.1 with keepalive
- Monitor connection count
- Implement connection limits
- Use multiple backend servers
- Enable TCP optimization (tcp_nodelay)
20: Nginx Caching Strategies for Better Performance¶
Why Cache with Nginx?¶
Caching stores responses and serves them directly without hitting backend servers:
Benefits: - Reduced latency: Instant responses from cache - Lower backend load: Fewer requests to application servers - Bandwidth savings: Serve cached content efficiently - Improved scalability: Handle more concurrent users - Cost reduction: Less backend infrastructure needed
Types of Caching in Nginx¶
- Proxy cache: Cache responses from backend servers
- FastCGI cache: Cache PHP/dynamic content
- Browser cache: Control client-side caching with headers
- Microcaching: Cache dynamic content for very short periods
Basic Proxy Caching¶
# Define cache path
proxy_cache_path /var/cache/nginx/proxy
levels=1:2
keys_zone=my_cache:10m
max_size=1g
inactive=60m;
upstream backend {
server localhost:3000;
}
server {
listen 80;
server_name example.com;
location / {
# Enable caching
proxy_cache my_cache;
# Cache successful responses for 10 minutes
proxy_cache_valid 200 10m;
# Add header to show cache status
add_header X-Cache-Status $upstream_cache_status;
proxy_pass http://backend;
proxy_set_header Host $host;
}
}
proxy_cache_path Directives Explained¶
proxy_cache_path /var/cache/nginx/proxy # Cache directory
levels=1:2 # Directory structure (e.g., /a/bc/)
keys_zone=cache:10m # Memory zone name and size
max_size=1g # Maximum cache size on disk
inactive=60m # Remove if not accessed in 60 min
use_temp_path=off # Write directly to cache path
loader_threshold=300ms # Time limit for loading cache
loader_files=200 # Max files to load per iteration
manager_files=100 # Max files to delete per iteration
manager_threshold=200ms; # Time limit for managing cache
Complete Caching Configuration¶
# Cache path configuration
proxy_cache_path /var/cache/nginx/proxy
levels=1:2
keys_zone=api_cache:10m
max_size=1g
inactive=60m
use_temp_path=off;
# Define cache key
proxy_cache_key "$scheme$request_method$host$request_uri";
upstream backend {
server localhost:3000;
}
server {
listen 80;
server_name example.com;
location / {
# Enable cache
proxy_cache api_cache;
# Cache status codes and durations
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
proxy_cache_valid any 1m;
# Cache methods
proxy_cache_methods GET HEAD;
# Minimum uses before caching
proxy_cache_min_uses 2;
# Cache even with Set-Cookie header
proxy_ignore_headers Set-Cookie;
# Cache key
proxy_cache_key "$scheme$request_method$host$request_uri$is_args$args";
# Headers
add_header X-Cache-Status $upstream_cache_status;
add_header X-Cache-Key "$scheme$request_method$host$request_uri";
# Proxy settings
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
Cache Status Values¶
# $upstream_cache_status values:
# MISS - Response not found in cache, fetched from backend
# HIT - Response served from cache
# EXPIRED - Cached response expired, fetched from backend
# STALE - Serving stale content (backend unavailable)
# UPDATING - Content is stale but being updated in background
# REVALIDATED - Content revalidated with backend (304 response)
# BYPASS - Cache bypassed intentionally
Cache Bypass Conditions¶
# Don't cache if certain conditions are met.
# (Each map must define a UNIQUE variable — defining $no_cache twice
# makes nginx fail with a "duplicate variable" error, so the two
# conditions get their own variables and are then combined.)
map $request_uri $no_cache_uri {
default 0;
~^/admin 1;
~^/checkout 1;
}
map $http_cookie $no_cache_cookie {
default 0;
~*session 1;
~*logged_in 1;
}
# Skip the cache if either condition matched
map "$no_cache_uri$no_cache_cookie" $no_cache {
default 1;
"00" 0;
}
server {
listen 80;
server_name example.com;
location / {
proxy_cache api_cache;
proxy_cache_valid 200 10m;
# Bypass cache based on variables
proxy_cache_bypass $no_cache;
proxy_no_cache $no_cache;
# Bypass if Authorization header present
proxy_cache_bypass $http_authorization;
proxy_no_cache $http_authorization;
add_header X-Cache-Status $upstream_cache_status;
proxy_pass http://backend;
}
}
Cache Purging¶
Manual Purge (Nginx Plus or with module)¶
# Requires ngx_cache_purge module
location ~ /purge(/.*) {
allow 127.0.0.1;
deny all;
proxy_cache_purge api_cache "$scheme$request_method$host$1";
}
# Purge cache:
# curl http://example.com/purge/api/users
Time-Based Cache Invalidation¶
location / {
proxy_cache api_cache;
# Cache for 5 minutes
proxy_cache_valid 200 5m;
# Serve stale content if backend is down
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
# Allow serving stale content while updating
proxy_cache_background_update on;
proxy_pass http://backend;
}
Cache Invalidation by Header¶
# Backend sends X-Cache-Invalidate header.
# Note: proxy_cache_bypass is evaluated BEFORE the request goes upstream,
# so it can never see $upstream_http_* response headers; only
# proxy_no_cache (evaluated when the response arrives) can use them.
map $upstream_http_x_cache_invalidate $no_store {
default 0;
"1" 1;
}
location / {
proxy_cache api_cache;
proxy_no_cache $no_store;
proxy_pass http://backend;
}
Microcaching (Cache Dynamic Content Briefly)¶
# Cache dynamic content for 1 second
proxy_cache_path /var/cache/nginx/micro levels=1:2 keys_zone=microcache:10m max_size=100m inactive=1m;
server {
listen 80;
server_name example.com;
location / {
proxy_cache microcache;
# Cache for 1 second
proxy_cache_valid 200 1s;
# Don't cache if user is logged in
proxy_cache_bypass $cookie_session;
proxy_no_cache $cookie_session;
# Lock: only one request to backend for same resource
proxy_cache_lock on;
proxy_cache_lock_timeout 5s;
add_header X-Cache-Status $upstream_cache_status;
proxy_pass http://backend;
}
}
Benefits of Microcaching: - Protects backend from traffic spikes - Reduces database queries - Minimal staleness (1 second) - Huge performance improvement for high-traffic sites
FastCGI Caching (for PHP)¶
# FastCGI cache configuration
fastcgi_cache_path /var/cache/nginx/fastcgi
levels=1:2
keys_zone=php_cache:10m
max_size=1g
inactive=60m;
fastcgi_cache_key "$scheme$request_method$host$request_uri";
server {
listen 80;
server_name example.com;
root /var/www/example.com;
index index.php;
# Don't cache if these conditions are met
set $skip_cache 0;
if ($request_method = POST) {
set $skip_cache 1;
}
if ($query_string != "") {
set $skip_cache 1;
}
if ($request_uri ~* "/wp-admin/|/admin/|/cart/|/checkout/") {
set $skip_cache 1;
}
if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_logged_in") {
set $skip_cache 1;
}
location / {
try_files $uri $uri/ /index.php?$args;
}
location ~ \.php$ {
fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
# Enable FastCGI cache
fastcgi_cache php_cache;
fastcgi_cache_valid 200 60m;
fastcgi_cache_valid 404 10m;
fastcgi_cache_bypass $skip_cache;
fastcgi_no_cache $skip_cache;
add_header X-Cache-Status $upstream_cache_status;
}
}
Cache Lock (Prevent Cache Stampede)¶
location / {
proxy_cache api_cache;
proxy_cache_valid 200 10m;
# Enable cache lock
proxy_cache_lock on;
# Wait up to 5 seconds for lock
proxy_cache_lock_timeout 5s;
# If lock times out, still serve stale content
proxy_cache_use_stale updating;
proxy_pass http://backend;
}
What this does: - When cache expires, only ONE request goes to backend - Other requests wait for the first one to complete - Prevents "thundering herd" problem - Reduces backend load
Stale Content Strategy¶
location / {
proxy_cache api_cache;
proxy_cache_valid 200 10m;
# Serve stale content in these conditions:
proxy_cache_use_stale error # Backend error
timeout # Backend timeout
invalid_header # Invalid response
updating # Cache is being updated
http_500 # Backend returns 500
http_502 # Backend returns 502
http_503 # Backend returns 503
http_504; # Backend returns 504
# Update cache in background
proxy_cache_background_update on;
# Note: there is no "proxy_cache_use_stale_timeout" directive in nginx;
# how long entries remain on disk (and thus how old stale content can
# get) is controlled by the "inactive" parameter of proxy_cache_path.
proxy_pass http://backend;
}
Conditional Caching Based on Response¶
# Only cache if backend sends "X-Cache-Enable: 1".
# (proxy_no_cache suppresses caching when its value is non-empty and
# non-zero, so the map must produce 1 when caching is NOT allowed.)
map $upstream_http_x_cache_enable $cache_disabled {
default 1;
"1" 0;
}
location / {
proxy_cache api_cache;
proxy_cache_valid 200 10m;
# Skip storing the response unless the backend opted in
proxy_no_cache $cache_disabled;
proxy_pass http://backend;
}
Cache Warming (Preload Cache)¶
#!/bin/bash
# cache-warm.sh - Preload cache with popular URLs
URLS=(
"https://example.com/"
"https://example.com/popular-page"
"https://example.com/api/products"
"https://example.com/api/categories"
)
for url in "${URLS[@]}"; do
echo "Warming cache: $url"
curl -s -o /dev/null -w "Status: %{http_code}\n" "$url"
sleep 0.5
done
echo "Cache warming complete"
# Run cache warming
./cache-warm.sh
# Or schedule with cron
# Every 5 minutes
*/5 * * * * /path/to/cache-warm.sh
Complete Production Caching Configuration¶
# Cache paths
proxy_cache_path /var/cache/nginx/api
levels=1:2
keys_zone=api_cache:10m
max_size=1g
inactive=60m
use_temp_path=off;
proxy_cache_path /var/cache/nginx/static
levels=1:2
keys_zone=static_cache:10m
max_size=2g
inactive=7d
use_temp_path=off;
# Cache bypass conditions
map $request_method $skip_cache {
default 0;
POST 1;
PUT 1;
DELETE 1;
}
map $http_cookie $skip_cache_cookie {
default 0;
~*session 1;
~*logged_in 1;
}
# Combine conditions
map $skip_cache$skip_cache_cookie $final_skip_cache {
default 0;
~*1 1;
}
upstream backend {
server backend1:3000;
server backend2:3000;
keepalive 32;
}
server {
listen 443 ssl http2;
server_name example.com;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
# API endpoints with caching
location /api/ {
proxy_cache api_cache;
proxy_cache_valid 200 5m;
proxy_cache_valid 404 1m;
proxy_cache_methods GET HEAD;
proxy_cache_key "$scheme$request_method$host$request_uri";
# Cache control
proxy_cache_bypass $final_skip_cache;
proxy_no_cache $final_skip_cache;
proxy_cache_bypass $http_authorization;
# Stale content strategy
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
proxy_cache_background_update on;
proxy_cache_lock on;
proxy_cache_lock_timeout 5s;
# Ignore certain headers
proxy_ignore_headers Cache-Control Expires;
# Headers
add_header X-Cache-Status $upstream_cache_status always;
add_header Cache-Control "public, max-age=300";
# Proxy
proxy_pass http://backend;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
# Static files with aggressive caching
location ~* \.(jpg|jpeg|png|gif|ico|css|js|svg|woff|woff2)$ {
proxy_cache static_cache;
proxy_cache_valid 200 7d;
proxy_cache_key "$scheme$request_method$host$request_uri";
add_header X-Cache-Status $upstream_cache_status;
add_header Cache-Control "public, max-age=604800, immutable";
expires 7d;
access_log off;
proxy_pass http://backend;
}
# Purge cache (restricted to localhost)
location ~ /purge(/.*) {
allow 127.0.0.1;
deny all;
proxy_cache_purge api_cache "$scheme$request_method$host$1";
}
# Cache status page
location /cache-status {
allow 127.0.0.1;
deny all;
default_type text/html;
content_by_lua_block {
-- Requires lua module
ngx.say("<h1>Cache Status</h1>")
-- Display cache statistics
}
}
}
Monitoring Cache Performance¶
Check Cache Directory¶
# View cache size
du -sh /var/cache/nginx/*
# Count cached files
find /var/cache/nginx/proxy -type f | wc -l
# View recent cache files
find /var/cache/nginx/proxy -type f -mmin -5 -ls
Analyze Cache Hit Rate¶
# Note: response headers such as X-Cache-Status are NOT written to the
# access log by default — include $upstream_cache_status in log_format first:
# log_format cache '$remote_addr "$request" $status cache=$upstream_cache_status';
# Count cache hits and misses
grep -o 'cache=[A-Z]*' /var/log/nginx/access.log | sort | uniq -c
# Hit rate percentage
HITS=$(grep -c 'cache=HIT' /var/log/nginx/access.log)
TOTAL=$(grep -c 'cache=' /var/log/nginx/access.log)
echo "scale=2; $HITS * 100 / $TOTAL" | bc
Real-Time Cache Monitoring¶
# Watch cache status (requires $upstream_cache_status in the log_format)
tail -f /var/log/nginx/access.log | grep --color 'cache='
# Monitor cache directory size
watch -n 5 'du -sh /var/cache/nginx/*'
Cache Maintenance¶
Clear Cache¶
# Clear all cache
sudo rm -rf /var/cache/nginx/proxy/*
# Clear specific cache zone
sudo rm -rf /var/cache/nginx/api/*
# Reload Nginx after clearing
sudo nginx -s reload
Automate Cache Cleanup¶
# /etc/cron.daily/nginx-cache-cleanup
#!/bin/bash
# Remove cache files older than 7 days
find /var/cache/nginx/proxy -type f -mtime +7 -delete
find /var/cache/nginx/proxy -type d -empty -delete
Best Practices
- Use appropriate cache durations: API=5m, Static=7d
- Implement cache bypass: For logged-in users, dynamic content
- Enable cache locking: Prevent stampede
- Serve stale content: When backend is unavailable
- Use microcaching: For dynamic content (1s cache)
- Monitor hit rates: Aim for 80%+ for cacheable content
- Set proper cache keys: Include relevant variables
- Implement purging: For content updates
- Use background updates: Update cache without blocking
- Size cache appropriately: Balance disk space and retention
Cache Performance Tips
- Use SSD for cache storage
- Adjust levels parameter based on file count
- Set inactive time to match access patterns
- Use proxy_cache_min_uses to cache only popular content
- Monitor cache size and adjust max_size
- Disable caching for user-specific content
- Use separate caches for different content types
- Enable background updates for frequently accessed content