Skip to content

Intermediate Level Nginx: Part 1

10: Reverse Proxy with Nginx: Concepts and Implementation

What is a Reverse Proxy?

A reverse proxy sits between clients and backend servers, forwarding client requests to the appropriate backend and returning responses to clients.

sequenceDiagram
    participant C as Client
    participant N as Nginx (Reverse Proxy)
    participant B as Backend Server(s)

    C->>N: HTTP Request
    N->>B: proxy_pass
    B-->>N: HTTP Response
    N-->>C: Client Response

Benefits of Reverse Proxy

  1. Load Distribution: Distribute traffic across multiple servers
  2. Security: Hide backend server details
  3. SSL Termination: Handle SSL/TLS encryption at the proxy level
  4. Caching: Cache responses to improve performance
  5. Compression: Compress responses before sending to clients
  6. Centralized Logging: Collect logs in one place
  7. Request Filtering: Block malicious requests

Basic Reverse Proxy Configuration

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://localhost:3000;
    }
}

Complete Reverse Proxy Setup

server {
    listen 80;
    server_name example.com;

    location / {
        # Backend server
        proxy_pass http://localhost:3000;

        # Preserve client information
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Timeouts
        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        # Buffer settings
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
        proxy_busy_buffers_size 8k;
    }
}

Proxying to Different Backend Applications

server {
    listen 80;
    server_name nodeapp.example.com;

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
server {
    listen 80;
    server_name djangoapp.example.com;

    location / {
        proxy_pass http://127.0.0.1:8000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    location /static/ {
        alias /var/www/djangoapp/static/;
    }

    location /media/ {
        alias /var/www/djangoapp/media/;
    }
}
server {
    listen 80;
    server_name phpapp.example.com;
    root /var/www/phpapp;

    index index.php index.html;

    location / {
        try_files $uri $uri/ /index.php?$query_string;
    }

    location ~ \.php$ {
        fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
}

Proxying to Multiple Backends

upstream backend_servers {
    server 192.168.1.10:8080;
    server 192.168.1.11:8080;
    server 192.168.1.12:8080;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend_servers;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

Path-Based Routing

server {
    listen 80;
    server_name example.com;

    # API requests to backend 1
    location /api/ {
        proxy_pass http://localhost:3000/;
        proxy_set_header Host $host;
    }

    # Admin panel to backend 2
    location /admin/ {
        proxy_pass http://localhost:4000/;
        proxy_set_header Host $host;
    }

    # Static files served directly
    location /static/ {
        root /var/www/html;
    }

    # Everything else to main app
    location / {
        proxy_pass http://localhost:5000;
        proxy_set_header Host $host;
    }
}

Important Proxy Headers Explained

# Original host requested by client
proxy_set_header Host $host;

# Client's real IP address
proxy_set_header X-Real-IP $remote_addr;

# Chain of proxy IPs (including real client)
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

# Original protocol (http or https)
proxy_set_header X-Forwarded-Proto $scheme;

# Original port
proxy_set_header X-Forwarded-Port $server_port;

# Original host header as sent by the client (includes port if the client sent one);
# $server_port would be nginx's own listen port, which may differ behind another proxy
proxy_set_header X-Forwarded-Host $http_host;

Handling WebSocket Connections

map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

server {
    listen 80;
    server_name websocket.example.com;

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;

        # Increase timeout for long-lived connections
        proxy_read_timeout 3600s;
        proxy_send_timeout 3600s;
    }
}

Proxy Buffering

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://localhost:3000;

        # Enable buffering (default)
        proxy_buffering on;

        # Buffer size for response headers
        proxy_buffer_size 4k;

        # Number and size of buffers for response body
        proxy_buffers 8 4k;

        # Limit on buffers that can be busy sending data to the client
        # while the response is not yet fully read from the backend
        proxy_busy_buffers_size 8k;

        # Max size of data buffered on disk
        proxy_max_temp_file_size 1024m;

        # Size of data written to the temp file at one time
        proxy_temp_file_write_size 8k;
    }
}

Disable Buffering for Streaming

location /stream {
    proxy_pass http://localhost:3000;

    # Disable buffering for real-time streaming
    proxy_buffering off;
    proxy_cache off;
    proxy_set_header Connection '';
    proxy_http_version 1.1;
    chunked_transfer_encoding off;
}

Error Handling

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://localhost:3000;

        # Custom error pages
        proxy_intercept_errors on;
        error_page 502 503 504 /50x.html;
    }

    location = /50x.html {
        root /var/www/html;
    }
}

Automatic Failover to the Next Upstream

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;
        proxy_next_upstream error timeout http_502 http_503 http_504;
        proxy_next_upstream_tries 3;
        proxy_next_upstream_timeout 10s;
    }
}

Testing Backend Availability

upstream backend {
    server backend1.example.com:8080 max_fails=3 fail_timeout=30s;
    server backend2.example.com:8080 max_fails=3 fail_timeout=30s;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;
        proxy_next_upstream error timeout http_502;
    }
}

Preserve Original Request URI

# Without trailing slash - appends URI
location /api {
    proxy_pass http://localhost:3000;
    # Request: /api/users → Backend: /api/users
}

# With trailing slash - replaces URI
location /api/ {
    proxy_pass http://localhost:3000/;
    # Request: /api/users → Backend: /users
}

# Specific path replacement
location /old-api/ {
    proxy_pass http://localhost:3000/new-api/;
    # Request: /old-api/users → Backend: /new-api/users
}

Advanced Configuration Example

upstream backend {
    least_conn;
    server backend1.example.com:8080 weight=3;
    server backend2.example.com:8080 weight=2;
    server backend3.example.com:8080 backup;

    keepalive 32;
}

server {
    listen 80;
    server_name example.com;

    access_log /var/log/nginx/proxy-access.log;
    error_log /var/log/nginx/proxy-error.log;

    location / {
        # Proxy to upstream
        proxy_pass http://backend;

        # HTTP version and keepalive
        proxy_http_version 1.1;
        proxy_set_header Connection "";

        # Headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Timeouts
        proxy_connect_timeout 30s;
        proxy_send_timeout 30s;
        proxy_read_timeout 30s;

        # Retry logic
        proxy_next_upstream error timeout http_502 http_503;
        proxy_next_upstream_tries 2;

        # Buffer settings
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;

        # Hide backend headers
        proxy_hide_header X-Powered-By;
        proxy_hide_header Server;
    }
}

Debugging Proxy Issues

# Enable detailed error logging
error_log /var/log/nginx/debug.log debug;

# Add debug headers (remove in production!)
location / {
    proxy_pass http://backend;
    add_header X-Proxy-Backend $proxy_host;
    add_header X-Proxy-Port $proxy_port;
    add_header X-Proxy-URI $request_uri;
}

Common Proxy Directives Reference

Directive Purpose Default
proxy_pass Backend server URL -
proxy_set_header Set headers sent to backend -
proxy_buffering Enable/disable buffering on
proxy_connect_timeout Timeout for connecting 60s
proxy_send_timeout Timeout for sending request 60s
proxy_read_timeout Timeout for reading response 60s
proxy_next_upstream When to try next server error timeout
proxy_redirect Modify Location headers default
proxy_http_version HTTP version to use 1.0

Best Practices

  1. Always set proper headers: Include Host, X-Real-IP, X-Forwarded-For
  2. Use keepalive connections: Improve performance with HTTP/1.1
  3. Configure appropriate timeouts: Match your application's needs
  4. Handle errors gracefully: Use custom error pages
  5. Monitor backend health: Use max_fails and fail_timeout
  6. Secure headers: Hide sensitive backend information
  7. Test thoroughly: Verify headers reach backend correctly
  8. Use buffering wisely: Disable for streaming, enable for normal traffic

Troubleshooting Common Issues

502 Bad Gateway

# Check backend is running
curl http://localhost:3000

# Check nginx error logs
sudo tail -f /var/log/nginx/error.log

# Verify proxy_pass URL is correct

Headers not passed correctly

# Log headers for debugging
log_format debug '$remote_addr - $remote_user [$time_local] '
                '"$request" $status $body_bytes_sent '
                '"$http_referer" "$http_user_agent" '
                '"$http_x_forwarded_for"';

access_log /var/log/nginx/debug.log debug;

Timeout errors

# Increase timeouts
proxy_connect_timeout 120s;
proxy_send_timeout 120s;
proxy_read_timeout 120s;

11: Load Balancing with Nginx: Methods and Strategies

What is Load Balancing?

Load balancing distributes incoming traffic across multiple backend servers to:

  • Increase application availability
  • Improve response times
  • Prevent server overload
  • Enable horizontal scaling
  • Provide redundancy

Basic Load Balancing Setup

# Define backend servers
upstream backend {
    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}

Load Balancing Methods

Round Robin (Default)

Distributes requests evenly in sequence.

upstream backend {
    # Round robin is default
    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

When to use: Equal server capacity, simple setup

Least Connections

Routes to the server with the fewest active connections.

upstream backend {
    least_conn;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

When to use: Requests have variable processing times

IP Hash

Same client IP always goes to the same server (session persistence).

upstream backend {
    ip_hash;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

When to use: Sessions stored locally on servers

Generic Hash

Hash based on a custom key (URL, cookie, header, etc.).

# Hash based on request URI
upstream backend {
    hash $request_uri consistent;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

# Hash based on custom header
upstream backend {
    hash $http_x_user_id consistent;

    server backend1.example.com;
    server backend2.example.com;
}

When to use: Custom caching strategies, specific routing needs

Least Time (Nginx Plus only)

Routes to the server with the lowest average response time.

upstream backend {
    least_time header;  # or 'last_byte'

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

When to use: Optimizing for fastest response times

Random

Randomly selects a server.

upstream backend {
    random;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

# Random with least connections consideration
upstream backend {
    random two least_conn;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

When to use: Simple distribution without state

Server Weights

Assign different weights to servers with different capacities.

upstream backend {
    server backend1.example.com weight=3;  # Gets 3x traffic
    server backend2.example.com weight=2;  # Gets 2x traffic
    server backend3.example.com weight=1;  # Gets 1x traffic (default)
}

Distribution: Out of 6 requests: backend1=3, backend2=2, backend3=1

Server Parameters

upstream backend {
    server backend1.example.com weight=3 max_fails=3 fail_timeout=30s;
    server backend2.example.com weight=2 max_conns=100;
    server backend3.example.com backup;
    server backend4.example.com down;
}

Parameters:

  • weight=N: Server weight for load distribution (default: 1)
  • max_fails=N: Failed attempts before marking unavailable (default: 1)
  • fail_timeout=time: Time server is unavailable after max_fails (default: 10s)
  • max_conns=N: Maximum concurrent connections to server
  • backup: Only used when primary servers are unavailable
  • down: Permanently mark server as unavailable

Health Checks (Passive)

Nginx monitors backend health based on actual traffic.

upstream backend {
    server backend1.example.com max_fails=3 fail_timeout=30s;
    server backend2.example.com max_fails=3 fail_timeout=30s;
    server backend3.example.com max_fails=3 fail_timeout=30s;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;

        # Try next server on these errors
        proxy_next_upstream error timeout http_502 http_503 http_504;
        proxy_next_upstream_tries 2;
        proxy_next_upstream_timeout 5s;
    }
}

Active Health Checks (Nginx Plus)

upstream backend {
    zone backend 64k;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;
        health_check interval=5s fails=3 passes=2 uri=/health;
    }
}

Session Persistence (Sticky Sessions)

upstream backend {
    ip_hash;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}
# Sticky cookie (Nginx Plus only)
upstream backend {
    server backend1.example.com;
    server backend2.example.com;

    sticky cookie srv_id expires=1h domain=.example.com path=/;
}
upstream backend {
    hash $cookie_jsessionid consistent;

    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

Connection Keepalive

Improve performance with persistent connections to backends.

upstream backend {
    server backend1.example.com;
    server backend2.example.com;

    # Keep 32 idle connections open
    keepalive 32;

    # Keepalive requests per connection
    keepalive_requests 100;

    # Keepalive timeout
    keepalive_timeout 60s;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;

        # Required for keepalive
        proxy_http_version 1.1;
        proxy_set_header Connection "";
    }
}

Complete Load Balancing Example

# Backend servers with health monitoring
upstream app_servers {
    least_conn;

    # Primary servers with health checks
    server app1.example.com:8080 weight=3 max_fails=3 fail_timeout=30s;
    server app2.example.com:8080 weight=3 max_fails=3 fail_timeout=30s;
    server app3.example.com:8080 weight=2 max_fails=3 fail_timeout=30s;

    # Backup server
    server app4.example.com:8080 backup;

    # Connection pooling
    keepalive 32;
    keepalive_requests 100;
    keepalive_timeout 60s;
}

server {
    listen 80;
    server_name example.com;

    access_log /var/log/nginx/lb-access.log;
    error_log /var/log/nginx/lb-error.log;

    location / {
        # Load balance to app servers
        proxy_pass http://app_servers;

        # Headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Keepalive
        proxy_http_version 1.1;
        proxy_set_header Connection "";

        # Timeouts
        proxy_connect_timeout 5s;
        proxy_send_timeout 10s;
        proxy_read_timeout 10s;

        # Retry logic
        proxy_next_upstream error timeout http_502 http_503 http_504;
        proxy_next_upstream_tries 2;
        proxy_next_upstream_timeout 5s;

        # Buffer settings
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
    }

    # Health check endpoint (don't proxy)
    location /nginx-health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}

Multi-tier Load Balancing

# Application servers
upstream app_backend {
    least_conn;
    server app1.example.com:3000;
    server app2.example.com:3000;
    keepalive 16;
}

# API servers
upstream api_backend {
    ip_hash;
    server api1.example.com:4000;
    server api2.example.com:4000;
    keepalive 16;
}

# Static content servers
upstream static_backend {
    server static1.example.com:8080;
    server static2.example.com:8080;
    keepalive 16;
}

server {
    listen 80;
    server_name example.com;

    # Route API requests
    location /api/ {
        proxy_pass http://api_backend;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
    }

    # Route static content
    location ~* \.(jpg|jpeg|png|gif|css|js)$ {
        proxy_pass http://static_backend;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        expires 30d;
    }

    # Route app requests
    location / {
        proxy_pass http://app_backend;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
    }
}

Geographic Load Balancing

# US servers
upstream us_backend {
    server us1.example.com;
    server us2.example.com;
}

# EU servers
upstream eu_backend {
    server eu1.example.com;
    server eu2.example.com;
}

# Asia servers
upstream asia_backend {
    server asia1.example.com;
    server asia2.example.com;
}

# Route based on GeoIP
geo $closest_backend {
    default us_backend;

    # European IP ranges
    2.0.0.0/8 eu_backend;
    5.0.0.0/8 eu_backend;

    # Asian IP ranges
    1.0.0.0/8 asia_backend;
    14.0.0.0/8 asia_backend;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://$closest_backend;
        proxy_set_header Host $host;
    }
}

Load Balancing with SSL

upstream backend {
    server backend1.example.com:443;
    server backend2.example.com:443;
    keepalive 16;
}

server {
    listen 443 ssl http2;
    server_name example.com;

    ssl_certificate /etc/nginx/ssl/cert.pem;
    ssl_certificate_key /etc/nginx/ssl/key.pem;

    location / {
        proxy_pass https://backend;

        # SSL settings for backend
        proxy_ssl_verify off;  # or 'on' with proper CA
        proxy_ssl_server_name on;
        proxy_ssl_session_reuse on;

        # Standard proxy headers
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

Monitoring and Debugging

Log Backend Server Used

log_format upstream '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $body_bytes_sent '
                    '"$http_referer" "$http_user_agent" '
                    'upstream: $upstream_addr '
                    'upstream_status: $upstream_status '
                    'request_time: $request_time '
                    'upstream_response_time: $upstream_response_time';

access_log /var/log/nginx/upstream.log upstream;

Status Page (Nginx Plus)

server {
    listen 8080;

    location /status {
        # NOTE: the 'status' module is deprecated in Nginx Plus;
        # use the API module (the /api location below) instead
        status;
    }

    location /api {
        api write=on;
    }
}

Custom Health Check Endpoint

server {
    listen 8080;

    location /lb-status {
        access_log off;

        # Return custom JSON status
        default_type application/json;
        # $time_iso8601 is a core variable; $date_gmt only exists in the SSI module
        return 200 '{"status":"ok","timestamp":"$time_iso8601"}';
    }
}

Testing Load Balancing

# Test multiple requests (-I fetches response headers; plain -s would only show the body)
for i in {1..10}; do
    curl -sI http://example.com | grep -i "server"
done

# With headers to see backend
curl -H "X-Debug: true" http://example.com

# Test with Apache Bench
ab -n 1000 -c 10 http://example.com/

# Test with wrk
wrk -t4 -c100 -d30s http://example.com/

Comparison of Load Balancing Methods

Method Use Case Session Persistence Complexity
Round Robin Equal servers, simple distribution No Low
Least Connections Variable request durations No Low
IP Hash Session persistence needed Yes Low
Generic Hash Custom routing logic Configurable Medium
Weighted Servers with different capacities Depends Low
Least Time Performance optimization No Medium

Best Practices

  1. Match method to use case: Choose appropriate algorithm
  2. Configure health checks: Enable max_fails and fail_timeout
  3. Use keepalive: Reduce connection overhead
  4. Monitor performance: Log upstream response times
  5. Plan for failures: Use backup servers
  6. Test thoroughly: Verify distribution and failover
  7. Consider sessions: Use appropriate persistence method
  8. Scale horizontally: Add servers as needed
  9. Use weights wisely: Match server capabilities
  10. Document configuration: Maintain clear comments

Troubleshooting

Issue: Uneven distribution

# Check access logs for distribution across upstream addresses
grep -o 'upstream: [^ ]*' /var/log/nginx/upstream.log | sort | uniq -c

# Verify weights are correct
# Check for sticky sessions (ip_hash)

Issue: Server marked as down

# Check error logs
tail -f /var/log/nginx/error.log

# Verify backend is running
curl http://backend-server:port

# Adjust max_fails and fail_timeout

Issue: Poor performance

# Enable keepalive connections
# Increase keepalive connections count
# Check upstream_response_time in logs
# Verify server capacity


12: Setting Up SSL/TLS Certificates with Let's Encrypt and Nginx

What is SSL/TLS?

SSL/TLS encrypts data between clients and servers, providing:

  • Encryption: Protects data in transit
  • Authentication: Verifies server identity
  • Integrity: Prevents data tampering
  • Trust: Required for modern browsers

What is Let's Encrypt?

Let's Encrypt is a free, automated Certificate Authority (CA) that provides SSL/TLS certificates. Benefits:

  • Free certificates
  • Automated renewal
  • Trusted by all major browsers
  • Easy to use

Prerequisites

# Update system
sudo apt update && sudo apt upgrade -y

# Install Certbot
sudo apt install certbot python3-certbot-nginx -y

# Verify Nginx is running
sudo systemctl status nginx

# Verify domain points to your server
nslookup yourdomain.com

Method 1: Automatic Configuration with Certbot

This is the easiest method - Certbot automatically configures Nginx.

Step 1: Basic Nginx Configuration

# /etc/nginx/sites-available/yourdomain.com
server {
    listen 80;
    listen [::]:80;

    server_name yourdomain.com www.yourdomain.com;

    root /var/www/yourdomain.com;
    index index.html;

    location / {
        try_files $uri $uri/ =404;
    }
}

Enable the site:

sudo ln -s /etc/nginx/sites-available/yourdomain.com /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx

Step 2: Obtain Certificate

# Obtain and install certificate
sudo certbot --nginx -d yourdomain.com -d www.yourdomain.com

# Follow the prompts:
# 1. Enter email address
# 2. Agree to Terms of Service
# 3. Choose whether to redirect HTTP to HTTPS (recommended: Yes)

Certbot will automatically:

  • Obtain the certificate
  • Modify the Nginx configuration
  • Set up HTTPS
  • Configure the HTTP to HTTPS redirect

Step 3: Verify Configuration

# Test Nginx configuration
sudo nginx -t

# View the modified configuration
sudo cat /etc/nginx/sites-available/yourdomain.com

# Test SSL
curl -I https://yourdomain.com

Step 4: Test Automatic Renewal

# Dry run renewal
sudo certbot renew --dry-run

# Check renewal timer (systemd)
sudo systemctl status certbot.timer

# Manual renewal (if needed)
sudo certbot renew

Method 2: Manual Configuration with Certbot

For more control over the configuration.

Step 1: Obtain Certificate Only

# Get certificate without modifying Nginx
sudo certbot certonly --nginx -d yourdomain.com -d www.yourdomain.com

# Or use webroot method
sudo certbot certonly --webroot -w /var/www/yourdomain.com -d yourdomain.com -d www.yourdomain.com

Certificates are saved to:

  • Certificate: /etc/letsencrypt/live/yourdomain.com/fullchain.pem
  • Private Key: /etc/letsencrypt/live/yourdomain.com/privkey.pem

Step 2: Configure Nginx Manually

# /etc/nginx/sites-available/yourdomain.com

# HTTP - Redirect to HTTPS
server {
    listen 80;
    listen [::]:80;

    server_name yourdomain.com www.yourdomain.com;

    # Let's Encrypt validation
    location ^~ /.well-known/acme-challenge/ {
        root /var/www/yourdomain.com;
    }

    # Redirect all other traffic to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}

# HTTPS
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;

    server_name yourdomain.com www.yourdomain.com;

    root /var/www/yourdomain.com;
    index index.html;

    # SSL Certificates
    ssl_certificate /etc/letsencrypt/live/yourdomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/yourdomain.com/privkey.pem;

    # SSL Configuration
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;

    # SSL Session
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    ssl_session_tickets off;

    # OCSP Stapling
    ssl_stapling on;
    ssl_stapling_verify on;
    ssl_trusted_certificate /etc/letsencrypt/live/yourdomain.com/chain.pem;
    resolver 8.8.8.8 8.8.4.4 valid=300s;
    resolver_timeout 5s;

    # Security Headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    location / {
        try_files $uri $uri/ =404;
    }
}

Step 3: Apply Configuration

# Test configuration
sudo nginx -t

# Reload Nginx
sudo systemctl reload nginx

Optimized SSL Configuration

Create a reusable SSL configuration file:

/etc/nginx/snippets/ssl-params.conf
# SSL Protocols and Ciphers
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;

# SSL Session
ssl_session_cache shared:SSL:50m;
ssl_session_timeout 1d;
ssl_session_tickets off;

# Diffie-Hellman parameter
ssl_dhparam /etc/nginx/dhparam.pem;

# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 5s;

# Security Headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;

Generate Diffie-Hellman parameters:

sudo openssl dhparam -out /etc/nginx/dhparam.pem 2048

Use in your server block:

server {
    listen 443 ssl http2;
    server_name yourdomain.com;

    ssl_certificate /etc/letsencrypt/live/yourdomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/yourdomain.com/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/yourdomain.com/chain.pem;

    # Include SSL parameters
    include snippets/ssl-params.conf;

    # ... rest of configuration
}

Multiple Domains/Subdomains

# Single certificate for multiple domains
sudo certbot --nginx -d example.com -d www.example.com -d api.example.com -d admin.example.com

# Separate certificates for different domains
sudo certbot --nginx -d domain1.com -d www.domain1.com
sudo certbot --nginx -d domain2.com -d www.domain2.com

Wildcard Certificates

Requires DNS validation:

# Obtain wildcard certificate
# Quote the wildcard so the shell does not attempt glob expansion
sudo certbot certonly --manual --preferred-challenges=dns -d example.com -d "*.example.com"

# Follow instructions to add DNS TXT record
# After verification, configure Nginx manually
server {
    listen 443 ssl http2;
    server_name *.example.com example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # ... rest of configuration
}

SSL for Reverse Proxy

server {
    listen 443 ssl http2;
    server_name api.example.com;

    ssl_certificate /etc/letsencrypt/live/api.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/api.example.com/privkey.pem;
    include snippets/ssl-params.conf;

    location / {
        proxy_pass http://localhost:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

Automatic Renewal Configuration

Certbot automatically creates a renewal timer. Verify:

# Check timer status
sudo systemctl status certbot.timer

# List scheduled renewals
sudo certbot certificates

# View renewal configuration
sudo cat /etc/letsencrypt/renewal/yourdomain.com.conf

Create a renewal hook to reload Nginx:

# Create post-renewal hook
sudo nano /etc/letsencrypt/renewal-hooks/post/reload-nginx.sh
#!/bin/bash
systemctl reload nginx
# Make executable
sudo chmod +x /etc/letsencrypt/renewal-hooks/post/reload-nginx.sh

Testing SSL Configuration

Online Tools

https://www.ssllabs.com/ssltest/
https://www.sslchecker.com/
https://www.immuniweb.com/ssl/

Command Line

# Check certificate details
openssl s_client -connect yourdomain.com:443 -servername yourdomain.com

# Check certificate expiration
echo | openssl s_client -servername yourdomain.com -connect yourdomain.com:443 2>/dev/null | openssl x509 -noout -dates

# Test SSL protocols
nmap --script ssl-enum-ciphers -p 443 yourdomain.com

# Curl with verbose SSL info
curl -vI https://yourdomain.com

Troubleshooting

Issue: Certificate not found

# List certificates
sudo certbot certificates

# Verify certificate files exist
ls -la /etc/letsencrypt/live/yourdomain.com/

# Check Nginx configuration paths
sudo nginx -T | grep ssl_certificate

Issue: Renewal fails

# Check renewal logs
sudo cat /var/log/letsencrypt/letsencrypt.log

# Test renewal manually
sudo certbot renew --dry-run --verbose

# Check webroot is accessible
curl http://yourdomain.com/.well-known/acme-challenge/test

Issue: Mixed content warnings

# Ensure all resources use HTTPS
# Add to server block:
add_header Content-Security-Policy "upgrade-insecure-requests";

Certificate Management Commands

# List all certificates
sudo certbot certificates

# Renew specific certificate
sudo certbot renew --cert-name yourdomain.com

# Renew all certificates
sudo certbot renew

# Revoke certificate
sudo certbot revoke --cert-name yourdomain.com

# Delete certificate
sudo certbot delete --cert-name yourdomain.com

# Expand certificate (add domains)
sudo certbot --nginx -d yourdomain.com -d www.yourdomain.com -d new.yourdomain.com --expand

Complete Example: Production Setup

# HTTP - Redirect to HTTPS
server {
    listen 80;
    listen [::]:80;
    server_name example.com www.example.com;

    location ^~ /.well-known/acme-challenge/ {
        root /var/www/example.com;
    }

    location / {
        return 301 https://example.com$request_uri;
    }
}

# HTTPS - www redirect
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name www.example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;
    include snippets/ssl-params.conf;

    return 301 https://example.com$request_uri;
}

# HTTPS - Main site
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name example.com;

    root /var/www/example.com;
    index index.html;

    # SSL Configuration
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;
    include snippets/ssl-params.conf;

    # Logging
    access_log /var/log/nginx/example.com.access.log;
    error_log /var/log/nginx/example.com.error.log;

    location / {
        try_files $uri $uri/ =404;
    }
}

Best Practices

  1. Always redirect HTTP to HTTPS
  2. Use HTTP/2 for better performance
  3. Enable HSTS with reasonable max-age
  4. Implement OCSP stapling for performance
  5. Use strong ciphers and protocols (TLS 1.2+)
  6. Test with SSL Labs to verify configuration
  7. Monitor expiration dates (though auto-renewal handles this)
  8. Keep Certbot updated: sudo apt update && sudo apt upgrade certbot
  9. Test renewal regularly: sudo certbot renew --dry-run
  10. Use security headers (CSP, X-Frame-Options, etc.)

13: URL Rewriting and Redirects in Nginx

Understanding the Difference

Redirects: Server tells client to request a different URL - Client sees new URL in browser - Makes a new HTTP request - SEO impact (301 vs 302)

Rewrites: Server processes request internally - Client URL stays the same - No additional HTTP request - Transparent to user

HTTP Redirect Status Codes

  • 301: Permanent redirect (SEO-friendly, cached by browsers)
  • 302: Temporary redirect (not cached, can change)
  • 303: See Other (POST → GET redirect)
  • 307: Temporary redirect (preserves HTTP method)
  • 308: Permanent redirect (preserves HTTP method)

Basic Redirects

Simple Redirect

server {
    listen 80;
    server_name example.com;

    # Redirect single page
    location /old-page {
        return 301 /new-page;
    }
}

Redirect Entire Domain

server {
    listen 80;
    server_name old-domain.com;

    # Redirect to new domain, preserving path
    return 301 https://new-domain.com$request_uri;
}

Redirect www to non-www

server {
    listen 80;
    listen 443 ssl http2;
    server_name www.example.com;

    return 301 https://example.com$request_uri;
}

server {
    listen 80;
    listen 443 ssl http2;
    server_name example.com;

    # Main site configuration
}

Redirect non-www to www

server {
    listen 80;
    listen 443 ssl http2;
    server_name example.com;

    return 301 https://www.example.com$request_uri;
}

server {
    listen 80;
    listen 443 ssl http2;
    server_name www.example.com;

    # Main site configuration
}

HTTP to HTTPS Redirect

server {
    listen 80;
    server_name example.com www.example.com;

    return 301 https://example.com$request_uri;
}

server {
    listen 443 ssl http2;
    server_name example.com www.example.com;

    # HTTPS configuration
}

Using return vs rewrite for Redirects

return (Preferred - More Efficient)

location /old-page {
    return 301 /new-page;
}

location /blog {
    return 301 https://blog.example.com$request_uri;
}

rewrite (More Flexible, Regex Support)

location /old-page {
    rewrite ^ /new-page permanent;
}

location /blog {
    rewrite ^/blog(.*)$ https://blog.example.com$1 permanent;
}

Syntax: rewrite regex replacement [flag];

Flags: - permanent: 301 redirect - redirect: 302 redirect - break: Stop processing, use rewritten URI - last: Stop processing rewrites, restart location matching

Basic Rewrites

Simple Rewrite

server {
    listen 80;
    server_name example.com;

    # Rewrite /user/123 to /profile?id=123
    location /user {
        rewrite ^/user/(.*)$ /profile?id=$1 last;
    }
}

Multiple Rewrites

server {
    listen 80;
    server_name example.com;

    # Multiple rewrite rules
    rewrite ^/old-url1$ /new-url1 permanent;
    rewrite ^/old-url2$ /new-url2 permanent;
    rewrite ^/posts/(.*)$ /blog/$1 permanent;
}

Advanced Rewrite Examples

Remove File Extensions

# Remove .html extension
location / {
    try_files $uri $uri.html $uri/ =404;
}

# Or using rewrite
location / {
    if (!-e $request_filename) {
        rewrite ^/(.*)$ /$1.html last;
    }
}

Add Trailing Slash

# Add trailing slash to directories
rewrite ^([^.]*[^/])$ $1/ permanent;

# Or using try_files
location / {
    try_files $uri $uri/ =404;
}

Remove Trailing Slash

# Remove trailing slash
rewrite ^/(.*)/$ /$1 permanent;

Clean URLs (Remove index.php)

# Laravel/PHP framework style
location / {
    try_files $uri $uri/ /index.php?$query_string;
}

# WordPress style
location / {
    try_files $uri $uri/ /index.php?$args;
}

Category/Product URL Rewrite

# /category/product-name to /product.php?id=...
location /category {
    rewrite ^/category/([a-z-]+)$ /product.php?slug=$1 last;
}

# /blog/2026/03/post-title to /blog.php?year=2026&month=03&slug=post-title
location /blog {
    rewrite ^/blog/([0-9]{4})/([0-9]{2})/([a-z-]+)$ /blog.php?year=$1&month=$2&slug=$3 last;
}

Conditional Redirects

Redirect Based on User Agent

# Redirect mobile users
if ($http_user_agent ~* (mobile|android|iphone|ipad)) {
    return 301 https://m.example.com$request_uri;
}

Redirect Based on Country (GeoIP)

# Redirect based on country
geo $country_redirect {
    default 0;
    US 0;
    GB 1;
    FR 1;
    DE 1;
}

server {
    listen 80;
    server_name example.com;

    if ($country_redirect) {
        return 301 https://eu.example.com$request_uri;
    }
}

Redirect Based on Referer

# Redirect if coming from specific site
if ($http_referer ~* "spamsite.com") {
    return 403;
}

try_files Directive

Most powerful and efficient way to handle rewrites.

# Syntax
try_files file1 file2 ... fallback;

Common Patterns

# Try file, then directory, then 404
location / {
    try_files $uri $uri/ =404;
}

# Try file, then directory, then index.php
location / {
    try_files $uri $uri/ /index.php?$query_string;
}

# Serve static files, proxy to backend if not found
location / {
    try_files $uri $uri/ @backend;
}

location @backend {
    proxy_pass http://localhost:3000;
}

Static File Optimization

location / {
    # Try static file first, then backend
    try_files $uri @backend;
}

location @backend {
    proxy_pass http://app_server;
    proxy_set_header Host $host;
}

# Explicitly handle static assets
location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
    expires 30d;
    add_header Cache-Control "public, immutable";
}

Regex in Rewrites

Regex Basics

# ^ - Start of string
# $ - End of string
# . - Any character
# * - Zero or more of previous
# + - One or more of previous
# ? - Zero or one of previous
# [] - Character class
# () - Capture group
# | - OR operator
# ~* - Case-insensitive match
# ~ - Case-sensitive match

Regex Examples

# Match any digits
rewrite ^/user/([0-9]+)$ /profile?id=$1 last;

# Match letters and hyphens
rewrite ^/blog/([a-z-]+)$ /post.php?slug=$1 last;

# Match specific patterns
rewrite ^/(products|services)/(.*)$ /catalog.php?type=$1&item=$2 last;

# Case-insensitive match
location ~* \.(jpg|jpeg|png|gif)$ {
    expires 30d;
}

Named Captures (Nginx 1.11.0+)

# Named capture groups
location ~ ^/(?<category>products|services)/(?<item>.+)$ {
    rewrite ^ /catalog.php?type=$category&item=$item last;
}

# Another example
location ~ ^/user/(?<username>[a-z0-9_-]+)$ {
    try_files /profiles/$username.html @backend;
}

map Directive for Complex Logic

# Map old URLs to new ones
map $request_uri $new_uri {
    /old-page-1   /new-page-1;
    /old-page-2   /new-page-2;
    /old-blog     /blog;
    default       "";
}

server {
    listen 80;
    server_name example.com;

    if ($new_uri != "") {
        return 301 $new_uri;
    }
}

Map with Regex

map $request_uri $redirect_uri {
    ~^/old-category/(.*)$  /new-category/$1;
    ~^/blog/(\d{4})/(.*)$  /archive/$1/$2;
    default                "";
}

server {
    if ($redirect_uri != "") {
        return 301 $redirect_uri;
    }
}

WordPress-Style Rewrites

server {
    listen 80;
    server_name example.com;
    root /var/www/wordpress;
    index index.php;

    location / {
        try_files $uri $uri/ /index.php?$args;
    }

    location ~ \.php$ {
        fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }

    # Deny access to sensitive files
    location ~ /\. {
        deny all;
    }

    location ~* /(?:uploads|files)/.*\.php$ {
        deny all;
    }
}

Laravel-Style Rewrites

server {
    listen 80;
    server_name example.com;
    root /var/www/laravel/public;
    index index.php;

    location / {
        try_files $uri $uri/ /index.php?$query_string;
    }

    location ~ \.php$ {
        fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
        fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
        include fastcgi_params;
    }

    location ~ /\.(?!well-known).* {
        deny all;
    }
}

Maintenance Mode Redirect

server {
    listen 80;
    server_name example.com;

    set $maintenance 0;

    if (-f $document_root/maintenance.html) {
        set $maintenance 1;
    }

    if ($remote_addr = "1.2.3.4") {
        set $maintenance 0;
    }

    if ($maintenance = 1) {
        return 503;
    }

    error_page 503 @maintenance;

    location @maintenance {
        rewrite ^(.*)$ /maintenance.html break;
    }
}

Bulk Redirects from File

# Create redirect map
# /etc/nginx/conf.d/redirects.map
map $request_uri $redirect_destination {
    include /etc/nginx/redirects.txt;
}

server {
    listen 80;
    server_name example.com;

    if ($redirect_destination) {
        return 301 $redirect_destination;
    }
}
/etc/nginx/redirects.txt
/old-url-1  /new-url-1;
/old-url-2  /new-url-2;
/old-url-3  /new-url-3;

Testing Redirects and Rewrites

# Test redirect with curl (don't follow)
curl -I http://example.com/old-page

# Follow redirects
curl -L http://example.com/old-page

# Verbose output
curl -v http://example.com/old-page

# Test specific headers
curl -H "User-Agent: Mobile" http://example.com

# Test rewrite
curl http://example.com/user/123

Common Pitfalls and Best Practices

1. Avoid if When Possible

# BAD - if is evil
if ($request_uri = "/old-page") {
    return 301 /new-page;
}

# GOOD - use location
location = /old-page {
    return 301 /new-page;
}

2. Use return for Simple Redirects

# GOOD - return is faster
location /old {
    return 301 /new;
}

# Less efficient - rewrite
location /old {
    rewrite ^ /new permanent;
}

3. Order Matters

# Specific rules before general ones
location = /exact-match {
    return 301 /new-exact;
}

location ^~ /prefix {
    return 301 /new-prefix;
}

location ~ \.php$ {
    # PHP processing
}

location / {
    # General rules
}

4. Preserve Query Strings

# Preserve query strings
return 301 /new-page$is_args$args;

# Or
return 301 /new-page?$query_string;

5. Test Thoroughly

# Always test configuration
sudo nginx -t

# Check access logs for verification
tail -f /var/log/nginx/access.log

Complete Redirect/Rewrite Example

# Redirect map for bulk redirects
map $request_uri $redirect_uri {
    /old-about      /about-us;
    /old-contact    /contact;
    /old-blog       /blog;
    default         "";
}

server {
    listen 80;
    server_name example.com www.example.com;

    # Redirect www to non-www
    if ($host = www.example.com) {
        return 301 https://example.com$request_uri;
    }

    # Redirect HTTP to HTTPS
    return 301 https://example.com$request_uri;
}

server {
    listen 443 ssl http2;
    server_name www.example.com;

    # Redirect www to non-www
    return 301 https://example.com$request_uri;
}

server {
    listen 443 ssl http2;
    server_name example.com;

    root /var/www/example.com;
    index index.html index.php;

    # SSL configuration
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # Bulk redirects from map
    if ($redirect_uri) {
        return 301 $redirect_uri;
    }

    # Specific page redirects
    location = /old-page {
        return 301 /new-page;
    }

    # Redirect old blog structure
    location ~ ^/blog/(\d{4})/(\d{2})/(.*)$ {
        return 301 /blog/$3;
    }

    # Remove trailing slashes
    rewrite ^/(.*)/$ /$1 permanent;

    # Clean URLs (remove .html)
    location / {
        try_files $uri $uri.html $uri/ @backend;
    }

    # Backend fallback
    location @backend {
        proxy_pass http://localhost:3000;
        proxy_set_header Host $host;
    }

    # PHP processing
    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
}

14: Nginx as an API Gateway

What is an API Gateway?

An API Gateway is a server that acts as a single entry point for multiple backend services. It handles: - Request routing - Load balancing - Authentication/Authorization - Rate limiting - Request/response transformation - Caching - Logging and monitoring - SSL termination

Benefits of Using Nginx as API Gateway

  1. High Performance: Handle thousands of concurrent connections
  2. Low Resource Usage: Efficient async architecture
  3. Flexibility: Powerful routing and rewriting capabilities
  4. Security: Centralized authentication and rate limiting
  5. Observability: Centralized logging and monitoring
  6. Cost-Effective: Open-source solution

Basic API Gateway Setup

# Microservices backend definitions
upstream user_service {
    server localhost:3001;
}

upstream product_service {
    server localhost:3002;
}

upstream order_service {
    server localhost:3003;
}

server {
    listen 80;
    server_name api.example.com;

    # Route to user service
    location /api/users {
        proxy_pass http://user_service;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }

    # Route to product service
    location /api/products {
        proxy_pass http://product_service;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }

    # Route to order service
    location /api/orders {
        proxy_pass http://order_service;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}

Advanced Routing Patterns

Path-Based Routing with Stripping

# Strip /api/v1/users prefix before proxying
location /api/v1/users/ {
    rewrite ^/api/v1/users/(.*)$ /$1 break;
    proxy_pass http://user_service;
    proxy_set_header Host $host;
}

# Or using proxy_pass with trailing slash
location /api/v1/users/ {
    proxy_pass http://user_service/;  # Trailing slash strips prefix
    proxy_set_header Host $host;
}

Versioned APIs

# API v1
location /api/v1/ {
    proxy_pass http://api_v1_backend/;
    proxy_set_header Host $host;
}

# API v2
location /api/v2/ {
    proxy_pass http://api_v2_backend/;
    proxy_set_header Host $host;
}

# Legacy support - redirect v1 to v2
location /api/v1/deprecated-endpoint {
    return 301 /api/v2/new-endpoint$is_args$args;
}

Method-Based Routing

# Different backends for different methods
location /api/data {
    # Read operations to read replicas
    if ($request_method = GET) {
        proxy_pass http://read_service;
    }

    # Write operations to primary
    if ($request_method ~ ^(POST|PUT|DELETE)$) {
        proxy_pass http://write_service;
    }

    proxy_set_header Host $host;
}

Authentication and Authorization

API Key Authentication

map $http_x_api_key $api_client_name {
    default                 "";
    "key123abc"            "client1";
    "key456def"            "client2";
    "key789ghi"            "client3";
}

server {
    listen 80;
    server_name api.example.com;

    location /api/ {
        # Validate API key
        if ($api_client_name = "") {
            return 401 '{"error":"Invalid API Key"}';
        }

        # Pass client name to backend
        proxy_set_header X-Client-Name $api_client_name;
        proxy_pass http://backend;
    }
}

JWT Token Validation (with lua)

Requires nginx-module-lua or OpenResty.

location /api/protected {
    access_by_lua_block {
        local jwt = require "resty.jwt"
        local jwt_token = ngx.var.http_authorization

        if not jwt_token then
            ngx.status = 401
            ngx.say('{"error":"Missing token"}')
            ngx.exit(401)
        end

        -- Remove "Bearer " prefix
        jwt_token = string.gsub(jwt_token, "Bearer ", "")

        -- Verify token
        local jwt_obj = jwt:verify("your-secret-key", jwt_token)
        if not jwt_obj.verified then
            ngx.status = 401
            ngx.say('{"error":"Invalid token"}')
            ngx.exit(401)
        end

        -- Pass user info to backend
        ngx.req.set_header("X-User-ID", jwt_obj.payload.sub)
    }

    proxy_pass http://backend;
}

Basic Auth

location /api/admin {
    auth_basic "Restricted API";
    auth_basic_user_file /etc/nginx/.htpasswd;

    proxy_pass http://admin_service;
    proxy_set_header Host $host;
}

Create password file:

sudo apt install apache2-utils
sudo htpasswd -c /etc/nginx/.htpasswd admin

Rate Limiting

Basic Rate Limiting

# Define rate limit zone
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;

server {
    listen 80;
    server_name api.example.com;

    location /api/ {
        # Apply rate limit
        limit_req zone=api_limit burst=20 nodelay;
        limit_req_status 429;

        proxy_pass http://backend;
    }
}

Per-API-Key Rate Limiting

# Rate limit by API key
limit_req_zone $http_x_api_key zone=api_key_limit:10m rate=100r/s;

# Rate limit by IP
limit_req_zone $binary_remote_addr zone=ip_limit:10m rate=10r/s;

server {
    listen 80;
    server_name api.example.com;

    location /api/ {
        # Apply both limits
        limit_req zone=api_key_limit burst=50;
        limit_req zone=ip_limit burst=20;

        proxy_pass http://backend;
    }
}

Different Limits for Different Endpoints

limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=search:10m rate=5r/s;
limit_req_zone $binary_remote_addr zone=create:10m rate=2r/s;

server {
    listen 80;
    server_name api.example.com;

    # General endpoints
    location /api/read {
        limit_req zone=general burst=20;
        proxy_pass http://backend;
    }

    # Search endpoint (more restrictive)
    location /api/search {
        limit_req zone=search burst=10;
        proxy_pass http://backend;
    }

    # Write operations (most restrictive)
    location /api/create {
        limit_req zone=create burst=5;
        proxy_pass http://backend;
    }
}

Response Caching

# Define cache path
proxy_cache_path /var/cache/nginx/api levels=1:2 keys_zone=api_cache:10m max_size=1g inactive=60m;

upstream backend {
    server localhost:3000;
}

server {
    listen 80;
    server_name api.example.com;

    # Cache GET requests
    location /api/public {
        proxy_cache api_cache;
        proxy_cache_valid 200 10m;
        proxy_cache_valid 404 1m;
        proxy_cache_methods GET HEAD;
        proxy_cache_key "$request_method$request_uri";

        # Cache headers
        add_header X-Cache-Status $upstream_cache_status;

        # Only cache for authorized users
        proxy_cache_bypass $http_authorization;
        proxy_no_cache $http_authorization;

        proxy_pass http://backend;
    }

    # Never cache POST/PUT/DELETE
    location /api/write {
        proxy_pass http://backend;
    }
}

Request/Response Transformation

Add/Modify Headers

location /api/ {
    # Remove headers before proxying
    proxy_set_header Authorization "";

    # Add custom headers
    proxy_set_header X-Gateway "nginx";
    proxy_set_header X-Request-ID $request_id;
    proxy_set_header X-Real-IP $remote_addr;

    # Remove headers from response
    proxy_hide_header X-Powered-By;
    proxy_hide_header Server;

    # Add headers to response
    add_header X-Gateway-Time $request_time;
    add_header X-Backend-Server $upstream_addr;

    proxy_pass http://backend;
}

CORS Headers

location /api/ {
    # Handle preflight requests
    if ($request_method = 'OPTIONS') {
        add_header 'Access-Control-Allow-Origin' '$http_origin' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
        add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, X-API-Key' always;
        add_header 'Access-Control-Max-Age' 86400 always;
        return 204;
    }

    # Add CORS headers to responses
    add_header 'Access-Control-Allow-Origin' '$http_origin' always;
    add_header 'Access-Control-Allow-Credentials' 'true' always;

    proxy_pass http://backend;
}

Load Balancing Backends

upstream backend_cluster {
    least_conn;

    server backend1.example.com:3000 weight=3 max_fails=3 fail_timeout=30s;
    server backend2.example.com:3000 weight=2 max_fails=3 fail_timeout=30s;
    server backend3.example.com:3000 weight=1 backup;

    keepalive 32;
}

server {
    listen 80;
    server_name api.example.com;

    location /api/ {
        proxy_pass http://backend_cluster;
        proxy_http_version 1.1;
        proxy_set_header Connection "";

        proxy_next_upstream error timeout http_502 http_503;
        proxy_next_upstream_tries 2;
    }
}

Circuit Breaker Pattern

upstream backend {
    server backend1.example.com:3000 max_fails=3 fail_timeout=30s;
    server backend2.example.com:3000 max_fails=3 fail_timeout=30s;
}

server {
    listen 80;
    server_name api.example.com;

    location /api/ {
        proxy_pass http://backend;

        # Circuit breaker logic
        proxy_next_upstream error timeout http_502 http_503 http_504;
        proxy_next_upstream_tries 3;
        proxy_next_upstream_timeout 10s;

        # Custom error responses
        proxy_intercept_errors on;
        error_page 502 503 504 = @fallback;
    }

    location @fallback {
        default_type application/json;
        return 503 '{"error":"Service temporarily unavailable"}';
    }
}

Logging and Monitoring

Custom Log Format for APIs

log_format api_log '$remote_addr - $remote_user [$time_local] '
                   '"$request" $status $body_bytes_sent '
                   '"$http_referer" "$http_user_agent" '
                   '"$http_x_api_key" '
                   'request_id=$request_id '
                   'request_time=$request_time '
                   'upstream_addr=$upstream_addr '
                   'upstream_status=$upstream_status '
                   'upstream_response_time=$upstream_response_time '
                   'upstream_connect_time=$upstream_connect_time';

server {
    listen 80;
    server_name api.example.com;

    access_log /var/log/nginx/api_access.log api_log;
    error_log /var/log/nginx/api_error.log;

    location /api/ {
        proxy_pass http://backend;
    }
}

JSON Logging

log_format api_json escape=json '{'
    '"time": "$time_local",'
    '"remote_addr": "$remote_addr",'
    '"request_method": "$request_method",'
    '"request_uri": "$request_uri",'
    '"status": $status,'
    '"body_bytes_sent": $body_bytes_sent,'
    '"request_time": $request_time,'
    '"upstream_addr": "$upstream_addr",'
    '"upstream_status": "$upstream_status",'
    '"upstream_response_time": "$upstream_response_time",'
    '"api_key": "$http_x_api_key",'
    '"user_agent": "$http_user_agent"'
'}';

access_log /var/log/nginx/api.log api_json;

Complete API Gateway Example

# Rate limiting zones
limit_req_zone $http_x_api_key zone=api_key:10m rate=100r/s;
limit_req_zone $binary_remote_addr zone=ip:10m rate=10r/s;

# API key validation
map $http_x_api_key $api_valid {
    default         0;
    "key123"        1;
    "key456"        1;
}

# Cache configuration
proxy_cache_path /var/cache/nginx/api levels=1:2 keys_zone=api_cache:10m max_size=1g inactive=60m;

# Backend services
upstream user_service {
    least_conn;
    server localhost:3001 max_fails=3 fail_timeout=30s;
    server localhost:3002 max_fails=3 fail_timeout=30s;
    keepalive 32;
}

upstream product_service {
    least_conn;
    server localhost:4001 max_fails=3 fail_timeout=30s;
    server localhost:4002 max_fails=3 fail_timeout=30s;
    keepalive 32;
}

# Custom log format
log_format api_log '$remote_addr - [$time_local] "$request" '
                   '$status $body_bytes_sent '
                   'request_time=$request_time '
                   'upstream=$upstream_addr '
                   'api_key=$http_x_api_key';

server {
    listen 443 ssl http2;
    server_name api.example.com;

    ssl_certificate /etc/letsencrypt/live/api.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/api.example.com/privkey.pem;

    access_log /var/log/nginx/api_access.log api_log;
    error_log /var/log/nginx/api_error.log;

    # Health check endpoint
    location /health {
        access_log off;
        default_type application/json;
        return 200 '{"status":"ok"}';
    }

    # Public endpoints (no auth, cached)
    location /api/v1/public {
        limit_req zone=ip burst=20;

        proxy_cache api_cache;
        proxy_cache_valid 200 5m;
        add_header X-Cache-Status $upstream_cache_status;

        proxy_pass http://product_service;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;

        # CORS
        add_header 'Access-Control-Allow-Origin' '$http_origin' always;
    }

    # Protected endpoints (auth required)
    location /api/v1/users {
        # Validate API key
        if ($api_valid = 0) {
            return 401 '{"error":"Invalid API Key"}';
        }

        # Rate limiting
        limit_req zone=api_key burst=50;
        limit_req_status 429;

        # Route to service
        proxy_pass http://user_service;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-API-Key $http_x_api_key;

        # Timeouts
        proxy_connect_timeout 5s;
        proxy_send_timeout 10s;
        proxy_read_timeout 10s;

        # Retry logic
        proxy_next_upstream error timeout http_502 http_503;
        proxy_next_upstream_tries 2;
    }

    # Admin endpoints (basic auth)
    location /api/v1/admin {
        auth_basic "Admin API";
        auth_basic_user_file /etc/nginx/.htpasswd;

        proxy_pass http://user_service;
        proxy_set_header Host $host;
    }

    # Handle OPTIONS for CORS
    if ($request_method = 'OPTIONS') {
        add_header 'Access-Control-Allow-Origin' '$http_origin' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
        add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, X-API-Key' always;
        add_header 'Access-Control-Max-Age' 86400 always;
        return 204;
    }
}

Testing API Gateway

# Test without API key
curl -I https://api.example.com/api/v1/users

# Test with API key
curl -H "X-API-Key: key123" https://api.example.com/api/v1/users

# Test rate limiting
for i in {1..50}; do curl -s -o /dev/null -w "%{http_code}\n" \
  -H "X-API-Key: key123" https://api.example.com/api/v1/users; done

# Test caching
curl -I https://api.example.com/api/v1/public/products

# Load testing
ab -n 1000 -c 10 -H "X-API-Key: key123" https://api.example.com/api/v1/users/

Best Practices

  1. Use SSL/TLS: Always encrypt API traffic
  2. Implement Authentication: API keys, JWT, OAuth
  3. Rate Limiting: Protect against abuse
  4. Versioning: Support multiple API versions
  5. Caching: Cache GET requests when appropriate
  6. Error Handling: Return meaningful error messages
  7. Logging: Log requests for monitoring and debugging
  8. CORS: Configure properly for web clients
  9. Timeouts: Set appropriate timeout values
  10. Health Checks: Monitor backend service health
  11. Circuit Breakers: Fail fast when backends are down
  12. Documentation: Document routing and authentication

15: Serving Static Files and Media Content Efficiently

Why Optimize Static File Serving?

Static files (HTML, CSS, JavaScript, images, videos) often comprise 70-90% of web traffic. Efficient serving: - Reduces server load - Improves page load times - Reduces bandwidth costs - Enhances user experience - Improves SEO rankings

Basic Static File Configuration

server {
    listen 80;
    server_name example.com;

    root /var/www/example.com;
    index index.html index.htm;

    location / {
        try_files $uri $uri/ =404;
    }
}

Optimized Static File Serving

server {
    listen 80;
    server_name example.com;

    root /var/www/example.com;
    index index.html;

    # Disable access logging for static files (optional)
    location ~* \.(jpg|jpeg|png|gif|ico|css|js|svg|woff|woff2|ttf|eot)$ {
        access_log off;
        log_not_found off;
    }

    # Enable sendfile for better performance
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
}

Browser Caching with Expires Headers

server {
    listen 80;
    server_name example.com;
    root /var/www/example.com;

    # Cache images for 30 days
    location ~* \.(jpg|jpeg|png|gif|ico|svg)$ {
        expires 30d;
        add_header Cache-Control "public, immutable";
    }

    # Cache CSS and JavaScript for 1 year
    location ~* \.(css|js)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # Cache fonts for 1 year
    location ~* \.(woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # Cache videos for 30 days
    location ~* \.(mp4|webm|ogg)$ {
        expires 30d;
        add_header Cache-Control "public, immutable";
    }

    # Don't cache HTML (or use short cache)
    location ~* \.html$ {
        expires -1;
        add_header Cache-Control "no-cache, must-revalidate";
    }
}

Gzip Compression

Gzip compression is covered in detail in its own dedicated section, but here's a quick reference:

# http-level gzip settings are inherited by every server below.
http {
    # Enable gzip
    gzip on;
    # Emit "Vary: Accept-Encoding" so caches store both variants
    gzip_vary on;
    # Skip tiny responses where compression overhead outweighs the gain
    gzip_min_length 1024;
    # text/html is always compressed and must not be listed here
    gzip_types text/plain text/css text/xml text/javascript 
               application/xml application/json application/javascript 
               application/rss+xml image/svg+xml;

    server {
        # Your configuration
    }
}

Sendfile Optimization

# http-level transfer tuning; inherited by all servers below.
http {
    # Enable sendfile (kernel-level file sending, no userspace copy)
    sendfile on;

    # Send headers in one packet (only takes effect with sendfile on)
    tcp_nopush on;

    # Don't buffer data-sends (disables Nagle's algorithm)
    tcp_nodelay on;

    server {
        # Your configuration
    }
}

What these do: - sendfile on: Uses kernel's sendfile() for efficient file transfers - tcp_nopush on: Optimizes packet sending (used with sendfile) - tcp_nodelay on: Sends small packets immediately

Open File Cache

Cache file descriptors for frequently accessed files:

http {
    # Cache open file descriptors, sizes and modification times for hot
    # files, avoiding a fresh open() syscall on every request.
    open_file_cache max=10000 inactive=30s;
    open_file_cache_valid 60s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    server {
        # Your configuration
    }
}

Parameters:

  • max=10000: Cache up to 10,000 file descriptors
  • inactive=30s: Remove from cache if not accessed in 30s
  • open_file_cache_valid 60s: Re-validate cache entries after 60s
  • open_file_cache_min_uses 2: Cache files accessed at least 2 times
  • open_file_cache_errors on: Cache file lookup errors

Serving Different Media Types

server {
    listen 80;
    server_name example.com;
    root /var/www/example.com;

    # Images
    location /images/ {
        expires 30d;
        add_header Cache-Control "public, immutable";
        access_log off;
    }

    # CSS and JavaScript
    location /assets/ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        access_log off;

        # Serve pre-compressed .gz files if they exist on disk
        # (requires ngx_http_gzip_static_module)
        gzip_static on;
    }

    # Videos
    location /videos/ {
        expires 30d;
        add_header Cache-Control "public, immutable";

        # Enable range requests for seeking
        # NOTE(review): nginx already sends Accept-Ranges for static
        # files, so this add_header may duplicate the header — verify.
        add_header Accept-Ranges bytes;

        # Limit bandwidth (optional): 1 MB/s per connection
        limit_rate 1m;
    }

    # Downloads
    location /downloads/ {
        # Force download instead of in-browser display
        add_header Content-Disposition "attachment";

        # Full speed for the first 10 MB, then 500 KB/s per connection
        limit_rate_after 10m;
        limit_rate 500k;
    }
}

CDN-Style Multiple Domains

Browsers limit concurrent HTTP/1.1 connections per domain, so spreading assets across multiple subdomains can increase parallel downloads. Note: with HTTP/2's request multiplexing, this "domain sharding" technique is usually unnecessary and can even hurt performance.

# All three shards share the same root, so any asset can be requested
# from any of the subdomains (HTTP/1.1-era technique; see note above).

# static1.example.com
server {
    listen 80;
    server_name static1.example.com;
    root /var/www/static;

    expires 1y;
    add_header Cache-Control "public, immutable";
    access_log off;
}

# static2.example.com
server {
    listen 80;
    server_name static2.example.com;
    root /var/www/static;

    expires 1y;
    add_header Cache-Control "public, immutable";
    access_log off;
}

# static3.example.com
server {
    listen 80;
    server_name static3.example.com;
    root /var/www/static;

    expires 1y;
    add_header Cache-Control "public, immutable";
    access_log off;
}

Then use in HTML:

<img src="http://static1.example.com/image1.jpg">
<img src="http://static2.example.com/image2.jpg">
<script src="http://static3.example.com/script.js"></script>

Separate Static Content Server

# Main application server: dynamic requests go to the backend,
# static assets are served directly by Nginx.
server {
    listen 80;
    server_name example.com;

    # Application logic
    location / {
        proxy_pass http://app_backend;
    }

    # Static files served by Nginx.
    # (alias replaces the matched "/static/" prefix, so
    # /static/app.js maps to /var/www/static/app.js)
    location /static/ {
        alias /var/www/static/;
        expires 1y;
        add_header Cache-Control "public, immutable";
        access_log off;
    }

    location /media/ {
        alias /var/www/media/;
        expires 30d;
        add_header Cache-Control "public";
    }
}

Image Optimization

WebP Support with Fallback

# If the client's Accept header advertises WebP support, $webp_suffix
# becomes ".webp"; otherwise it stays empty.
# (map blocks must appear in the http context, outside any server.)
map $http_accept $webp_suffix {
    default "";
    "~*webp" ".webp";
}

server {
    listen 80;
    server_name example.com;

    location /images/ {
        root /var/www;

        # Try WebP version first, fallback to original
        # (requires a pre-generated WebP file next to each original,
        # e.g. photo.jpg.webp alongside photo.jpg)
        try_files $uri$webp_suffix $uri =404;

        expires 30d;
        add_header Cache-Control "public, immutable";
        # Vary: Accept keeps shared caches from serving WebP to
        # clients that did not advertise support for it
        add_header Vary Accept;
    }
}

Responsive Images

# Map the screen_width cookie (set by client-side JS) to a width value;
# requests without the cookie fall back to "medium".
map $http_cookie $image_size {
    default "medium";
    "~*screen_width=([0-9]+)" $1;
}

# Bucket the reported width into a size-specific image directory.
# (The previous regexes covered only 3- and 4-digit widths, so values
# below 100px or above 9999px silently fell through to "medium".)
map $image_size $image_dir {
    default "medium";
    ~*^[1-9][0-9]{0,2}$ "small";   # 1-999px
    ~*^1[0-9]{3}$ "medium";        # 1000-1999px
    ~*^[2-9][0-9]{3}$ "large";     # 2000-9999px
    ~*^[0-9]{5,}$ "large";         # 10000px and above
}

server {
    listen 80;
    server_name example.com;

    location /images/ {
        # Fixed root; the size directory is selected inside try_files.
        # (The previous version used "root /var/www/$image_dir;" with
        # "try_files $uri /var/www/medium$uri =404;" — but try_files
        # arguments are URIs appended to root, so the fallback resolved
        # to /var/www/$image_dir/var/www/medium$uri and never matched.)
        root /var/www;
        try_files /$image_dir$uri /medium$uri =404;
        expires 30d;
    }
}

Video Streaming

MP4 Streaming

server {
    listen 80;
    server_name example.com;

    location /videos/ {
        root /var/www;

        # Enable MP4 pseudo-streaming: honors ?start=/?end= query
        # arguments for seeking (requires ngx_http_mp4_module)
        mp4;
        mp4_buffer_size 1m;
        mp4_max_buffer_size 5m;

        # Enable range requests
        # NOTE(review): nginx already sends Accept-Ranges for static
        # files, so this add_header may duplicate the header — verify.
        add_header Accept-Ranges bytes;

        # Cache
        expires 30d;
        add_header Cache-Control "public, immutable";

        # Bandwidth limiting (optional): full speed for the first
        # 5 MB, then 1 MB/s per connection
        limit_rate_after 5m;
        limit_rate 1m;
    }
}

HLS Streaming (HTTP Live Streaming)

server {
    listen 80;
    server_name example.com;

    location /hls/ {
        root /var/www;

        # MIME types for HLS playlists (.m3u8) and segments (.ts).
        # NOTE: a types block REPLACES the inherited MIME table for
        # this location; other extensions fall back to default_type.
        types {
            application/vnd.apple.mpegurl m3u8;
            video/mp2t ts;
        }

        # CORS for streaming (wildcard allows any origin to play)
        add_header Access-Control-Allow-Origin *;
        # Playlists are rewritten continuously during live streams,
        # so they must not be cached
        add_header Cache-Control "no-cache";
    }
}

Security for Static Files

server {
    listen 80;
    server_name example.com;
    root /var/www/example.com;

    # Deny access to hidden files
    # NOTE(review): this also blocks /.well-known/ (used by ACME /
    # Let's Encrypt challenges) — add an exception if needed.
    location ~ /\. {
        deny all;
        access_log off;
        log_not_found off;
    }

    # Deny access to backup files (names ending in "~", as left
    # behind by some editors)
    location ~ ~$ {
        deny all;
        access_log off;
        log_not_found off;
    }

    # Prevent hotlinking: allow empty, "blocked" (stripped-by-proxy),
    # and our own Referer values only
    location /images/ {
        valid_referers none blocked example.com *.example.com;
        if ($invalid_referer) {
            return 403;
        }

        expires 30d;
    }

    # Secure downloads directory: "internal" means these files can only
    # be reached via an internal redirect (e.g. X-Accel-Redirect from
    # the application), never by a direct external request
    location /private-files/ {
        internal;
        alias /var/www/private/;
    }
}

Bandwidth Limiting

server {
    listen 80;
    server_name example.com;

    # Server-level document root. (Previously only the /downloads/
    # location had a root; the two regex locations below had none, so
    # any request they matched was looked up under the compiled-in
    # default root and returned 404.)
    root /var/www;

    # Limit after first 10MB at full speed
    location /downloads/ {
        limit_rate_after 10m;
        limit_rate 500k;  # 500 KB/s per connection
    }

    # Different limits for different file types.
    # NOTE: regex locations take priority over the /downloads/ prefix
    # location above, so e.g. /downloads/movie.mp4 is governed here.
    location ~* \.(mp4|avi|mov)$ {
        limit_rate_after 5m;
        limit_rate 1m;  # 1 MB/s for videos
    }

    location ~* \.(zip|tar|gz)$ {
        limit_rate_after 10m;
        limit_rate 500k;  # 500 KB/s for archives
    }
}

Complete Optimized Static Site Configuration

http {
    # Performance optimizations
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;

    # Gzip compression (text/html is always compressed automatically)
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/css text/javascript application/javascript
               application/json application/xml text/xml image/svg+xml;

    # Cache file descriptors of frequently accessed files
    open_file_cache max=10000 inactive=30s;
    open_file_cache_valid 60s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    server {
        listen 80;
        server_name example.com;
        root /var/www/example.com/public;
        index index.html;

        # Security headers.
        # NOTE: add_header is inherited from this level ONLY by
        # locations that declare no add_header of their own, so every
        # location below that sets its own header must repeat these
        # three — otherwise its responses would lose them entirely.
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;

        # HTML files - no cache
        location / {
            try_files $uri $uri/ /index.html;
            expires -1;
            add_header Cache-Control "no-cache, must-revalidate";
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
        }

        # Images - cache 1 year
        location ~* \.(jpg|jpeg|png|gif|ico|svg|webp)$ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            access_log off;
            log_not_found off;
        }

        # CSS and JavaScript - cache 1 year, use versioned filenames
        location ~* \.(css|js)$ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            access_log off;
            # Serve pre-compressed .gz files if present
            # (requires ngx_http_gzip_static_module)
            gzip_static on;
        }

        # Fonts - cache 1 year, allow cross-origin use
        location ~* \.(woff|woff2|ttf|eot)$ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            add_header Access-Control-Allow-Origin *;
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            access_log off;
        }

        # MP4 videos - cache 30 days, enable pseudo-streaming.
        # (The mp4 directive requires ngx_http_mp4_module and only
        # applies to MP4 files, so WebM/Ogg get their own location.
        # nginx sends Accept-Ranges for static files automatically,
        # so no manual add_header is needed for seeking.)
        location ~* \.mp4$ {
            mp4;
            expires 30d;
            add_header Cache-Control "public, immutable";
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            access_log off;
        }

        # Other video formats - cache 30 days
        location ~* \.(webm|ogg)$ {
            expires 30d;
            add_header Cache-Control "public, immutable";
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            access_log off;
        }

        # Downloads - force download, throttle after the first 10 MB
        location /downloads/ {
            add_header Content-Disposition "attachment";
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            limit_rate_after 10m;
            limit_rate 500k;
        }

        # Deny access to sensitive files
        location ~ /\.(git|htaccess|env) {
            deny all;
        }

        # Prevent hotlinking of everything served by this server
        valid_referers none blocked example.com *.example.com;
        if ($invalid_referer) {
            return 403;
        }
    }
}

Testing Performance

# Test file download speed
curl -o /dev/null -w "Speed: %{speed_download} bytes/sec\n" http://example.com/large-file.zip

# Test caching headers (look for Expires / Cache-Control in the output)
curl -I http://example.com/style.css

# Test gzip compression (look for Content-Encoding: gzip in the output)
curl -H "Accept-Encoding: gzip" -I http://example.com/script.js

# Load testing: 1000 requests, 10 concurrent (Apache Bench)
ab -n 1000 -c 10 http://example.com/image.jpg

# Count open() errors recorded in the error log (requires
# open_file_cache_errors on) — a rough proxy, not a true hit rate
grep "open()" /var/log/nginx/error.log | wc -l

Monitoring Static File Performance

# Custom log format for static asset requests.
# (log_format must be declared in the http context.)
# NOTE(review): $upstream_cache_status is only populated when a request
# goes through proxy/fastcgi caching; for files served directly from
# disk it is logged as "-" — confirm this is intended here.
log_format static '$remote_addr - [$time_local] "$request" '
                  '$status $body_bytes_sent '
                  '"$http_referer" "$http_user_agent" '
                  'cache=$upstream_cache_status '
                  'time=$request_time';

server {
    location ~* \.(jpg|css|js)$ {
        access_log /var/log/nginx/static.log static;
        # ... rest of configuration
    }
}

Best Practices

  1. Use sendfile: Enable for better kernel-level performance
  2. Enable gzip: Compress text-based files
  3. Set long cache times: Use versioned filenames (style.v123.css)
  4. Use CDN or multiple domains: Increase parallel downloads
  5. Optimize images: Use modern formats (WebP), correct sizing
  6. Enable file descriptor caching: Reduce open() syscalls
  7. Disable access logs: For static files in production
  8. Use HTTP/2: Better multiplexing and compression
  9. Implement security headers: Prevent hotlinking and attacks
  10. Monitor performance: Track cache hit rates and load times