Skip to content

Intermediate Level Nginx : Part 2

16: Gzip Compression and Performance Optimization

Gzip compression reduces file sizes before sending to clients, providing:

  • Reduced bandwidth: 50-80% reduction for text files
  • Faster page loads: Less data to transfer
  • Cost savings: Lower bandwidth costs
  • Better SEO: Faster sites rank higher

Trade-off: Slight CPU overhead for compression (negligible on modern servers)

Basic Gzip Configuration

http {
    # Enable gzip
    gzip on;

    # Compression level (1-9, 6 is good balance)
    gzip_comp_level 6;

    # Minimum file size to compress (bytes)
    gzip_min_length 1000;

    # Compress these MIME types
    gzip_types text/plain text/css text/xml text/javascript 
               application/xml application/json application/javascript 
               application/rss+xml application/atom+xml image/svg+xml;

    # Add Vary: Accept-Encoding header
    gzip_vary on;

    server {
        listen 80;
        server_name example.com;
        root /var/www/example.com;
    }
}

Complete Gzip Configuration

http {
    # Enable gzip compression
    gzip on;

    # Compression level (1=fastest, 9=best compression)
    # 5-6 is recommended balance
    gzip_comp_level 6;

    # Don't compress files smaller than this
    gzip_min_length 1000;

    # Compress data even for proxied requests
    gzip_proxied any;

    # Disable compression for old IE6 clients (they mishandle gzipped responses)
    gzip_disable "msie6";

    # Add Vary: Accept-Encoding header
    gzip_vary on;

    # Compression buffer
    gzip_buffers 16 8k;

    # HTTP version (1.1 recommended)
    gzip_http_version 1.1;

    # MIME types to compress
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/xml
        application/xml+rss
        application/rss+xml
        application/atom+xml
        application/javascript
        application/x-javascript
        application/json
        application/ld+json
        application/manifest+json
        application/x-web-app-manifest+json
        image/svg+xml
        text/x-component
        text/x-cross-domain-policy;

    server {
        listen 80;
        server_name example.com;
        root /var/www/example.com;
    }
}

Gzip Directives Explained

# Enable/disable gzip
gzip on;

# Compression level (1-9)
# 1 = fastest, least compression
# 9 = slowest, best compression
# 5-6 = recommended for production
gzip_comp_level 6;

# Minimum length to compress (in bytes)
# Files smaller than this won't be compressed
gzip_min_length 1000;

# Enable compression for proxied requests
# off     = disable for all proxied requests
# expired = enable if the Expires header prevents caching
# no-cache = enable if Cache-Control: no-cache
# no-store = enable if Cache-Control: no-store
# private = enable if Cache-Control: private
# no_last_modified = enable if no Last-Modified header
# no_etag = enable if no ETag header
# auth = enable if Authorization header present
# any = enable for all proxied requests
gzip_proxied any;

# Disable for specific user agents (old IE6)
gzip_disable "msie6";

# Add Vary: Accept-Encoding header
# Tells caches that response varies based on Accept-Encoding
gzip_vary on;

# Number and size of compression buffers
gzip_buffers 16 8k;

# Minimum HTTP version required
gzip_http_version 1.1;

# MIME types to compress (text/html always compressed)
gzip_types text/plain text/css;

Static Gzip (Pre-compressed Files)

Pre-compress files at build time for maximum performance.

http {
    # Enable serving pre-compressed files
    gzip_static on;

    server {
        listen 80;
        server_name example.com;
        root /var/www/example.com;

        location / {
            # Nginx will look for .gz version first
            # If style.css.gz exists, serve it instead of compressing
            try_files $uri $uri/ =404;
        }
    }
}

Pre-compress files:

# Compress all CSS files
find /var/www/example.com -name '*.css' -exec gzip -k9 {} \;

# Compress all JavaScript files
find /var/www/example.com -name '*.js' -exec gzip -k9 {} \;

# Compress HTML files
find /var/www/example.com -name '*.html' -exec gzip -k9 {} \;

# Build script example
#!/bin/bash
FILES="/var/www/example.com"
find $FILES -type f \( -name '*.css' -o -name '*.js' -o -name '*.html' -o -name '*.svg' \) -exec gzip -k9 {} \;

Brotli Compression (Alternative to Gzip)

Brotli provides better compression than gzip (15-25% smaller).

Installation:

# Install Nginx with Brotli module
sudo apt install nginx-module-brotli

# Or compile from source with ngx_brotli module

Configuration:

# Load module (if not compiled in)
load_module modules/ngx_http_brotli_filter_module.so;
load_module modules/ngx_http_brotli_static_module.so;

http {
    # Brotli compression
    brotli on;
    brotli_comp_level 6;
    brotli_types text/plain text/css application/json application/javascript 
                 text/xml application/xml application/xml+rss text/javascript 
                 image/svg+xml;

    # Static brotli (pre-compressed .br files)
    brotli_static on;

    # Gzip fallback for browsers that don't support Brotli
    gzip on;
    gzip_comp_level 6;
    gzip_types text/plain text/css application/json application/javascript;

    server {
        listen 80;
        server_name example.com;
        root /var/www/example.com;
    }
}

Testing Compression

Check if Gzip is Working

# Test gzip compression
curl -H "Accept-Encoding: gzip" -I http://example.com/style.css

# Should see:
# Content-Encoding: gzip
# Vary: Accept-Encoding

# Test with verbose output
curl -H "Accept-Encoding: gzip,deflate,br" -v http://example.com/script.js

# Compare compressed vs uncompressed size
curl -H "Accept-Encoding: gzip" --write-out "%{size_download}\n" --silent --output /dev/null http://example.com/large.js
curl --write-out "%{size_download}\n" --silent --output /dev/null http://example.com/large.js

Online Testing Tools

https://www.giftofspeed.com/gzip-test/
https://varvy.com/tools/gzip/
https://developers.google.com/speed/pagespeed/insights/

Performance Optimization Beyond Compression

1. Worker Processes and Connections

# Set to number of CPU cores
worker_processes auto;

# Maximum connections per worker
events {
    worker_connections 1024;
    use epoll;  # Efficient method for Linux
    multi_accept on;
}

2. Keepalive Connections

http {
    # Keepalive timeout
    keepalive_timeout 65;

    # Maximum requests per connection
    keepalive_requests 100;

    server {
        # Your configuration
    }
}

3. Buffer Sizes

http {
    # Client body buffer
    client_body_buffer_size 128k;
    client_max_body_size 10m;

    # Client header buffer
    client_header_buffer_size 1k;
    large_client_header_buffers 4 8k;

    # Response buffers
    output_buffers 1 32k;
    postpone_output 1460;

    server {
        # Your configuration
    }
}

4. Timeouts

http {
    # Client timeouts
    client_body_timeout 12;
    client_header_timeout 12;

    # Send timeout
    send_timeout 10;

    # Keepalive timeout
    keepalive_timeout 65;

    server {
        # Your configuration
    }
}

5. File I/O Optimization

http {
    # Enable sendfile
    sendfile on;

    # Optimize packet sending
    tcp_nopush on;
    tcp_nodelay on;

    # File cache
    open_file_cache max=10000 inactive=30s;
    open_file_cache_valid 60s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    server {
        # Your configuration
    }
}

6. Limit Connections and Requests

http {
    # Limit connections per IP
    limit_conn_zone $binary_remote_addr zone=addr:10m;

    # Limit requests per IP
    limit_req_zone $binary_remote_addr zone=req:10m rate=10r/s;

    server {
        listen 80;
        server_name example.com;

        # Apply connection limit
        limit_conn addr 10;

        # Apply request rate limit
        limit_req zone=req burst=20 nodelay;
    }
}

HTTP/2 Optimization

server {
    listen 443 ssl http2;
    server_name example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # HTTP/2 push (preload resources)
    location = /index.html {
        http2_push /style.css;
        http2_push /script.js;
        http2_push /logo.png;
    }

    # HTTP/2 settings
    http2_max_field_size 16k;
    http2_max_header_size 32k;
}

Complete Optimized Configuration

# Global settings
user www-data;
worker_processes auto;
worker_rlimit_nofile 65535;
pid /run/nginx.pid;

events {
    worker_connections 2048;
    use epoll;
    multi_accept on;
}

http {
    # Basic settings
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 100;
    types_hash_max_size 2048;
    server_tokens off;

    # Buffer sizes
    client_body_buffer_size 128k;
    client_max_body_size 10m;
    client_header_buffer_size 1k;
    large_client_header_buffers 4 8k;
    output_buffers 1 32k;
    postpone_output 1460;

    # Timeouts
    client_body_timeout 12;
    client_header_timeout 12;
    send_timeout 10;

    # File cache
    open_file_cache max=10000 inactive=30s;
    open_file_cache_valid 60s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_min_length 1000;
    gzip_disable "msie6";
    gzip_http_version 1.1;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/json
        application/javascript
        application/xml+rss
        application/atom+xml
        image/svg+xml;

    # Static gzip
    gzip_static on;

    # MIME types
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Logging
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log warn;

    # Rate limiting
    limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;
    limit_conn_zone $binary_remote_addr zone=addr:10m;

    # SSL settings
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    ssl_session_cache shared:SSL:50m;
    ssl_session_timeout 1d;
    ssl_session_tickets off;
    ssl_stapling on;
    ssl_stapling_verify on;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    server {
        listen 443 ssl http2;
        listen [::]:443 ssl http2;
        server_name example.com;

        root /var/www/example.com;
        index index.html;

        ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

        # Rate limiting
        limit_req zone=general burst=20 nodelay;
        limit_conn addr 10;

        # Static files with cache
        location ~* \.(jpg|jpeg|png|gif|ico|css|js|svg|woff|woff2)$ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            access_log off;
        }

        # HTML files
        location / {
            try_files $uri $uri/ =404;
            expires -1;
            add_header Cache-Control "no-cache";
        }

        # Security
        location ~ /\. {
            deny all;
        }
    }
}

Measuring Performance

Using curl

# Measure response time
curl -w "\ntime_total: %{time_total}\n" -o /dev/null -s http://example.com

# Detailed timing
curl -w "\nDNS: %{time_namelookup}\nConnect: %{time_connect}\nTTFB: %{time_starttransfer}\nTotal: %{time_total}\n" -o /dev/null -s http://example.com

Using Apache Bench

# Test with 1000 requests, 10 concurrent
ab -n 1000 -c 10 http://example.com/

# With keepalive
ab -n 1000 -c 10 -k http://example.com/

Using wrk

# Install wrk
sudo apt install wrk

# Test for 30 seconds, 4 threads, 100 connections
wrk -t4 -c100 -d30s http://example.com/

# With custom script
wrk -t4 -c100 -d30s -s script.lua http://example.com/

Monitor Nginx Performance

# Install stub_status module
# Already included in most Nginx builds

# Add to configuration
server {
    listen 8080;
    location /nginx_status {
        stub_status;
        access_log off;
        allow 127.0.0.1;
        deny all;
    }
}

# Check status
curl http://localhost:8080/nginx_status

# Output shows:
# Active connections: 1 
# server accepts handled requests
#  10 10 15 
# Reading: 0 Writing: 1 Waiting: 0

Nginx Amplify (Official Monitoring)

# Install Nginx Amplify Agent
curl -sS -L -O https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh
API_KEY='your_api_key' sh ./install.sh

# View metrics at: https://amplify.nginx.com

Best Practices Summary

  1. Enable gzip compression: 5-6 compression level is optimal
  2. Use gzip_static: Pre-compress files at build time
  3. Consider Brotli: Better compression than gzip
  4. Optimize worker processes: Set to CPU core count
  5. Configure buffers: Match your traffic patterns
  6. Enable file caching: Reduce file system operations
  7. Use HTTP/2: Better multiplexing and compression
  8. Set appropriate timeouts: Prevent hung connections
  9. Implement rate limiting: Protect against abuse
  10. Monitor performance: Use metrics to identify issues
  11. Cache static assets: Long expiration times with versioning
  12. Minimize access logging: Or use buffered logging

Common Performance Issues

Issue: High CPU Usage

# Reduce gzip compression level
gzip_comp_level 4;  # Instead of 6-9

# Limit worker connections
events {
    worker_connections 1024;  # Instead of 4096
}

Issue: High Memory Usage

# Reduce buffer sizes
client_body_buffer_size 64k;  # Instead of 128k

# Reduce file cache
open_file_cache max=5000 inactive=20s;  # Instead of 10000

Issue: Slow Response Times

# Enable sendfile and tcp optimizations
sendfile on;
tcp_nopush on;
tcp_nodelay on;

# Increase buffers
output_buffers 8 32k;

# Enable gzip_static for pre-compressed files
gzip_static on;

Compression Comparison Table

File Type Original Size Gzip (Level 6) Brotli (Level 6) Savings (Gzip) Savings (Brotli)
HTML 100 KB 20 KB 17 KB 80% 83%
CSS 100 KB 15 KB 12 KB 85% 88%
JavaScript 100 KB 30 KB 25 KB 70% 75%
JSON 100 KB 10 KB 8 KB 90% 92%
SVG 100 KB 25 KB 20 KB 75% 80%

17: Rate Limiting and DDoS Protection with Nginx

Understanding Rate Limiting

Rate limiting controls the rate of requests from clients to: - Prevent abuse: Block excessive requests - Protect resources: Limit server load - Ensure fair usage: Prevent one client from monopolizing resources - Mitigate DDoS attacks: Reduce impact of attacks - API quota enforcement: Implement usage limits

Basic Rate Limiting

# Define rate limit zone (10MB zone, 10 requests per second)
limit_req_zone $binary_remote_addr zone=basic:10m rate=10r/s;

server {
    listen 80;
    server_name example.com;

    location / {
        # Apply rate limit
        limit_req zone=basic;

        proxy_pass http://backend;
    }
}

Rate Limiting Directives

limit_req_zone

Defines the rate limit zone.

# Syntax
limit_req_zone key zone=name:size rate=rate;

# Examples
limit_req_zone $binary_remote_addr zone=perip:10m rate=10r/s;
limit_req_zone $server_name zone=perserver:10m rate=100r/s;
limit_req_zone $http_x_api_key zone=perkey:10m rate=50r/s;

# Rates can be per second (r/s) or per minute (r/m)
limit_req_zone $binary_remote_addr zone=strict:10m rate=1r/m;
limit_req_zone $binary_remote_addr zone=normal:10m rate=10r/s;

Key variables: - $binary_remote_addr: Client IP (binary, saves space) - $remote_addr: Client IP (text) - $server_name: Server name - $http_x_api_key: Custom header (API key) - $request_uri: Request URI

limit_req

Applies the rate limit.

location / {
    # Basic usage
    limit_req zone=basic;

    # With burst
    limit_req zone=basic burst=20;

    # With burst and nodelay
    limit_req zone=basic burst=20 nodelay;

    # Custom error status
    limit_req_status 429;

    proxy_pass http://backend;
}

Parameters:

  • zone=name: Which zone to use
  • burst=number: Allow bursts up to this number
  • nodelay: Process burst requests immediately
  • delay=number: Allow number of excessive requests without delay

Understanding Burst and Nodelay

Without Burst

limit_req_zone $binary_remote_addr zone=noburst:10m rate=10r/s;

location / {
    limit_req zone=noburst;
    proxy_pass http://backend;
}

Behavior: - Exactly 10 requests per second allowed - 11th request in same second: rejected (503) - No queuing

With Burst (Queuing)

limit_req_zone $binary_remote_addr zone=withburst:10m rate=10r/s;

location / {
    limit_req zone=withburst burst=20;
    proxy_pass http://backend;
}

Behavior: - Up to 30 total requests allowed (10 + burst of 20) - Excess requests queued - Processed at specified rate (10r/s) - Can cause delays for legitimate users

With Burst and Nodelay (Best for Most Cases)

limit_req_zone $binary_remote_addr zone=nodelay:10m rate=10r/s;

location / {
    limit_req zone=nodelay burst=20 nodelay;
    proxy_pass http://backend;
}

Behavior: - Up to 30 requests allowed immediately - No queuing delays - After burst, back to normal rate - Best user experience

Connection Limiting

Limit number of concurrent connections from an IP.

# Define connection limit zone
limit_conn_zone $binary_remote_addr zone=conn_limit:10m;

server {
    listen 80;
    server_name example.com;

    # Limit to 10 concurrent connections per IP
    limit_conn conn_limit 10;

    location /downloads/ {
        # More restrictive for downloads
        limit_conn conn_limit 2;

        root /var/www;
    }
}

Different Limits for Different Endpoints

# Different zones for different purposes
limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=login:10m rate=5r/m;
limit_req_zone $binary_remote_addr zone=api:10m rate=100r/s;
limit_req_zone $binary_remote_addr zone=search:10m rate=5r/s;

server {
    listen 80;
    server_name example.com;

    # General pages
    location / {
        limit_req zone=general burst=20 nodelay;
        proxy_pass http://backend;
    }

    # Login endpoint (more restrictive)
    location /login {
        limit_req zone=login burst=5;
        limit_req_status 429;
        proxy_pass http://backend;
    }

    # API (higher limit)
    location /api/ {
        limit_req zone=api burst=50 nodelay;
        proxy_pass http://backend;
    }

    # Search (moderate limit)
    location /search {
        limit_req zone=search burst=10;
        proxy_pass http://backend;
    }
}

API Key-Based Rate Limiting

# Rate limit by API key instead of IP
limit_req_zone $http_x_api_key zone=api_key:10m rate=100r/s;

# Also limit by IP as backup
limit_req_zone $binary_remote_addr zone=api_ip:10m rate=10r/s;

server {
    listen 80;
    server_name api.example.com;

    location /api/ {
        # Apply both limits
        limit_req zone=api_key burst=50 nodelay;
        limit_req zone=api_ip burst=20 nodelay;

        proxy_pass http://backend;
    }
}

Whitelist IPs from Rate Limiting

# Define whitelist
geo $limit {
    default 1;
    10.0.0.0/8 0;      # Internal network
    192.168.1.100 0;   # Trusted IP
    1.2.3.4 0;         # Another trusted IP
}

# Map to actual limit key
map $limit $limit_key {
    0 "";
    1 $binary_remote_addr;
}

# Use mapped key in zone
limit_req_zone $limit_key zone=smart:10m rate=10r/s;

server {
    listen 80;
    server_name example.com;

    location / {
        limit_req zone=smart burst=20 nodelay;
        proxy_pass http://backend;
    }
}

Custom Error Pages for Rate Limiting

limit_req_zone $binary_remote_addr zone=limited:10m rate=10r/s;

server {
    listen 80;
    server_name example.com;

    # Custom error page for rate limit
    error_page 429 /rate_limit.html;

    location / {
        limit_req zone=limited burst=20;
        limit_req_status 429;
        proxy_pass http://backend;
    }

    location = /rate_limit.html {
        root /var/www/errors;
        internal;
    }
}

Or return JSON:

location / {
    limit_req zone=limited burst=20;
    limit_req_status 429;

    proxy_pass http://backend;

    # Intercept rate limit errors
    proxy_intercept_errors on;
}

error_page 429 = @ratelimit;

location @ratelimit {
    default_type application/json;
    return 429 '{"error":"Rate limit exceeded","retry_after":60}';
}

DDoS Protection Strategies

1. Connection Limiting

limit_conn_zone $binary_remote_addr zone=addr:10m;
limit_conn_zone $server_name zone=perserver:10m;

server {
    listen 80;
    server_name example.com;

    # Limit connections per IP
    limit_conn addr 10;

    # Limit total connections to server
    limit_conn perserver 1000;
}

2. Request Rate Limiting

limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;

server {
    listen 80;
    server_name example.com;

    location / {
        limit_req zone=one burst=20 nodelay;
        limit_req_status 444;  # Close connection without response
        proxy_pass http://backend;
    }
}

3. Block Common Attack Patterns

server {
    listen 80;
    server_name example.com;

    # Block requests with no User-Agent
    if ($http_user_agent = "") {
        return 444;
    }

    # Block suspicious user agents
    # WARNING: this pattern also blocks legitimate search engine crawlers
    # (Googlebot, Bingbot, etc.) and will hurt SEO — use a more targeted list in production
    if ($http_user_agent ~* (bot|crawler|spider|scraper)) {
        return 403;
    }

    # Block requests with no referer (except certain paths)
    location / {
        valid_referers none blocked example.com *.example.com;
        if ($invalid_referer) {
            return 403;
        }
    }

    # Block request methods
    if ($request_method !~ ^(GET|POST|HEAD)$) {
        return 405;
    }
}

4. Slow Connection Handling

server {
    listen 80;
    server_name example.com;

    # Timeout for slow clients
    client_body_timeout 5s;
    client_header_timeout 5s;
    send_timeout 5s;

    # Limit request body size
    client_max_body_size 1m;
}

5. Geographic Blocking (GeoIP)

Requires GeoIP module:

# Install GeoIP
sudo apt install nginx-module-geoip geoip-database

# Or for GeoIP2
sudo apt install nginx-module-geoip2 libmaxminddb0 libmaxminddb-dev mmdb-bin
# Load module
load_module modules/ngx_http_geoip_module.so;

http {
    # GeoIP database
    geoip_country /usr/share/GeoIP/GeoIP.dat;

    # Block specific countries
    map $geoip_country_code $allowed_country {
        default yes;
        CN no;  # Block China
        RU no;  # Block Russia
        KP no;  # Block North Korea
    }

    server {
        listen 80;
        server_name example.com;

        if ($allowed_country = no) {
            return 403;
        }
    }
}

6. Fail2Ban Integration

Install and configure Fail2Ban:

# Install Fail2Ban
sudo apt install fail2ban

# Create Nginx jail
sudo nano /etc/fail2ban/jail.local
[nginx-req-limit]
enabled = true
filter = nginx-req-limit
action = iptables-multiport[name=ReqLimit, port="http,https", protocol=tcp]
logpath = /var/log/nginx/error.log
findtime = 600
bantime = 7200
maxretry = 10

[nginx-conn-limit]
enabled = true
filter = nginx-conn-limit
action = iptables-multiport[name=ConnLimit, port="http,https", protocol=tcp]
logpath = /var/log/nginx/error.log
findtime = 600
bantime = 7200
maxretry = 10

Create filters:

sudo nano /etc/fail2ban/filter.d/nginx-req-limit.conf
[Definition]
failregex = limiting requests, excess:.* by zone.*client: <HOST>
ignoreregex =
sudo nano /etc/fail2ban/filter.d/nginx-conn-limit.conf
[Definition]
failregex = limiting connections by zone.*client: <HOST>
ignoreregex =

Restart Fail2Ban:

sudo systemctl restart fail2ban
sudo fail2ban-client status nginx-req-limit

Complete DDoS Protection Configuration

# Rate limiting zones
limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=strict:10m rate=2r/s;
limit_conn_zone $binary_remote_addr zone=addr:10m;
limit_conn_zone $server_name zone=perserver:10m;

# Whitelist
geo $limit {
    default 1;
    10.0.0.0/8 0;
    127.0.0.1 0;
}

map $limit $limit_key {
    0 "";
    1 $binary_remote_addr;
}

# Bad bot detection
map $http_user_agent $bad_bot {
    default 0;
    ~*(bot|crawler|spider|scraper|curl|wget) 1;
}

server {
    listen 80;
    listen [::]:80;
    server_name example.com;

    # Global connection limits
    limit_conn addr 10;
    limit_conn perserver 1000;

    # Timeouts
    client_body_timeout 10s;
    client_header_timeout 10s;
    send_timeout 10s;

    # Request size limits
    client_max_body_size 1m;
    client_body_buffer_size 128k;

    # Block empty user agents
    if ($http_user_agent = "") {
        return 444;
    }

    # Block bad bots
    if ($bad_bot) {
        return 403;
    }

    # Block invalid request methods
    if ($request_method !~ ^(GET|POST|HEAD)$) {
        return 405;
    }

    # General pages
    location / {
        limit_req zone=general burst=20 nodelay;
        limit_req_status 429;

        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }

    # Login/authentication (more restrictive)
    location ~ ^/(login|register|password-reset) {
        limit_req zone=strict burst=5;
        limit_req_status 429;

        proxy_pass http://backend;
    }

    # API endpoints
    location /api/ {
        # API-specific rate limiting
        limit_req zone=general burst=50 nodelay;

        proxy_pass http://backend;
    }

    # Static files (less restrictive)
    location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        access_log off;
    }

    # Health check (no limits)
    location /health {
        access_log off;
        return 200 "OK\n";
    }

    # Custom rate limit error page
    error_page 429 /rate_limit.html;
    location = /rate_limit.html {
        internal;
        root /var/www/errors;
    }
}

Monitoring Rate Limiting

Log Format

log_format limit '$remote_addr - [$time_local] "$request" '
                 '$status $body_bytes_sent '
                 'limit_req_status=$limit_req_status';

server {
    access_log /var/log/nginx/access.log limit;
}

Check Logs

# View rate limit events
grep "limiting requests" /var/log/nginx/error.log

# Count rate limit events by IP
awk '/limiting requests/ {print $11}' /var/log/nginx/error.log | sort | uniq -c | sort -rn

# View connection limit events
grep "limiting connections" /var/log/nginx/error.log

# Real-time monitoring
tail -f /var/log/nginx/error.log | grep --color "limiting"

Testing Rate Limiting

# Simple test with curl
for i in {1..50}; do
    curl -s -o /dev/null -w "%{http_code}\n" http://example.com/
done

# Test with Apache Bench
ab -n 100 -c 10 http://example.com/

# Test with specific IP (if proxying)
for i in {1..50}; do
    curl -H "X-Forwarded-For: 1.2.3.4" -s -o /dev/null -w "%{http_code}\n" http://example.com/
done

# Test API with key
for i in {1..200}; do
    curl -H "X-API-Key: testkey123" -s -o /dev/null -w "%{http_code}\n" http://api.example.com/endpoint
done

Best Practices

1. **Use burst with nodelay**: Better user experience
2. **Different limits for different endpoints**: Login more restrictive than static files
3. **Whitelist trusted IPs**: Internal networks, monitoring services
4. **Log and monitor**: Track who's being limited
5. **Return appropriate status codes**: 429 for rate limits, 403 for bans
6. **Combine with Fail2Ban**: Auto-ban repeat offenders
7. **Test thoroughly**: Ensure legitimate users aren't affected
8. **Use connection limits**: Protect against slowloris attacks
9. **Implement timeouts**: Prevent slow connections from tying up resources
10. **Regular review**: Adjust limits based on traffic patterns

Recommended Rate Limits by Endpoint

Endpoint Type Rate Limit Burst Notes
Static files 100r/s 200 Very permissive
General pages 10r/s 20 Normal browsing
Search 5r/s 10 Resource intensive
Login/Auth 5r/m 10 Prevent brute force
API (authenticated) 100r/s 50 Per API key
API (public) 10r/s 20 Per IP
Admin pages 2r/s 5 Extra protection

18: HTTP/2 Configuration and Benefits

What is HTTP/2?

HTTP/2 is the second major version of HTTP protocol, offering significant performance improvements over HTTP/1.1:

Key Features: - Multiplexing: Multiple requests over single connection - Header compression: Reduced overhead with HPACK - Server push: Proactively send resources - Binary protocol: More efficient parsing - Stream prioritization: Critical resources first

HTTP/1.1 vs HTTP/2

Feature HTTP/1.1 HTTP/2
Requests per connection Sequential only (browsers open 6-8 parallel connections) Unlimited (multiplexed)
Header compression No Yes (HPACK)
Server push No Yes
Protocol Text Binary
Multiplexing No Yes
Stream priority No Yes

Enabling HTTP/2 in Nginx

HTTP/2 requires Nginx 1.9.5+ and SSL/TLS.

Basic Configuration

server {
    # Enable HTTP/2 (requires SSL)
    listen 443 ssl http2;
    listen [::]:443 ssl http2;

    server_name example.com;

    # SSL certificates
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    root /var/www/example.com;
    index index.html;
}

Verify HTTP/2 is Working

# Check Nginx supports HTTP/2
nginx -V 2>&1 | grep http_v2

# Test with curl (requires curl 7.47+)
curl -I --http2 https://example.com

# Should see:
# HTTP/2 200

# Detailed test
curl -I --http2 -v https://example.com 2>&1 | grep "ALPN"

# Browser DevTools
# Network tab → Protocol column shows "h2"

HTTP/2 Server Push

Proactively send resources before client requests them.

server {
    listen 443 ssl http2;
    server_name example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    root /var/www/example.com;

    # Push critical resources when index.html is requested
    location = /index.html {
        http2_push /css/style.css;
        http2_push /js/app.js;
        http2_push /images/logo.png;
    }

    # Dynamic push based on response
    location / {
        add_header Link "</css/style.css>; rel=preload; as=style";
        add_header Link "</js/app.js>; rel=preload; as=script";
    }
}

HTTP/2 Configuration Directives

http {
    # HTTP/2 settings

    # Maximum concurrent streams per connection
    http2_max_concurrent_streams 128;

    # Maximum field size in request/response
    http2_max_field_size 16k;

    # Maximum header size
    http2_max_header_size 32k;

    # Body chunk size
    http2_chunk_size 8k;

    # Timeout for HTTP/2 connections
    http2_recv_timeout 30s;

    # HTTP/2 push preload
    http2_push_preload on;

    server {
        listen 443 ssl http2;
        # ...
    }
}

Optimizing for HTTP/2

1. Remove Domain Sharding

HTTP/1.1 best practice was multiple domains. Not needed with HTTP/2.

# OLD (HTTP/1.1): Multiple domains
# static1.example.com, static2.example.com, static3.example.com

# NEW (HTTP/2): Single domain
server {
    listen 443 ssl http2;
    server_name example.com;

    # All resources from one domain
    root /var/www/example.com;
}

2. Don't Concatenate Files

HTTP/1.1 concatenated CSS/JS to reduce requests. HTTP/2 handles multiple files efficiently.

server {
    listen 443 ssl http2;
    server_name example.com;

    # Serve individual files, let HTTP/2 multiplex
    location /css/ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    location /js/ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
}
<!-- OLD (HTTP/1.1): Concatenated -->
<link rel="stylesheet" href="/css/all.min.css">
<script src="/js/all.min.js"></script>

<!-- NEW (HTTP/2): Separate files -->
<link rel="stylesheet" href="/css/normalize.css">
<link rel="stylesheet" href="/css/main.css">
<link rel="stylesheet" href="/css/components.css">
<script src="/js/utils.js"></script>
<script src="/js/app.js"></script>
<script src="/js/analytics.js"></script>

3. Use Server Push Strategically

server {
    listen 443 ssl http2;
    server_name example.com;

    # Push only critical, render-blocking resources (2-3 at most); pushing
    # assets the client already has cached wastes bandwidth.
    # NOTE(review): http2_push was removed in nginx 1.25.1.
    location = /index.html {
        # Critical CSS
        http2_push /css/critical.css;

        # NOT recommended: Push everything
        # http2_push /css/style.css;
        # http2_push /css/theme.css;
        # http2_push /js/app.js;
        # http2_push /js/vendor.js;
        # ... (can overwhelm client)
    }
}

4. Enable Header Compression

HTTP/2 automatically uses HPACK compression for headers. Ensure headers are consistent:

server {
    listen 443 ssl http2;
    server_name example.com;

    # HPACK indexes headers that repeat across requests, so keeping header
    # names and values identical between responses maximizes compression.
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;

    # Upstream request headers.
    # NOTE(review): proxy_set_header only takes effect together with a
    # proxy_pass in this location — none is shown here; confirm one is
    # intended.
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

HTTP/2 with Reverse Proxy

# Backend pool. "keepalive 32" keeps up to 32 idle upstream connections
# per worker process — this only works when the proxy location also sets
# proxy_http_version 1.1 and clears the Connection header.
upstream backend {
    server backend1.example.com:8080;
    server backend2.example.com:8080;
    keepalive 32;
}

server {
    listen 443 ssl http2;
    server_name example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    location / {
        # HTTP/2 terminates at nginx; the upstream hop speaks HTTP/1.1.
        proxy_pass http://backend;
        # HTTP/1.1 plus an empty Connection header enables upstream
        # keepalive (required by "keepalive" in the upstream block).
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

Complete Optimized HTTP/2 Configuration

http {
    # HTTP/2 settings.
    # NOTE(review): http2_max_field_size / http2_max_header_size are
    # obsolete since nginx 1.19.7, and http2_push / http2_push_preload
    # were removed in 1.25.1 — verify against the deployed version.
    http2_max_concurrent_streams 128;
    http2_max_field_size 16k;
    http2_max_header_size 32k;
    http2_chunk_size 8k;
    http2_push_preload on;

    # SSL optimization for HTTP/2
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    ssl_session_cache shared:SSL:50m;
    ssl_session_timeout 1d;
    ssl_session_tickets off;
    # OCSP stapling.
    # NOTE(review): stapling needs a "resolver" directive so nginx can
    # reach the CA's OCSP responder — confirm one is configured.
    ssl_stapling on;
    ssl_stapling_verify on;

    # Performance
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;

    # Gzip (still useful for HTTP/2)
    gzip on;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml;

    server {
        listen 443 ssl http2;
        listen [::]:443 ssl http2;
        server_name example.com;

        ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
        ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;

        root /var/www/example.com;
        index index.html;

        # Security headers.
        # BUG FIX: add_header is NOT additive across levels — a location
        # that declares its own add_header (e.g. Cache-Control below)
        # discards every header inherited from here. The security headers
        # are therefore repeated inside each such location.
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;

        # Main page with server push (inherits the security headers above
        # because it declares no add_header of its own)
        location = / {
            # Push critical resources
            http2_push /css/critical.css;
            http2_push /js/app.js;

            try_files /index.html =404;
        }

        # CSS files
        location /css/ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            # Re-declare security headers lost to add_header inheritance
            add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            access_log off;
        }

        # JavaScript files
        location /js/ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            access_log off;
        }

        # Images
        location /images/ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            access_log off;
        }

        # Fonts (CORS header needed for cross-origin font loading)
        location /fonts/ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            add_header Access-Control-Allow-Origin *;
            add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            access_log off;
        }
    }
}

Backend can control what to push:

server {
    listen 443 ssl http2;
    server_name example.com;

    # Convert "Link: rel=preload" headers from the upstream response into
    # HTTP/2 server pushes.
    # NOTE(review): removed in nginx 1.25.1 along with server push.
    http2_push_preload on;

    location / {
        proxy_pass http://backend;
        proxy_set_header Host $host;
    }
}

Backend sends:

Link: </css/style.css>; rel=preload; as=style
Link: </js/app.js>; rel=preload; as=script

Nginx automatically pushes these resources. (Note: HTTP/2 server push, including `http2_push_preload`, was removed in nginx 1.25.1; on newer versions the Link headers act only as preload hints.)

Measuring HTTP/2 Performance

Using nghttp2

# Install nghttp2
sudo apt install nghttp2-client

# Test HTTP/2 connection
nghttp -nv https://example.com

# Verbose output with timing
nghttp -nvs https://example.com

# Test server push
nghttp -nv https://example.com | grep "push"

Using Browser DevTools

  1. Open Chrome DevTools
  2. Network tab
  3. Right-click columns → Enable "Protocol"
  4. Look for "h2" in Protocol column
  5. Timing tab shows multiplexing benefits

Performance Comparison

# HTTP/1.1 (6 concurrent connections)
time curl --http1.1 -s https://example.com/page1 https://example.com/page2 https://example.com/page3 https://example.com/page4 https://example.com/page5 https://example.com/page6 > /dev/null

# HTTP/2 (single connection, multiplexed)
time curl --http2 -s https://example.com/page1 https://example.com/page2 https://example.com/page3 https://example.com/page4 https://example.com/page5 https://example.com/page6 > /dev/null

Common HTTP/2 Issues and Solutions

Issue: HTTP/2 Not Working

# Check Nginx version
nginx -v  # Need 1.9.5+

# Check HTTP/2 module
nginx -V 2>&1 | grep http_v2

# Check SSL is configured
nginx -T | grep "listen.*443.*ssl"

# Test with curl
curl -I --http2 https://example.com

Issue: Server Push Not Working

# Ensure http2_push_preload is on
http2_push_preload on;

# Check paths are correct (relative to root)
location = /index.html {
    http2_push /css/style.css;  # Correct
    # NOT: http2_push css/style.css;
    # NOT: http2_push https://example.com/css/style.css;
}

# Verify with nghttp2
nghttp -nv https://example.com | grep "push"

Issue: Performance Worse with HTTP/2

# Reduce concurrent streams if overwhelming clients
http2_max_concurrent_streams 64;  # Default is 128

# Don't push too many resources
location = /index.html {
    # Push only 2-3 critical resources
    http2_push /css/critical.css;
    http2_push /js/app.js;
    # NOT 10+ resources
}

# Ensure connection reuse
keepalive_timeout 65;

HTTP/2 vs HTTP/3

HTTP/3 (based on QUIC) is the next evolution:

# HTTP/3 support (nginx 1.25.0+ built with the QUIC/HTTP3 module)
server {
    # BUG FIX: HTTP/3 runs over QUIC (UDP); the listen parameter is
    # "quic", not "ssl http3" — the original line would not parse.
    listen 443 quic reuseport;
    # Keep TCP-based HTTP/2 as a fallback on the same port number.
    listen 443 ssl http2;

    server_name example.com;

    # Advertise HTTP/3 so clients can switch protocols on a later request
    add_header Alt-Svc 'h3=":443"; ma=86400';

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
}

Best Practices

  1. Always use HTTPS: browsers only support HTTP/2 over TLS (cleartext h2c exists in the spec but has no browser support)
  2. Use single domain: No need for domain sharding
  3. Don't concatenate files: Serve individual files
  4. Push selectively: Only critical resources (2-3 files)
  5. Keep connections alive: Use keepalive
  6. Monitor protocol usage: Check h2 adoption in logs
  7. Test thoroughly: Use nghttp2 and browser DevTools
  8. Optimize TLS: Session resumption, OCSP stapling
  9. Use modern ciphers: ECDHE for forward secrecy
  10. Enable HPACK compression: Consistent headers

Checklist for HTTP/2

  • Nginx 1.9.5 or higher
  • SSL/TLS configured with valid certificate
  • http2 parameter in listen directive
  • Tested with curl or nghttp2
  • Server push configured for critical resources
  • Domain sharding removed
  • File concatenation reviewed
  • Performance tested and compared
  • Browser DevTools shows "h2" protocol
  • Monitoring enabled for HTTP/2 metrics