Introduction
Nginx excels as a reverse proxy, sitting between clients and backend servers to provide load balancing, SSL termination, caching, and request routing. This guide covers practical Nginx configurations for common reverse proxy scenarios.
Installation
# Ubuntu/Debian
sudo apt update
sudo apt install nginx
# CentOS/RHEL
sudo dnf install nginx
# Start and enable
sudo systemctl start nginx
sudo systemctl enable nginx
Basic Reverse Proxy
Simple Proxy Pass
# /etc/nginx/sites-available/app.conf
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
Enable Configuration
sudo ln -s /etc/nginx/sites-available/app.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
Load Balancing
Round Robin (Default)
upstream backend {
server 192.168.1.10:3000;
server 192.168.1.11:3000;
server 192.168.1.12:3000;
}
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
Weighted Load Balancing
upstream backend {
server 192.168.1.10:3000 weight=5; # receives 5x the requests of a weight=1 server
server 192.168.1.11:3000 weight=3;
server 192.168.1.12:3000 weight=1;
}
Least Connections
upstream backend {
least_conn;
server 192.168.1.10:3000;
server 192.168.1.11:3000;
server 192.168.1.12:3000;
}
IP Hash (Session Persistence)
upstream backend {
ip_hash;
server 192.168.1.10:3000;
server 192.168.1.11:3000;
server 192.168.1.12:3000;
}
Health Checks
upstream backend {
server 192.168.1.10:3000 max_fails=3 fail_timeout=30s;
server 192.168.1.11:3000 max_fails=3 fail_timeout=30s;
server 192.168.1.12:3000 backup; # Only used when others fail
}
SSL/TLS Configuration
With Let's Encrypt
# Install Certbot
sudo apt install certbot python3-certbot-nginx
# Obtain certificate
sudo certbot --nginx -d app.example.com
Manual SSL Configuration
server {
listen 80;
server_name app.example.com;
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2; # on nginx 1.25.1+ prefer "listen 443 ssl;" plus a separate "http2 on;" directive
server_name app.example.com;
ssl_certificate /etc/letsencrypt/live/app.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/app.example.com/privkey.pem;
# SSL settings
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 1d;
ssl_session_tickets off;
# HSTS
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
location / {
proxy_pass http://localhost:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
WebSocket Support
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# Allow idle WebSocket connections to stay open for 24 hours (86400s)
proxy_read_timeout 86400;
}
# Separate location for WebSocket endpoint
location /ws {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
}
Path-Based Routing
server {
listen 80;
server_name app.example.com;
# API backend (the trailing slash in proxy_pass strips the /api/ prefix:
# a request for /api/users is forwarded as /users)
location /api/ {
proxy_pass http://localhost:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
# Static files backend
location /static/ {
proxy_pass http://localhost:4000/;
}
# WebSocket backend
location /socket.io/ {
proxy_pass http://localhost:5000/socket.io/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# Frontend (default)
location / {
proxy_pass http://localhost:8080;
proxy_set_header Host $host;
}
}
Caching
Proxy Cache
# Define cache zone in http block
http {
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m
max_size=1g inactive=60m use_temp_path=off;
}
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://localhost:3000;
# Enable caching
proxy_cache my_cache;
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
proxy_cache_use_stale error timeout http_500 http_502 http_503 http_504;
# Cache key
proxy_cache_key $scheme$proxy_host$request_uri;
# Show cache status in response header
add_header X-Cache-Status $upstream_cache_status;
}
# Bypass cache for dynamic content
location /api/ {
proxy_pass http://localhost:3000;
proxy_cache_bypass 1;
proxy_no_cache 1;
}
}
Static File Caching
location ~* \.(jpg|jpeg|png|gif|ico|css|js|woff2)$ {
proxy_pass http://localhost:3000;
proxy_cache my_cache;
proxy_cache_valid 200 30d;
expires 30d;
add_header Cache-Control "public, immutable";
}
Rate Limiting
http {
# Define rate limit zones
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=login_limit:10m rate=1r/s;
}
server {
listen 80;
server_name app.example.com;
# General API rate limit
location /api/ {
limit_req zone=api_limit burst=20 nodelay;
proxy_pass http://localhost:3000;
}
# Strict rate limit for login
location /api/login {
limit_req zone=login_limit burst=5;
proxy_pass http://localhost:3000;
}
}
Security Headers
server {
listen 443 ssl http2;
server_name app.example.com;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline'" always;
# Hide server version
server_tokens off;
location / {
proxy_pass http://localhost:3000;
}
}
Passing Client IP
Configure Nginx
location / {
proxy_pass http://localhost:3000;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
Application Configuration
// Laravel: trust proxy
// app/Http/Middleware/TrustProxies.php
protected $proxies = '*'; // Or a specific IP
// Node.js/Express
app.set('trust proxy', true);
Apache Backend
# Enable mod_remoteip
RemoteIPHeader X-Forwarded-For
RemoteIPTrustedProxy 127.0.0.1
Timeouts and Buffering
server {
listen 80;
server_name app.example.com;
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
# Buffering
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 24k;
# For large file uploads
client_max_body_size 100M;
proxy_request_buffering off; # Stream uploads directly
location / {
proxy_pass http://localhost:3000;
}
# Disable buffering for SSE/streaming
location /events {
proxy_pass http://localhost:3000;
proxy_buffering off;
proxy_cache off;
}
}
Microservices Configuration
upstream auth_service {
server auth-server:3000;
}
upstream user_service {
server user-server:3001;
}
upstream order_service {
server order-server:3002;
}
server {
listen 80;
server_name api.example.com;
location /auth/ {
proxy_pass http://auth_service/;
}
location /users/ {
proxy_pass http://user_service/;
}
location /orders/ {
proxy_pass http://order_service/;
}
}
Nginx as a Kubernetes Ingress Proxy
When running Kubernetes with Nginx Ingress Controller deployed as NodePort, configure external Nginx to proxy to the cluster:
Upstream Configuration for Kubernetes
# /etc/nginx/conf.d/k8s.conf
upstream k8s_ingresses_80 {
server localhost:31180; # NodePort for HTTP
# Or for multiple nodes:
# server node1.cluster:31180;
# server node2.cluster:31180;
}
upstream k8s_ingresses_443 {
server localhost:31443; # NodePort for HTTPS (for TLS passthrough setups; not used by the termination example below)
}
# HTTPS termination at external Nginx
server {
listen 443 ssl default_server;
server_name _;
ssl_certificate "/path/to/certs/fullchain.pem";
ssl_certificate_key "/path/to/certs/privkey.pem";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_prefer_server_ciphers on;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://k8s_ingresses_80;
}
}
# HTTP redirect to HTTPS
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
Client Certificates (mTLS)
For secure service-to-service communication:
server {
listen 443 ssl;
server_name api.internal.example.com;
ssl_certificate /path/to/server.crt;
ssl_certificate_key /path/to/server.key;
# Require client certificate
ssl_client_certificate /path/to/ca.crt;
ssl_verify_client on;
ssl_verify_depth 2;
location / {
# Pass client certificate info to backend
proxy_set_header X-Client-Cert $ssl_client_cert;
proxy_set_header X-Client-Verify $ssl_client_verify;
proxy_pass http://backend;
}
}
Troubleshooting
Test Configuration
sudo nginx -t
View Logs
# Access log
tail -f /var/log/nginx/access.log
# Error log
tail -f /var/log/nginx/error.log
Debug Configuration
# Enable debug logging
error_log /var/log/nginx/error.log debug;
Best Practices
- Use HTTP/2 for SSL connections
- Set appropriate timeouts for your backend
- Enable gzip compression for text content
- Use caching for static content
- Implement rate limiting for API endpoints
- Add security headers to all responses
- Monitor upstream health with passive checks (max_fails/fail_timeout)
- Use keepalive connections to backends
Conclusion
Nginx as a reverse proxy provides essential features for production deployments: SSL termination, load balancing, caching, and security. Configure appropriately for your backend needs, implement caching where beneficial, and monitor logs for issues. The configurations in this guide cover most common proxy scenarios.