Sobre nós Guias Projetos Contactos
Админка
please wait

Introdução

O Nginx destaca-se como reverse proxy, posicionando-se entre os clientes e os servidores de backend para fornecer balanceamento de carga, terminação SSL, caching e encaminhamento de pedidos. Este guia abrange configurações práticas do Nginx para cenários comuns de reverse proxy.

Instalação

# Install nginx from the distribution package repositories.
# Ubuntu/Debian
sudo apt update
sudo apt install nginx
# CentOS/RHEL
sudo dnf install nginx
# Start the service now and enable it at boot
sudo systemctl start nginx
sudo systemctl enable nginx

Reverse proxy básico

Proxy pass simples

# /etc/nginx/sites-available/app.conf
# Minimal reverse proxy: every request for app.example.com is forwarded
# to a local backend listening on port 3000.
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://localhost:3000;
# Talk HTTP/1.1 to the backend (nginx defaults to 1.0 for proxying).
proxy_http_version 1.1;
# Preserve the original Host header and the client's address so the
# backend sees who really made the request.
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Tells the backend whether the client connected via http or https.
proxy_set_header X-Forwarded-Proto $scheme;
}
}

Ativar configuração

# Enable the site, validate the configuration, then reload with no downtime.
sudo ln -s /etc/nginx/sites-available/app.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx

Balanceamento de carga

Round robin (predefinição)

# Upstream pool: requests are distributed round-robin (the default policy).
upstream backend {
server 192.168.1.10:3000;
server 192.168.1.11:3000;
server 192.168.1.12:3000;
# Keep up to 32 idle connections open per worker instead of opening a
# new TCP connection to a backend for every request.
keepalive 32;
}
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://backend;
# Upstream keepalive requires HTTP/1.1 and a cleared Connection
# header (see the nginx keepalive directive documentation).
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

Balanceamento de carga ponderado

# Weighted round robin: traffic is split 5:3:1 across the servers,
# useful when backends have different capacities.
upstream backend {
server 192.168.1.10:3000 weight=5; # receives 5x the traffic of weight=1
server 192.168.1.11:3000 weight=3;
server 192.168.1.12:3000 weight=1;
}

Menor número de ligações

# least_conn: each request goes to the server with the fewest active
# connections — helpful when request durations vary widely.
upstream backend {
least_conn;
server 192.168.1.10:3000;
server 192.168.1.11:3000;
server 192.168.1.12:3000;
}

IP hash (persistência de sessão)

# ip_hash: the same client IP is always routed to the same backend,
# providing basic session persistence without shared session storage.
upstream backend {
ip_hash;
server 192.168.1.10:3000;
server 192.168.1.11:3000;
server 192.168.1.12:3000;
}

Verificações de saúde

# Passive health checks: after 3 failed attempts within fail_timeout (30s)
# a server is considered unavailable for the next 30s.
upstream backend {
server 192.168.1.10:3000 max_fails=3 fail_timeout=30s;
server 192.168.1.11:3000 max_fails=3 fail_timeout=30s;
server 192.168.1.12:3000 backup; # receives traffic only when all primaries are down
}

Configuração SSL/TLS

Com Let's Encrypt

# Install certbot and its nginx plugin
sudo apt install certbot python3-certbot-nginx
# Obtain a certificate and let certbot edit the nginx config automatically
sudo certbot --nginx -d app.example.com

Configuração SSL manual

# Plain-HTTP server: permanently redirect everything to HTTPS.
server {
listen 80;
server_name app.example.com;
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2;
server_name app.example.com;
ssl_certificate /etc/letsencrypt/live/app.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/app.example.com/privkey.pem;
# SSL settings: modern protocols and AEAD cipher suites only.
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
# Cache TLS sessions to cut handshake cost on reconnects.
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 1d;
ssl_session_tickets off;
# HSTS: instruct browsers to use HTTPS only for the next year.
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
location / {
proxy_pass http://localhost:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}

Suporte WebSocket

# Map the client's Upgrade header to a Connection header value:
# "upgrade" only when the client actually requests a protocol upgrade,
# "close" otherwise. Hard-coding `Connection "upgrade"` on every request
# (the previous version) forces upgrade semantics on plain HTTP traffic.
# NOTE: the map block must live in the http{} context.
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://localhost:3000;
# WebSocket handshakes require HTTP/1.1 and hop-by-hop headers
# forwarded explicitly.
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# Allow long-lived WebSocket connections (24 h) before nginx
# closes an idle proxied connection.
proxy_read_timeout 86400;
}
# Separate location for a dedicated WebSocket endpoint.
location /ws {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_read_timeout 86400;
}
}

Encaminhamento baseado em caminho

# Route by URL path to different backends. Note: a trailing slash on
# proxy_pass makes nginx strip the matched location prefix before
# forwarding (e.g. /api/users -> /users on port 3000).
server {
listen 80;
server_name app.example.com;
# API backend (prefix /api/ is stripped)
location /api/ {
proxy_pass http://localhost:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
# Static files backend (prefix /static/ is stripped)
location /static/ {
proxy_pass http://localhost:4000/;
}
# WebSocket backend — URI is rewritten to /socket.io/ on port 5000
location /socket.io/ {
proxy_pass http://localhost:5000/socket.io/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# Frontend (default catch-all)
location / {
proxy_pass http://localhost:8080;
proxy_set_header Host $host;
}
}

Caching

Proxy cache

# Define cache zone in http block
# Define the cache zone in the http block: on-disk store under
# /var/cache/nginx, 10 MB of shared memory for keys ("my_cache"),
# 1 GB max on disk, entries idle for 60 min are evicted.
http {
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m
max_size=1g inactive=60m use_temp_path=off;
}
server {
listen 80;
server_name app.example.com;
location / {
proxy_pass http://localhost:3000;
# Enable caching for this location.
proxy_cache my_cache;
# Cache successful responses for 10 min, 404s for 1 min.
proxy_cache_valid 200 302 10m;
proxy_cache_valid 404 1m;
# Serve a stale cached copy when the backend errors out.
proxy_cache_use_stale error timeout http_500 http_502 http_503 http_504;
# Cache key: scheme + upstream host + full request URI.
proxy_cache_key $scheme$proxy_host$request_uri;
# Expose HIT/MISS/BYPASS status for debugging.
add_header X-Cache-Status $upstream_cache_status;
}
# Never cache dynamic API responses.
location /api/ {
proxy_pass http://localhost:3000;
proxy_cache_bypass 1;
proxy_no_cache 1;
}
}

Caching de ficheiros estáticos

# Case-insensitive regex match on static asset extensions: cache at the
# proxy for 30 days and tell browsers to do the same.
location ~* \.(jpg|jpeg|png|gif|ico|css|js|woff2)$ {
proxy_pass http://localhost:3000;
proxy_cache my_cache;
proxy_cache_valid 200 30d;
expires 30d;
# "immutable" assumes filenames are content-hashed so they never change.
add_header Cache-Control "public, immutable";
}

Limitação de taxa

http {
# Define rate limit zones keyed on the client IP ($binary_remote_addr
# uses less memory than $remote_addr); 10 MB stores ~160k states.
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=login_limit:10m rate=1r/s;
}
server {
listen 80;
server_name app.example.com;
# General API rate limit: allow bursts of 20 above 10 r/s; nodelay
# serves burst requests immediately instead of queueing them.
location /api/ {
limit_req zone=api_limit burst=20 nodelay;
proxy_pass http://localhost:3000;
}
# Strict limit for login: 1 r/s with a queued burst of 5.
location /api/login {
limit_req zone=login_limit burst=5;
proxy_pass http://localhost:3000;
}
}

Cabeçalhos de segurança

server {
listen 443 ssl http2;
server_name app.example.com;
# Security headers ("always" adds them to error responses too).
# NOTE(review): add_header directives at server level are NOT inherited
# by a location that defines its own add_header — keep that in mind
# when extending this config.
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
# NOTE(review): X-XSS-Protection is ignored by most modern browsers;
# kept here for legacy clients.
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline'" always;
# Hide the nginx version in responses and error pages.
server_tokens off;
location / {
proxy_pass http://localhost:3000;
}
}

Encaminhar IP do cliente

Configurar o Nginx

# Forward the real client address and protocol to the backend; without
# these headers the backend only sees the proxy's own IP.
location / {
proxy_pass http://localhost:3000;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}

Configuração da aplicação

// Laravel - trust the reverse proxy so X-Forwarded-* headers are honored
// app/Http/Middleware/TrustProxies.php
protected $proxies = '*'; // or a specific proxy IP for tighter security
// Node.js/Express equivalent
app.set('trust proxy', true);

Backend Apache

# Enable mod_remoteip so Apache logs/reports the real client IP taken
# from X-Forwarded-For, but only when the request comes from the
# trusted proxy (127.0.0.1).
RemoteIPHeader X-Forwarded-For
RemoteIPTrustedProxy 127.0.0.1

Timeouts e buffering

server {
listen 80;
server_name app.example.com;
# Timeouts: connect to backend, send request, wait for response.
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
# Buffering: nginx reads the backend response into buffers and frees
# the backend connection quickly even for slow clients.
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 16k;
proxy_busy_buffers_size 24k;
# For large file uploads.
client_max_body_size 100M;
proxy_request_buffering off; # stream uploads to the backend directly
location / {
proxy_pass http://localhost:3000;
}
# Disable buffering for Server-Sent Events / streaming responses so
# chunks reach the client immediately.
location /events {
proxy_pass http://localhost:3000;
proxy_buffering off;
proxy_cache off;
}
}

Configuração de microservices

# One upstream per microservice; hostnames are resolved at config load.
upstream auth_service {
server auth-server:3000;
}
upstream user_service {
server user-server:3001;
}
upstream order_service {
server order-server:3002;
}
server {
listen 80;
server_name api.example.com;
# Trailing slash on proxy_pass strips the location prefix, so
# /auth/login is forwarded to the auth service as /login.
location /auth/ {
proxy_pass http://auth_service/;
}
location /users/ {
proxy_pass http://user_service/;
}
location /orders/ {
proxy_pass http://order_service/;
}
}

Nginx como proxy de ingress do Kubernetes

Ao executar Kubernetes com o Nginx Ingress Controller implementado como NodePort, configure o Nginx externo para fazer proxy para o cluster:

Configuração de upstream para Kubernetes

# /etc/nginx/conf.d/k8s.conf
# External nginx in front of a Kubernetes Ingress Controller exposed
# via NodePort. TLS terminates here; traffic to the cluster is plain HTTP.
upstream k8s_ingresses_80 {
server localhost:31180; # NodePort for HTTP
# Or for multiple nodes:
# server node1.cluster:31180;
# server node2.cluster:31180;
}
upstream k8s_ingresses_443 {
server localhost:31443; # NodePort for HTTPS
}
# HTTPS termination at the external nginx ("default_server" catches any
# hostname; the Host header is forwarded so in-cluster ingress rules
# can still route by host).
server {
listen 443 ssl default_server;
server_name _;
ssl_certificate "/path/to/certs/fullchain.pem";
ssl_certificate_key "/path/to/certs/privkey.pem";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_prefer_server_ciphers on;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://k8s_ingresses_80;
}
}
# HTTP: redirect everything to HTTPS
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}

Certificados de cliente (mTLS)

Para comunicação segura service-to-service:

# Mutual TLS: the server presents its own certificate AND requires a
# valid client certificate signed by the configured CA.
server {
listen 443 ssl;
server_name api.internal.example.com;
ssl_certificate /path/to/server.crt;
ssl_certificate_key /path/to/server.key;
# CA used to verify client certificates; verification is mandatory.
ssl_client_certificate /path/to/ca.crt;
ssl_verify_client on;
# Accept chains up to 2 intermediates deep.
ssl_verify_depth 2;
location / {
# Pass the client certificate (PEM) and verification result
# (SUCCESS/FAILED/NONE) to the backend for its own authorization.
proxy_set_header X-Client-Cert $ssl_client_cert;
proxy_set_header X-Client-Verify $ssl_client_verify;
proxy_pass http://backend;
}
}

Resolução de problemas

Testar configuração

# Validate configuration syntax before reloading
sudo nginx -t

Ver logs

# Access log (one line per request)
tail -f /var/log/nginx/access.log
# Error log
tail -f /var/log/nginx/error.log

Depurar configuração

# Enable verbose debug logging
# NOTE(review): full "debug" detail requires nginx built with --with-debug
error_log /var/log/nginx/error.log debug;

Boas práticas

  1. Utilize HTTP/2 para ligações SSL
  2. Defina timeouts adequados para o seu backend
  3. Ative a compressão gzip para conteúdo de texto
  4. Utilize caching para conteúdo estático
  5. Implemente limitação de taxa para endpoints de API
  6. Adicione cabeçalhos de segurança a todas as respostas
  7. Monitorize a saúde do upstream com verificações de falha
  8. Utilize ligações keepalive para os backends

Conclusão

O Nginx como reverse proxy disponibiliza funcionalidades essenciais para implementações em produção: terminação SSL, balanceamento de carga, caching e segurança. Configure-o de forma adequada às necessidades do seu backend, implemente caching quando for benéfico e monitorize os logs para detetar problemas. As configurações neste guia cobrem a maioria dos cenários comuns de proxy.

 
 
 
Языки
Темы
Copyright © 1999 — 2026
ZK Interactive