How to prevent DoS attacks using nginx

As a self-hoster, I want to strike a balance between the privacy of my users and the protection of my VPS. I can't use Cloudflare because it invades people's privacy, but at the same time I want to protect my server from bad actors. So today you will learn how to protect your VPS from bad actors without invading your users' privacy.

To be honest, most VPS providers nowadays offer free DDoS protection out of the box, so this article is mainly for you if you asked your provider and they said they do not support it.

But there are some drawbacks, like: 1. It's not that good — in fact it just throttles the connections of peers connected to your website. 2. It throttles all websites by default, and if you want to allow a bigger quota for one website, you need to figure it out on a site-by-site basis, as there is no template that fits all websites. For example, in my case it was Invidious: I wanted to allow more connections so videos would not hang while users watch. 3. Again, it's not perfect, so if someone with multiple PCs tries to bring your site down, nginx will not help you :)

But the good side is that it's simple and does not invade users' privacy — so it depends on your use case. For me, I will use this method until CrowdSec (an open-source, Cloudflare-like application) implements proper nginx support.

So without further ado: I will of course assume you have already installed nginx and know how to work with it. Here is our nginx.conf (located at /etc/nginx/nginx.conf):

user www-data;
worker_processes auto;
# Fix: the original "pid /run/;" names a directory, not a file — nginx needs
# the full path to its pid file.
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

# DoS tuning
#worker_processes  4;
# Give worker processes a slightly higher scheduling priority under load.
worker_priority -5;
# Coarser timer resolution reduces gettimeofday() syscall overhead.
timer_resolution 100ms;
# Raise the per-worker open-file limit so many simultaneous connections fit.
worker_rlimit_nofile 100000;

events {
    #worker_connections 768;
    #multi_accept on;
    # Maximum simultaneous connections per worker process.
    worker_connections  1024;
    # Use the efficient epoll event notification method (Linux).
    use epoll;
    # Accept as many connections as possible, after nginx gets notification about a new connection.
    multi_accept on;
}
# Fix: the events { } block was never closed, which makes the whole
# configuration invalid ("nginx -t" fails). The closing brace above ends it
# before the http { } context begins.
http {
        # Larger hash bucket so long server_name values fit.
        server_names_hash_bucket_size  128;

        # Basic Settings

        # Use the sendfile() syscall for efficient static file serving.
        sendfile on;
        # Send headers and the start of a file in one packet (with sendfile).
        tcp_nopush on;
        types_hash_max_size 2048;
        # Hide the nginx version in error pages and the Server header.
        server_tokens off;

        # server_names_hash_bucket_size 64;
        server_name_in_redirect off;

        include /etc/nginx/mime.types;
        default_type application/octet-stream;

        # SSL Settings

        # Fix: TLSv1 and TLSv1.1 are formally deprecated (RFC 8996) and
        # considered insecure; offer only TLS 1.2 and 1.3. (SSLv3 was already
        # dropped, ref: POODLE.)
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_prefer_server_ciphers on;

        # Logging Settings

        # NOTE(review): the access_log below is superseded later in this file
        # by the buffered "main" access_log in the DoS section.
        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log;

        # Gzip Settings
        # (Left disabled here; active gzip directives live in the DoS section
        # further down in this file.)

        # gzip on;

        # gzip_vary on;
        # gzip_proxied any;
        # gzip_comp_level 6;
        # gzip_buffers 16 8k;
        # gzip_http_version 1.1;
        # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

        # Virtual Host Configs

        include /etc/nginx/conf.d/*.conf;
        # include /etc/nginx/dos.conf;
        include /etc/nginx/sites-enabled/*;

        # DoS
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    # Buffer access-log writes (16k) to cut disk I/O under heavy traffic.
    access_log /var/log/nginx/access.log  main buffer=16k;
    # Fix: the original "access_log off;" on the next line cancels ALL
    # access_log directives at this level (see ngx_http_log_module docs),
    # silently disabling the buffered log configured just above. Removed so
    # logging actually works as intended.

    # Timeouts: do not keep connections open longer than necessary, to reduce
    # resource usage and deny Slowloris-type attacks.

    # reset timed out connections freeing ram
    reset_timedout_connection on;
    # maximum time between packets the client can pause when sending nginx any data
    client_body_timeout 10s;
    # maximum time the client has to send the entire header to nginx
    client_header_timeout 10s;
    # timeout which a single keep-alive client connection will stay open
    keepalive_timeout 65s;
    # maximum time between packets nginx is allowed to pause when sending the client data
    send_timeout 10s;

    # number of requests allowed over a single keep-alive connection
    keepalive_requests 100;
    # buffers

    # FastCGI (e.g. PHP-FPM) response buffering.
    fastcgi_buffer_size 128k;
    fastcgi_buffers 256 16k;
    fastcgi_busy_buffers_size 256k;
    fastcgi_temp_file_write_size 256k;

    # Reverse-proxy response buffering.
    proxy_buffer_size   128k; 
    proxy_buffers   4 256k;
    proxy_busy_buffers_size   256k;

    fastcgi_read_timeout 150;

    # Send small packets immediately instead of waiting to coalesce them.
    tcp_nodelay on;

    #postpone_output 0;

    # Compress responses; level 2 keeps CPU cost low under load.
    gzip on;
    gzip_vary on;
    gzip_comp_level 2;
    gzip_min_length 1000;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain application/json text/xml application/xml;
    # Skip gzip for ancient MSIE 6 clients.
    gzip_disable "msie6";

    # Cap request bodies (uploads) at 20 MB; raise per-site where needed.
    client_max_body_size 20m;

    # fastcgi cache, caching request without session variable initialized by session_start()
    fastcgi_cache_path /var/cache/nginx/fastcgi_cache levels=1:2 keys_zone=fastcgi_cache:16m max_size=256m inactive=1d;
    fastcgi_temp_path /var/cache/nginx/fastcgi_temp 1 2;

    # DDoS Mitigation

    # Track concurrent connections per client IP and cap them at 100.
    limit_conn_zone $binary_remote_addr zone=perip:10m;
    limit_conn perip 100;

    # Rate-limit zones; apply them with "limit_req zone=..." in per-site configs.
    limit_req_zone $binary_remote_addr zone=engine:10m rate=2r/s;
    limit_req_zone $binary_remote_addr zone=static:10m rate=100r/s;

    # Small request buffers make memory-exhaustion attacks more expensive.
    client_body_buffer_size 200K;
    client_header_buffer_size 2k;
    large_client_header_buffers 4 8k;
}
# Fix: the http { } block was never closed; without this brace nginx refuses
# to load the configuration.
Feel free to adjust these settings to your needs, and as I said, if you want to allow a certain website more connections or a larger upload size (Nextcloud, for example), you need to configure that site by site by editing its own config.

For more info you can refer to the official nginx documentation. (NOTE: the original reference links were lost here.)

#howto #selfhost #nginx