Skip to content
Go back

Performance Testing and Load Balancing Strategies

Performance Testing and Load Balancing Strategies

Introduction

Performance testing and load balancing ensure applications handle traffic efficiently. This guide covers testing frameworks, load balancer configuration, and scaling strategies.

Prerequisites: a running HTTP application to test against, k6 installed locally, and administrative access to your load balancer (NGINX or HAProxy) and, for auto-scaling, a Kubernetes cluster.

Step 1: Performance Testing with k6

Create comprehensive load testing scenarios:

import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate, Trend, Counter } from 'k6/metrics';

// Custom metrics, reported alongside k6's built-ins and referenced by
// the `error_rate` threshold in `options` below.
const errorRate = new Rate('error_rate');               // fraction of failed user journeys
const responseTime = new Trend('response_time', true);  // `true` = include time-based percentiles
const requestsPerSecond = new Counter('requests_per_second');  // raw request count (k6 derives the rate)

// Test configuration
// Test configuration: a 16-minute staged ramp load profile.
// k6 aborts (non-zero exit) if any threshold is violated.
export const options = {
  stages: [
    { duration: '2m', target: 100 }, // Ramp up to 100 users
    { duration: '5m', target: 100 }, // Stay at 100 users
    { duration: '2m', target: 200 }, // Ramp up to 200 users
    { duration: '5m', target: 200 }, // Stay at 200 users
    { duration: '2m', target: 0 },   // Ramp down to 0 users
  ],
  thresholds: {
    http_req_duration: ['p(95)<2000'], // 95% of requests under 2s
    http_req_failed: ['rate<0.1'],     // Error rate under 10%
    error_rate: ['rate<0.05'],         // Custom error rate under 5%
  },
};

// Test data fixtures. k6's open() is only available in the init context,
// so these must load at module scope; paths are relative to this script.
const users = JSON.parse(open('../data/users.json'));
const products = JSON.parse(open('../data/products.json'));

// k6 lifecycle hook: runs once before any VU iterations.
// Seeds the API with the product fixtures and returns shared state
// (the admin token) that k6 passes to default() and teardown().
export function setup() {
  console.log('Setting up test environment...');

  const adminToken = getAdminToken();
  const adminHeaders = {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${adminToken}`
  };

  // Create test products if needed
  for (const product of products) {
    http.post(`${__ENV.BASE_URL}/api/products`, JSON.stringify(product), {
      headers: adminHeaders
    });
  }

  return { adminToken };
}

// Default VU scenario: one full shopper journey per iteration
// (login -> browse -> view product -> add to cart -> maybe checkout).
// `data` is the object returned by setup() (unused here).
export default function(data) {
  const baseUrl = __ENV.BASE_URL || 'http://localhost:3000';
  const user = users[Math.floor(Math.random() * users.length)];

  // 1. Login
  const loginResponse = http.post(`${baseUrl}/api/auth/login`, JSON.stringify({
    email: user.email,
    password: user.password
  }), {
    headers: { 'Content-Type': 'application/json' }
  });

  const loginSuccess = check(loginResponse, {
    'login successful': (r) => r.status === 200,
    'login response time < 1000ms': (r) => r.timings.duration < 1000,
  });

  // Feed the Rate metric on EVERY iteration, not just failures.
  // A Rate is failures/total of its samples; adding only `1`s would make
  // error_rate read 100% whenever any error occurred, so the
  // `rate<0.05` threshold in options would be meaningless.
  errorRate.add(!loginSuccess);
  if (!loginSuccess) {
    return;
  }

  const token = JSON.parse(loginResponse.body).token;
  const headers = {
    'Authorization': `Bearer ${token}`,
    'Content-Type': 'application/json'
  };

  sleep(1);

  // 2. Browse products
  const productsResponse = http.get(`${baseUrl}/api/products?page=1&limit=20`, {
    headers
  });

  check(productsResponse, {
    'products loaded': (r) => r.status === 200,
    'products response time < 500ms': (r) => r.timings.duration < 500,
  });

  responseTime.add(productsResponse.timings.duration);
  requestsPerSecond.add(1);

  sleep(2);

  // 3. View product details
  // NOTE(review): assumes product IDs are 1..products.length — confirm
  // this matches the seeded fixture IDs.
  const productId = Math.floor(Math.random() * products.length) + 1;
  const productResponse = http.get(`${baseUrl}/api/products/${productId}`, {
    headers
  });

  check(productResponse, {
    'product details loaded': (r) => r.status === 200,
    'product response time < 300ms': (r) => r.timings.duration < 300,
  });

  sleep(1);

  // 4. Add to cart (random quantity 1-3)
  const cartResponse = http.post(`${baseUrl}/api/cart/items`, JSON.stringify({
    productId,
    quantity: Math.floor(Math.random() * 3) + 1
  }), { headers });

  const cartSuccess = check(cartResponse, {
    'added to cart': (r) => r.status === 201,
    'cart response time < 500ms': (r) => r.timings.duration < 500,
  });

  errorRate.add(!cartSuccess);

  sleep(3);

  // 5. Simulate checkout process (80% complete, 20% abandon)
  if (Math.random() < 0.8) {
    const checkoutResponse = http.post(`${baseUrl}/api/orders`, JSON.stringify({
      paymentMethod: 'credit_card',
      shippingAddress: user.address
    }), { headers });

    check(checkoutResponse, {
      'checkout successful': (r) => r.status === 201,
      'checkout response time < 2000ms': (r) => r.timings.duration < 2000,
    });

    errorRate.add(checkoutResponse.status !== 201);
  }

  sleep(1);
}

// k6 lifecycle hook: runs once after all VU iterations complete.
// `data` is the object returned by setup(); nothing is cleaned up yet,
// the hook only logs that the run finished.
export function teardown(data) {
  console.log('Tearing down test environment...');
}

/**
 * Logs in as the admin account and returns its bearer token.
 * Credentials can be overridden via ADMIN_EMAIL / ADMIN_PASSWORD env
 * vars; the defaults match the previously hard-coded seed account.
 * Throws with a descriptive message if the login is rejected, instead
 * of letting JSON.parse fail cryptically on an error body.
 */
function getAdminToken() {
  const response = http.post(`${__ENV.BASE_URL}/api/auth/login`, JSON.stringify({
    email: __ENV.ADMIN_EMAIL || 'admin@example.com',
    password: __ENV.ADMIN_PASSWORD || 'admin123'
  }), {
    headers: { 'Content-Type': 'application/json' }
  });

  if (response.status !== 200) {
    throw new Error(`Admin login failed with status ${response.status}`);
  }

  return JSON.parse(response.body).token;
}

Step 2: Stress Testing Scenarios

import http from 'k6/http';
import { check } from 'k6';

export const options = {
  // Stress test configuration: a 70-minute profile that pushes well past
  // the expected capacity to find the breaking point.
  stages: [
    { duration: '10m', target: 1000 }, // Ramp up to 1000 users
    { duration: '30m', target: 1000 }, // Stay at 1000 users
    { duration: '10m', target: 2000 }, // Ramp up to 2000 users  
    { duration: '10m', target: 2000 }, // Stay at 2000 users
    { duration: '10m', target: 0 },    // Ramp down
  ],
  thresholds: {
    // Thresholds are deliberately looser than the load test:
    // the system may degrade under stress but must not collapse.
    http_req_duration: ['p(99)<5000'], // 99% under 5s
    http_req_failed: ['rate<0.2'],     // Error rate under 20%
  },
};

// Stress-test VU scenario: each iteration fires one GET at a randomly
// chosen read endpoint, spreading load across the API surface.
export default function() {
  const endpoints = [
    '/api/products',
    '/api/categories',
    '/api/search?q=laptop',
    '/api/users/profile',
    '/api/cart'
  ];

  const pick = Math.floor(Math.random() * endpoints.length);
  const response = http.get(`${__ENV.BASE_URL}${endpoints[pick]}`);

  // Under stress a 500 is an accepted outcome; the check only verifies
  // the server answered (200 or 500) within 10s rather than hanging.
  check(response, {
    'status is 200 or 500': (r) => r.status === 200 || r.status === 500,
    'response time under load': (r) => r.timings.duration < 10000,
  });
}

Step 3: NGINX Load Balancer Configuration

# Main configuration — NGINX as a reverse proxy / load balancer in front
# of the Node.js app tier (app1-3) and the static file servers.
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 4096;
    use epoll;          # efficient event notification on Linux
    multi_accept on;    # accept all pending connections per wakeup
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Logging format: standard combined fields plus request/upstream
    # timings (rt/uct/uht/urt) for latency diagnostics.
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" '
                    'rt=$request_time uct="$upstream_connect_time" '
                    'uht="$upstream_header_time" urt="$upstream_response_time"';

    access_log /var/log/nginx/access.log main;

    # Performance settings
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/json
        application/javascript
        application/xml+rss
        application/atom+xml
        image/svg+xml;

    # Rate limiting, keyed per client IP:
    # api zone  — 10 requests/second, general API traffic
    # login zone — 5 requests/minute, brute-force protection for login
    limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=login:10m rate=5r/m;

    # Upstream servers — application tier, least-connections balancing
    upstream app_servers {
        least_conn;
        server app1:3000 max_fails=3 fail_timeout=30s weight=1;
        server app2:3000 max_fails=3 fail_timeout=30s weight=1;
        server app3:3000 max_fails=3 fail_timeout=30s weight=1;
        
        # Keepalive connections to upstream
        keepalive 32;
    }

    upstream static_servers {
        server static1:80 weight=1;
        server static2:80 weight=1;
    }

    # Health check endpoint — answered by NGINX itself (for the LB in
    # front of this box), no upstream involved.
    server {
        listen 8080;
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }
    }

    # Main application server (plain HTTP)
    # NOTE(review): this server does NOT redirect to HTTPS; if HTTPS-only
    # is intended, add a `return 301 https://...` here — confirm.
    server {
        listen 80;
        server_name api.example.com;
        
        # Security headers
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;

        # Rate limiting for different endpoints
        location /api/auth/login {
            limit_req zone=login burst=5 nodelay;
            proxy_pass http://app_servers;
            include proxy_params;
        }

        location /api/ {
            limit_req zone=api burst=20 nodelay;
            proxy_pass http://app_servers;
            include proxy_params;
        }

        # Static files — long-lived immutable caching, served by the
        # dedicated static upstream.
        location /static/ {
            expires 1y;
            add_header Cache-Control "public, immutable";
            proxy_pass http://static_servers;
        }

        # WebSocket support — HTTP/1.1 with Upgrade/Connection headers
        # forwarded so the upstream can complete the protocol switch.
        location /ws/ {
            proxy_pass http://app_servers;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $host;
            proxy_cache_bypass $http_upgrade;
        }

        # Health checks — proxied to the app tier with tight timeouts so
        # a hung upstream is reported quickly.
        location /health {
            access_log off;
            proxy_pass http://app_servers;
            proxy_connect_timeout 1s;
            proxy_send_timeout 1s;
            proxy_read_timeout 1s;
        }
    }

    # HTTPS server — terminates TLS for api.example.com.
    # (The original comment said "HTTPS redirect", but no redirect
    # happens here; this block serves HTTPS traffic directly.)
    server {
        listen 443 ssl http2;
        server_name api.example.com;
        
        ssl_certificate /etc/nginx/ssl/cert.pem;
        ssl_certificate_key /etc/nginx/ssl/key.pem;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384;
        ssl_prefer_server_ciphers off;

        # HSTS
        add_header Strict-Transport-Security "max-age=63072000" always;

        location / {
            proxy_pass http://app_servers;
            include proxy_params;
        }
    }
}

# Proxy parameters — shared include used by the location blocks above.
# /etc/nginx/proxy_params
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;

Step 4: HAProxy Configuration

# HAProxy load balancer: TLS termination, per-IP rate limiting, and
# path-based routing to API / web / WebSocket / static backends.
global
    daemon
    chroot /var/lib/haproxy
    stats socket /run/haproxy/admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy

    # Default SSL material locations
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private

    # Intermediate configuration
    ssl-default-bind-ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384
    ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms
    option httplog
    option dontlognull
    option log-health-checks
    option forwardfor
    # NOTE(review): 'option httpclose' closes the connection after each
    # request, disabling keep-alive on both sides; confirm this is
    # intended rather than 'option http-server-close'.
    option httpclose
    
    # Error pages
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

# Frontend configuration
frontend web_frontend
    bind *:80
    bind *:443 ssl crt /etc/ssl/certs/example.com.pem
    
    # Redirect HTTP to HTTPS
    redirect scheme https if !{ ssl_fc }
    
    # Rate limiting: track per-source-IP request rate over a 10s window
    # and deny clients exceeding 50 requests in that window.
    stick-table type ip size 100k expire 30s store http_req_rate(10s)
    http-request track-sc0 src
    http-request deny if { sc_http_req_rate(0) gt 50 }
    
    # ACLs for routing
    acl is_api path_beg /api/
    acl is_static path_beg /static/
    acl is_websocket hdr(Upgrade) -i websocket
    
    # Backend selection (first matching ACL wins, in this order)
    use_backend api_servers if is_api
    use_backend static_servers if is_static
    use_backend websocket_servers if is_websocket
    default_backend web_servers

# API backend — stateless, plain round-robin with active health checks
backend api_servers
    balance roundrobin
    option httpchk GET /health
    http-check expect status 200
    
    server api1 10.0.1.10:3000 check inter 5s rise 2 fall 3 maxconn 100
    server api2 10.0.1.11:3000 check inter 5s rise 2 fall 3 maxconn 100
    server api3 10.0.1.12:3000 check inter 5s rise 2 fall 3 maxconn 100

# Web backend with cookie-based session persistence: HAProxy inserts a
# SERVERID cookie so a client keeps hitting the same server.
backend web_servers
    balance roundrobin
    cookie SERVERID insert indirect nocache
    option httpchk GET /health
    
    server web1 10.0.1.20:3000 cookie web1 check
    server web2 10.0.1.21:3000 cookie web2 check
    server web3 10.0.1.22:3000 cookie web3 check

# WebSocket backend — source-IP hashing keeps a client pinned to one
# server; the long tunnel timeout allows hour-long idle connections.
backend websocket_servers
    balance source
    option httpchk GET /health
    timeout tunnel 3600s
    
    server ws1 10.0.1.30:3000 check
    server ws2 10.0.1.31:3000 check

# Static file backend
backend static_servers
    balance roundrobin
    option httpchk GET /health
    
    server static1 10.0.1.40:80 check
    server static2 10.0.1.41:80 check

# Statistics page — served on :8404 at /stats.
# NOTE(review): replace the default admin:password credentials (and
# consider restricting by IP) before deploying.
listen stats
    bind *:8404
    option httplog
    stats enable
    stats hide-version
    stats refresh 30s
    stats show-node
    stats auth admin:password
    stats uri /stats

Step 5: Application-Level Load Balancing

/**
 * Application-level load balancer supporting several server-selection
 * algorithms (round-robin, least-connections, weighted round-robin,
 * consistent hashing) plus periodic HTTP health checks against each
 * server's /health endpoint.
 */
class ApplicationLoadBalancer {
  constructor() {
    this.servers = [];
    // Dispatch table; bound so algorithms can read instance state.
    this.algorithms = {
      roundRobin: this.roundRobin.bind(this),
      leastConnections: this.leastConnections.bind(this),
      weightedRoundRobin: this.weightedRoundRobin.bind(this),
      consistentHash: this.consistentHash.bind(this),
    };
    this.currentIndex = 0;
  }

  /**
   * Registers a server. `server` must have a `url`; `weight` is optional
   * (defaults to 1). Tracking counters are initialized to zero and the
   * server starts out considered healthy.
   */
  addServer(server) {
    this.servers.push({
      ...server,
      connections: 0,
      totalRequests: 0,
      errors: 0,
      responseTime: 0,
      healthy: true,
      weight: server.weight || 1,
    });
  }

  /**
   * Picks a healthy server using the named algorithm.
   * @param {string} algorithm - key into the dispatch table.
   * @param {string|null} clientId - only used by consistentHash.
   * @throws {Error} if the algorithm is unknown or no server is healthy.
   */
  async selectServer(algorithm = 'roundRobin', clientId = null) {
    const strategy = this.algorithms[algorithm];
    if (!strategy) {
      // Fail with a clear message instead of the opaque TypeError the
      // original code produced when indexing with an unknown key.
      throw new Error(`Unknown load balancing algorithm: ${algorithm}`);
    }

    const healthyServers = this.servers.filter((server) => server.healthy);

    if (healthyServers.length === 0) {
      throw new Error('No healthy servers available');
    }

    return strategy(healthyServers, clientId);
  }

  // Cycle through servers in order. Note the index is shared across
  // algorithms and the healthy subset may change between calls.
  roundRobin(servers) {
    const server = servers[this.currentIndex % servers.length];
    this.currentIndex++;
    return server;
  }

  // Pick the server currently handling the fewest connections.
  leastConnections(servers) {
    return servers.reduce((min, server) =>
      server.connections < min.connections ? server : min
    );
  }

  // Random selection with probability proportional to each weight.
  weightedRoundRobin(servers) {
    const totalWeight = servers.reduce((sum, server) => sum + server.weight, 0);
    let random = Math.random() * totalWeight;

    for (const server of servers) {
      random -= server.weight;
      if (random <= 0) {
        return server;
      }
    }

    return servers[0]; // floating-point fallback; unreachable in practice
  }

  // Hash the client id to a fixed server so a client keeps hitting the
  // same backend (as long as the healthy set doesn't change).
  consistentHash(servers, clientId) {
    if (!clientId) {
      return this.roundRobin(servers);
    }

    const hash = this.simpleHash(clientId);
    const index = hash % servers.length;
    return servers[index];
  }

  // djb2-style 32-bit string hash, made non-negative.
  simpleHash(str) {
    let hash = 0;
    for (let i = 0; i < str.length; i++) {
      const char = str.charCodeAt(i);
      hash = ((hash << 5) - hash) + char;
      hash = hash & hash; // Convert to 32-bit integer
    }
    return Math.abs(hash);
  }

  /**
   * Probes every server's /health endpoint in parallel and updates its
   * `healthy` flag, logging transitions in either direction.
   * A request error or timeout marks the server unhealthy.
   */
  async healthCheck() {
    const promises = this.servers.map(async (server) => {
      try {
        // fetch() has no `timeout` option — the original passed one and
        // it was silently ignored, so a hung server stalled the check.
        // AbortSignal.timeout aborts the request after 5s.
        const response = await fetch(`${server.url}/health`, {
          signal: AbortSignal.timeout(5000)
        });

        const wasHealthy = server.healthy;
        server.healthy = response.status === 200;

        if (!wasHealthy && server.healthy) {
          console.log(`Server ${server.url} is back online`);
        } else if (wasHealthy && !server.healthy) {
          console.log(`Server ${server.url} is down`);
        }
      } catch (error) {
        server.healthy = false;
      }
    });

    await Promise.all(promises);
  }

  // Kick off recurring health checks (default: every 30s).
  startHealthChecks(interval = 30000) {
    setInterval(() => {
      this.healthCheck();
    }, interval);
  }

  // Snapshot of per-server stats for monitoring/debug endpoints.
  getServerStats() {
    return this.servers.map((server) => ({
      url: server.url,
      healthy: server.healthy,
      connections: server.connections,
      totalRequests: server.totalRequests,
      errors: server.errors,
      avgResponseTime: server.responseTime,
      weight: server.weight,
    }));
  }
}

module.exports = ApplicationLoadBalancer;

Step 6: Auto-scaling Configuration

# Kubernetes HorizontalPodAutoscaler (autoscaling/v2) for the app tier.
# Scales app-deployment between 3 and 50 replicas on CPU, memory, and a
# custom requests_per_second metric.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: app-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: app-deployment
  minReplicas: 3    # keep a baseline for availability
  maxReplicas: 50
  metrics:
  # Scale when average CPU utilization across pods exceeds 70%.
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  # Scale when average memory utilization across pods exceeds 80%.
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 80
  # Custom per-pod metric; NOTE(review): requires a metrics adapter
  # (e.g. Prometheus Adapter) exposing requests_per_second — confirm.
  - type: Pods
    pods:
      metric:
        name: requests_per_second
      target:
        type: AverageValue
        averageValue: "100"
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300   # wait 5 min before shrinking (avoids flapping)
      policies:
      - type: Percent
        value: 50           # remove at most half the pods per minute
        periodSeconds: 60
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
      - type: Percent
        value: 100          # may double the replica count every 15s
        periodSeconds: 15
      - type: Pods
        value: 5            # or add up to 5 pods per minute
        periodSeconds: 60
      selectPolicy: Max     # pick whichever policy allows the larger increase

Summary

Performance testing with k6 and load balancing with NGINX/HAProxy ensure application scalability. Implement health checks, auto-scaling, and monitoring to maintain optimal performance under varying loads.


Share this post on:

Previous Post
Setting Up a Monorepo with Turborepo and pnpm
Next Post
Advanced Database Optimization and Query Performance