Node.js Performance Optimization: Production-Ready Server Development
Node.js performance optimization is crucial for building scalable, production-ready applications. This comprehensive guide covers memory management, CPU optimization, clustering, caching strategies, and monitoring techniques for high-performance server applications.
Why Optimize Node.js Performance?
- Scalability: Handle more concurrent users with better resource utilization
- Cost Efficiency: Reduce server costs through optimized resource usage
- User Experience: Faster response times improve user satisfaction
- Reliability: Optimized code reduces crashes and memory leaks
- Competitive Advantage: Faster, more reliable services outperform slower competitors
Step 1: Development Environment Setup
Set up a comprehensive Node.js performance testing environment:
# Create project directory
mkdir nodejs-performance-app
cd nodejs-performance-app
# Initialize package.json
npm init -y
# Install core dependencies
npm install express compression helmet cors
npm install dotenv morgan winston
npm install redis ioredis lru-cache
npm install @prisma/client prisma
# Install performance and monitoring tools
npm install clinic autocannon
npm install elastic-apm-node newrelic
npm install prom-client
npm install pidusage systeminformation
# Install development dependencies
npm install -D typescript @types/node
npm install -D nodemon concurrently
npm install -D jest supertest @types/jest
npm install -D eslint @typescript-eslint/eslint-plugin
npm install -D prettier husky lint-staged
Step 2: Memory Management Optimization
Implement comprehensive memory management strategies:
import { EventEmitter } from "events";
import * as v8 from "v8";
import * as process from "process";
interface MemoryStats {
heapUsed: number;
heapTotal: number;
external: number;
arrayBuffers: number;
rss: number;
heapUsedPercentage: number;
timestamp: number;
}
interface MemoryThresholds {
heapUsedWarning: number; // 70% of max heap
heapUsedCritical: number; // 85% of max heap
heapUsedShutdown: number; // 95% of max heap
}
export class MemoryManager extends EventEmitter {
private monitoringInterval: NodeJS.Timeout | null = null;
private stats: MemoryStats[] = [];
private maxStatsHistory = 100;
private thresholds: MemoryThresholds;
private isMonitoring = false;
constructor() {
super();
// Calculate thresholds based on max heap size
const heapStatistics = v8.getHeapStatistics();
const maxHeapSize = heapStatistics.heap_size_limit;
this.thresholds = {
heapUsedWarning: maxHeapSize * 0.7,
heapUsedCritical: maxHeapSize * 0.85,
heapUsedShutdown: maxHeapSize * 0.95,
};
}
// Start memory monitoring
startMonitoring(intervalMs = 30000): void {
if (this.isMonitoring) return;
this.isMonitoring = true;
// Initial memory check
this.checkMemory();
// Set up periodic monitoring
this.monitoringInterval = setInterval(() => {
this.checkMemory();
}, intervalMs);
console.log(`Memory monitoring started (interval: ${intervalMs}ms)`);
}
// Stop memory monitoring
stopMonitoring(): void {
if (this.monitoringInterval) {
clearInterval(this.monitoringInterval);
this.monitoringInterval = null;
}
this.isMonitoring = false;
console.log("Memory monitoring stopped");
}
// Get current memory statistics
getMemoryStats(): MemoryStats {
const memUsage = process.memoryUsage();
const heapStats = v8.getHeapStatistics();
const stats: MemoryStats = {
heapUsed: memUsage.heapUsed,
heapTotal: memUsage.heapTotal,
external: memUsage.external,
arrayBuffers: memUsage.arrayBuffers,
rss: memUsage.rss,
heapUsedPercentage: (memUsage.heapUsed / heapStats.heap_size_limit) * 100,
timestamp: Date.now(),
};
// Store stats history
this.stats.push(stats);
if (this.stats.length > this.maxStatsHistory) {
this.stats.shift();
}
return stats;
}
// Check memory usage and emit warnings
private checkMemory(): void {
const stats = this.getMemoryStats();
if (stats.heapUsed > this.thresholds.heapUsedShutdown) {
this.emit("critical", {
message: "Critical memory usage - immediate action required",
stats,
level: "shutdown",
});
} else if (stats.heapUsed > this.thresholds.heapUsedCritical) {
this.emit("critical", {
message: "Critical memory usage detected",
stats,
level: "critical",
});
} else if (stats.heapUsed > this.thresholds.heapUsedWarning) {
this.emit("warning", {
message: "High memory usage detected",
stats,
level: "warning",
});
}
}
// Force garbage collection (if exposed)
forceGarbageCollection(): boolean {
if (global.gc) {
const beforeGC = this.getMemoryStats();
global.gc();
const afterGC = this.getMemoryStats();
console.log("Garbage collection completed:", {
heapBefore: this.formatBytes(beforeGC.heapUsed),
heapAfter: this.formatBytes(afterGC.heapUsed),
freed: this.formatBytes(beforeGC.heapUsed - afterGC.heapUsed),
});
return true;
}
console.warn(
"Garbage collection not exposed. Start Node.js with --expose-gc flag."
);
return false;
}
// Get memory usage trends
getMemoryTrends(): {
currentUsage: number;
averageUsage: number;
trend: "increasing" | "decreasing" | "stable";
peakUsage: number;
} {
if (this.stats.length < 2) {
const current = this.getMemoryStats();
return {
currentUsage: current.heapUsed,
averageUsage: current.heapUsed,
trend: "stable",
peakUsage: current.heapUsed,
};
}
const recent = this.stats.slice(-10);
const current = recent[recent.length - 1];
const average =
recent.reduce((sum, stat) => sum + stat.heapUsed, 0) / recent.length;
const peak = Math.max(...this.stats.map(stat => stat.heapUsed));
// Calculate trend
const first = recent[0];
const last = recent[recent.length - 1];
const change = (last.heapUsed - first.heapUsed) / first.heapUsed;
let trend: "increasing" | "decreasing" | "stable" = "stable";
if (change > 0.05) trend = "increasing";
else if (change < -0.05) trend = "decreasing";
return {
currentUsage: current.heapUsed,
averageUsage: average,
trend,
peakUsage: peak,
};
}
// Get detailed memory report
getMemoryReport(): string {
const stats = this.getMemoryStats();
const trends = this.getMemoryTrends();
const heapStats = v8.getHeapStatistics();
return `
Memory Report (${new Date().toISOString()})
==========================================
Current Usage:
- Heap Used: ${this.formatBytes(stats.heapUsed)} (${stats.heapUsedPercentage.toFixed(1)}%)
- Heap Total: ${this.formatBytes(stats.heapTotal)}
- Heap Limit: ${this.formatBytes(heapStats.heap_size_limit)}
- RSS: ${this.formatBytes(stats.rss)}
- External: ${this.formatBytes(stats.external)}
- Array Buffers: ${this.formatBytes(stats.arrayBuffers)}
Trends:
- Average Usage: ${this.formatBytes(trends.averageUsage)}
- Peak Usage: ${this.formatBytes(trends.peakUsage)}
- Trend: ${trends.trend}
Heap Statistics:
- Total Available Size: ${this.formatBytes(heapStats.total_available_size)}
- Total Physical Size: ${this.formatBytes(heapStats.total_physical_size)}
- Used Heap Size: ${this.formatBytes(heapStats.used_heap_size)}
- Heap Size Limit: ${this.formatBytes(heapStats.heap_size_limit)}
Warnings:
- Warning Threshold: ${this.formatBytes(this.thresholds.heapUsedWarning)}
- Critical Threshold: ${this.formatBytes(this.thresholds.heapUsedCritical)}
- Shutdown Threshold: ${this.formatBytes(this.thresholds.heapUsedShutdown)}
`;
}
// Format bytes to human-readable format
private formatBytes(bytes: number): string {
const units = ["B", "KB", "MB", "GB"];
let size = bytes;
let unitIndex = 0;
while (size >= 1024 && unitIndex < units.length - 1) {
size /= 1024;
unitIndex++;
}
return `${size.toFixed(2)} ${units[unitIndex]}`;
}
}
// Singleton instance
export const memoryManager = new MemoryManager();
// Memory optimization utilities
export class MemoryOptimizer {
// Create object pools for frequently used objects
static createObjectPool<T>(
createFn: () => T,
resetFn: (obj: T) => void,
initialSize = 10
) {
const pool: T[] = [];
// Pre-populate pool
for (let i = 0; i < initialSize; i++) {
pool.push(createFn());
}
return {
acquire(): T {
return pool.pop() || createFn();
},
release(obj: T): void {
resetFn(obj);
pool.push(obj);
},
size(): number {
return pool.length;
},
};
}
// WeakMap-based cache for preventing memory leaks
static createWeakCache<K extends object, V>(): {
get: (key: K) => V | undefined;
set: (key: K, value: V) => void;
has: (key: K) => boolean;
delete: (key: K) => boolean;
} {
const cache = new WeakMap<K, V>();
return {
get: (key: K) => cache.get(key),
set: (key: K, value: V) => cache.set(key, value),
has: (key: K) => cache.has(key),
delete: (key: K) => cache.delete(key),
};
}
// Buffer pool for networking operations
static createBufferPool(bufferSize = 1024, poolSize = 10) {
const pool: Buffer[] = [];
for (let i = 0; i < poolSize; i++) {
pool.push(Buffer.allocUnsafe(bufferSize));
}
return {
acquire(): Buffer {
return pool.pop() || Buffer.allocUnsafe(bufferSize);
},
release(buffer: Buffer): void {
buffer.fill(0); // Clear buffer before reuse
if (pool.length < poolSize) pool.push(buffer); // Cap the pool so it cannot grow unbounded
},
};
}
// Stream processing for large data
static async processLargeData<T, R>(
data: T[],
processor: (chunk: T[]) => Promise<R[]>,
chunkSize = 1000
): Promise<R[]> {
const results: R[] = [];
for (let i = 0; i < data.length; i += chunkSize) {
const chunk = data.slice(i, i + chunkSize);
const chunkResults = await processor(chunk);
results.push(...chunkResults);
// Allow event loop to process other operations
await new Promise(resolve => setImmediate(resolve));
}
return results;
}
}
Save this as src/utils/memoryManager.ts.
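A minimal wiring sketch for the manager (the entrypoint file and the logging choices here are illustrative, not part of the module above): start monitoring at boot and react to the emitted events. Run Node with --expose-gc if you want forceGarbageCollection to work.

import { memoryManager } from "./utils/memoryManager";

memoryManager.startMonitoring(30000); // sample every 30s

memoryManager.on("warning", ({ message, stats }) => {
  console.warn(message, { heapUsed: stats.heapUsed });
});

memoryManager.on("critical", ({ message, level }) => {
  console.error(message);
  if (level === "critical") {
    memoryManager.forceGarbageCollection(); // only works with --expose-gc
  } else if (level === "shutdown") {
    process.kill(process.pid, "SIGTERM"); // let the process manager restart us
  }
});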
Step 3: CPU Optimization and Clustering
Implement CPU optimization and clustering strategies:
import cluster, { Worker } from "cluster";
import os from "os";
import process from "process";
interface ClusterConfig {
workers?: number;
respawn?: boolean;
killTimeout?: number;
restartDelay?: number;
maxRestarts?: number;
}
export class ClusterManager {
private config: Required<ClusterConfig>;
private workerRestarts = new Map<number, number>();
constructor(config: ClusterConfig = {}) {
this.config = {
workers: config.workers || os.cpus().length,
respawn: config.respawn !== false,
killTimeout: config.killTimeout || 5000,
restartDelay: config.restartDelay || 1000,
maxRestarts: config.maxRestarts || 10,
};
}
// Start cluster
start(): void {
if (cluster.isPrimary) {
this.startMaster();
} else {
this.startWorker();
}
}
private startMaster(): void {
console.log(`Master ${process.pid} is running`);
console.log(`Starting ${this.config.workers} workers`);
// Fork workers
for (let i = 0; i < this.config.workers; i++) {
this.forkWorker();
}
// Handle worker events
cluster.on("exit", (worker, code, signal) => {
console.log(
`Worker ${worker.process.pid} died with code ${code} and signal ${signal}`
);
if (this.config.respawn) {
this.handleWorkerExit(worker);
}
});
// Graceful shutdown
process.on("SIGTERM", () => this.shutdown());
process.on("SIGINT", () => this.shutdown());
// Worker management commands
process.on("SIGUSR2", () => this.reloadWorkers());
}
private startWorker(): void {
console.log(`Worker ${process.pid} started`);
// Import and start the actual application
require("./server");
// Handle graceful shutdown
process.on("message", msg => {
if (msg === "shutdown") {
this.gracefulShutdown();
}
});
}
private forkWorker(): Worker {
const worker = cluster.fork();
this.workerRestarts.set(worker.id, 0);
// Set up worker monitoring
worker.on("message", message => {
this.handleWorkerMessage(worker, message);
});
return worker;
}
private handleWorkerExit(worker: Worker): void {
const restarts = this.workerRestarts.get(worker.id) || 0;
if (restarts >= this.config.maxRestarts) {
console.error(
`Worker ${worker.id} has been restarted ${restarts} times. Not restarting.`
);
return;
}
setTimeout(() => {
const newWorker = this.forkWorker();
this.workerRestarts.set(newWorker.id, restarts + 1);
console.log(
`Restarted worker ${worker.id} as ${newWorker.id} (restart #${restarts + 1})`
);
}, this.config.restartDelay);
}
private handleWorkerMessage(worker: Worker, message: any): void {
if (message.type === "health-check") {
worker.send({ type: "health-response", status: "ok" });
}
}
// Graceful shutdown of all workers
private async shutdown(): Promise<void> {
console.log("Shutting down cluster...");
const workers = Object.values(cluster.workers || {});
const shutdownPromises = workers.map(worker => {
if (worker) {
return this.shutdownWorker(worker);
}
return Promise.resolve();
});
await Promise.all(shutdownPromises);
console.log("All workers shut down. Exiting master.");
process.exit(0);
}
private shutdownWorker(worker: Worker): Promise<void> {
return new Promise(resolve => {
const timeout = setTimeout(() => {
console.log(`Force killing worker ${worker.process.pid}`);
worker.kill("SIGKILL");
resolve();
}, this.config.killTimeout);
worker.on("exit", () => {
clearTimeout(timeout);
resolve();
});
worker.send("shutdown");
});
}
private gracefulShutdown(): void {
console.log(`Worker ${process.pid} shutting down...`);
// Close server gracefully
const server = (global as any).server; // cast: "server" is not declared on the Node global type
if (server) {
server.close(() => {
console.log(`Worker ${process.pid} closed all connections.`);
process.exit(0);
});
// Force exit after timeout
setTimeout(() => {
console.log(`Worker ${process.pid} force exit.`);
process.exit(1);
}, this.config.killTimeout);
} else {
process.exit(0);
}
}
// Rolling restart of workers
private async reloadWorkers(): Promise<void> {
console.log("Reloading all workers...");
const workers = Object.values(cluster.workers || {});
for (const worker of workers) {
if (worker) {
await this.reloadWorker(worker);
// Wait before reloading next worker
await new Promise(resolve => setTimeout(resolve, 1000));
}
}
}
private reloadWorker(worker: Worker): Promise<void> {
return new Promise(resolve => {
const newWorker = this.forkWorker();
newWorker.on("listening", () => {
worker.disconnect();
setTimeout(() => {
if (!worker.isDead()) {
worker.kill("SIGTERM");
}
resolve();
}, 1000);
});
});
}
}
// CPU optimization utilities
export class CPUOptimizer {
// Distribute CPU-intensive tasks
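// Note: Promise.all does not parallelize synchronous work; each processor
// call still runs on the main thread between awaits. For truly CPU-bound
// tasks, move the work into worker_threads instead.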
static async distributeCPUTask<T, R>(
items: T[],
processor: (item: T) => R,
maxConcurrency?: number
): Promise<R[]> {
const concurrency = maxConcurrency || os.cpus().length;
const results: R[] = [];
for (let i = 0; i < items.length; i += concurrency) {
const batch = items.slice(i, i + concurrency);
const batchResults = await Promise.all(
batch.map(item => Promise.resolve(processor(item)))
);
results.push(...batchResults);
}
return results;
}
// Deferred JSON parsing for large payloads: yields to the event loop first, though the parse itself still blocks while it runs
static async parseJSONAsync(jsonString: string): Promise<any> {
return new Promise((resolve, reject) => {
setImmediate(() => {
try {
const result = JSON.parse(jsonString);
resolve(result);
} catch (error) {
reject(error);
}
});
});
}
// Batch processing with backpressure
static createBatchProcessor<T>(
processor: (items: T[]) => Promise<void>,
options: {
batchSize: number;
maxConcurrency: number;
flushInterval?: number;
}
) {
const queue: T[] = [];
let processing = 0;
let flushTimeout: NodeJS.Timeout | null = null;
const flush = async () => {
if (queue.length === 0 || processing >= options.maxConcurrency) {
return;
}
const batch = queue.splice(0, options.batchSize);
processing++;
try {
await processor(batch);
} finally {
processing--;
// Process more if queue has items
if (queue.length > 0) {
setImmediate(flush);
}
}
};
const scheduleFlush = () => {
if (flushTimeout) return;
flushTimeout = setTimeout(() => {
flushTimeout = null;
flush();
}, options.flushInterval || 10);
};
return {
add(item: T): void {
queue.push(item);
if (queue.length >= options.batchSize) {
flush();
} else {
scheduleFlush();
}
},
flush: () => flush(),
getQueueSize: () => queue.length,
};
}
}
Save this as src/utils/clusterManager.ts.
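To use it, a cluster entrypoint only needs to instantiate and start the manager. The filename src/cluster.ts and the option values below are illustrative:

import { ClusterManager } from "./utils/clusterManager";

const manager = new ClusterManager({
  workers: 4,          // defaults to os.cpus().length
  killTimeout: 10000,  // give in-flight requests 10s before SIGKILL
  maxRestarts: 5,
});

manager.start(); // primary forks workers; each worker loads ./server

A zero-downtime rolling restart can then be triggered by sending SIGUSR2 to the primary process (kill -USR2 <pid>).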
Step 4: Advanced Caching Strategies
Implement comprehensive caching solutions:
import Redis from "ioredis";
import { LRUCache } from "lru-cache";
interface CacheOptions {
ttl?: number; // Time to live in seconds
compress?: boolean;
namespace?: string;
}
interface CacheStats {
hits: number;
misses: number;
sets: number;
deletes: number;
errors: number;
}
// Multi-layer caching system
export class CacheManager {
private redis: Redis;
private memoryCache: LRUCache<string, any>;
private stats: CacheStats = {
hits: 0,
misses: 0,
sets: 0,
deletes: 0,
errors: 0,
};
private namespace: string;
constructor(redisUrl?: string, options: CacheOptions = {}) {
this.namespace = options.namespace || "cache";
// Initialize Redis
this.redis = new Redis(
redisUrl || process.env.REDIS_URL || "redis://localhost:6379",
{
maxRetriesPerRequest: 3,
retryStrategy: times => Math.min(times * 100, 2000), // back off between reconnect attempts
lazyConnect: true,
family: 4,
}
);
// Initialize memory cache (L1)
this.memoryCache = new LRUCache({
max: 1000, // Maximum items
ttl: 1000 * 60 * 5, // 5 minutes
allowStale: false,
updateAgeOnGet: false,
updateAgeOnHas: false,
});
// Handle Redis events
this.redis.on("error", error => {
console.error("Redis connection error:", error);
this.stats.errors++;
});
this.redis.on("connect", () => {
console.log("Connected to Redis");
});
}
// Get value from cache (L1 -> L2 -> source)
async get<T = any>(
key: string,
fallback?: () => Promise<T>,
options: CacheOptions = {}
): Promise<T | null> {
const namespacedKey = this.getNamespacedKey(key);
try {
// Try L1 cache (memory)
let value = this.memoryCache.get(namespacedKey);
if (value !== undefined) {
this.stats.hits++;
return value;
}
// Try L2 cache (Redis)
const redisValue = await this.redis.get(namespacedKey);
if (redisValue) {
const parsedValue = JSON.parse(redisValue);
// Populate L1 cache
this.memoryCache.set(namespacedKey, parsedValue);
this.stats.hits++;
return parsedValue;
}
this.stats.misses++;
// Use fallback if provided
if (fallback) {
const fallbackValue = await fallback();
if (fallbackValue !== null && fallbackValue !== undefined) {
await this.set(key, fallbackValue, options);
}
return fallbackValue;
}
return null;
} catch (error) {
console.error("Cache get error:", error);
this.stats.errors++;
// Try fallback on error
if (fallback) {
return await fallback();
}
return null;
}
}
// Set value in cache
async set<T = any>(
key: string,
value: T,
options: CacheOptions = {}
): Promise<boolean> {
const namespacedKey = this.getNamespacedKey(key);
const ttl = options.ttl || 3600; // Default 1 hour
try {
const serializedValue = JSON.stringify(value);
// Set in L1 cache
this.memoryCache.set(namespacedKey, value);
// Set in L2 cache with TTL
await this.redis.setex(namespacedKey, ttl, serializedValue);
this.stats.sets++;
return true;
} catch (error) {
console.error("Cache set error:", error);
this.stats.errors++;
return false;
}
}
// Delete from cache
async delete(key: string): Promise<boolean> {
const namespacedKey = this.getNamespacedKey(key);
try {
// Delete from L1
this.memoryCache.delete(namespacedKey);
// Delete from L2
const result = await this.redis.del(namespacedKey);
this.stats.deletes++;
return result > 0;
} catch (error) {
console.error("Cache delete error:", error);
this.stats.errors++;
return false;
}
}
// Batch operations
async mget<T = any>(keys: string[]): Promise<(T | null)[]> {
const namespacedKeys = keys.map(key => this.getNamespacedKey(key));
const results: (T | null)[] = [];
try {
// Try L1 cache first
const l1Results = namespacedKeys.map(key => this.memoryCache.get(key));
const missedIndices: number[] = [];
const missedKeys: string[] = [];
l1Results.forEach((result, index) => {
if (result !== undefined) {
results[index] = result;
this.stats.hits++;
} else {
results[index] = null;
missedIndices.push(index);
missedKeys.push(namespacedKeys[index]);
}
});
// Fetch missed keys from L2 cache
if (missedKeys.length > 0) {
const redisResults = await this.redis.mget(...missedKeys);
redisResults.forEach((redisValue, missedIndex) => {
const originalIndex = missedIndices[missedIndex];
if (redisValue) {
const parsedValue = JSON.parse(redisValue);
results[originalIndex] = parsedValue;
// Populate L1 cache
this.memoryCache.set(missedKeys[missedIndex], parsedValue);
this.stats.hits++;
} else {
this.stats.misses++;
}
});
}
return results;
} catch (error) {
console.error("Cache mget error:", error);
this.stats.errors++;
return keys.map(() => null);
}
}
// Cache with automatic refresh
async getWithRefresh<T>(
key: string,
fetcher: () => Promise<T>,
options: CacheOptions & { refreshThreshold?: number } = {}
): Promise<T> {
const ttl = options.ttl || 3600;
const refreshThreshold = options.refreshThreshold || ttl * 0.8;
const namespacedKey = this.getNamespacedKey(key);
try {
// Check if key exists and get TTL
const [value, keyTtl] = await Promise.all([
this.get<T>(key),
this.redis.ttl(namespacedKey),
]);
// If value exists and doesn't need refresh, return it
if (value && keyTtl > refreshThreshold) {
return value;
}
// If value exists but needs refresh, return it and refresh in background
if (value && keyTtl > 0) {
// Background refresh
setImmediate(async () => {
try {
const freshValue = await fetcher();
await this.set(key, freshValue, options);
} catch (error) {
console.error("Background refresh error:", error);
}
});
return value;
}
// No value or expired, fetch fresh data
const freshValue = await fetcher();
await this.set(key, freshValue, options);
return freshValue;
} catch (error) {
console.error("Cache getWithRefresh error:", error);
// Fallback to direct fetcher
return await fetcher();
}
}
// Pattern-based operations
async deletePattern(pattern: string): Promise<number> {
try {
const fullPattern = this.getNamespacedKey(pattern);
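// Note: KEYS blocks Redis while it scans the entire keyspace; for large
// production datasets, prefer incremental iteration with SCAN
// (e.g. ioredis' scanStream).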
const keys = await this.redis.keys(fullPattern);
if (keys.length === 0) {
return 0;
}
// Delete from L1 cache
keys.forEach(key => this.memoryCache.delete(key));
// Delete from L2 cache
const result = await this.redis.del(...keys);
this.stats.deletes += result;
return result;
} catch (error) {
console.error("Cache deletePattern error:", error);
this.stats.errors++;
return 0;
}
}
// Cache warming
async warm<T>(
items: Array<{ key: string; value: T; options?: CacheOptions }>
): Promise<void> {
const pipeline = this.redis.pipeline();
items.forEach(({ key, value, options = {} }) => {
const namespacedKey = this.getNamespacedKey(key);
const ttl = options.ttl || 3600;
const serializedValue = JSON.stringify(value);
// Set in L1 cache
this.memoryCache.set(namespacedKey, value);
// Add to Redis pipeline
pipeline.setex(namespacedKey, ttl, serializedValue);
});
try {
await pipeline.exec();
this.stats.sets += items.length;
console.log(`Cache warmed with ${items.length} items`);
} catch (error) {
console.error("Cache warming error:", error);
this.stats.errors++;
}
}
// Get cache statistics
getStats(): CacheStats & {
memorySize: number;
hitRate: number;
} {
const total = this.stats.hits + this.stats.misses;
const hitRate = total > 0 ? (this.stats.hits / total) * 100 : 0;
return {
...this.stats,
memorySize: this.memoryCache.size,
hitRate: parseFloat(hitRate.toFixed(2)),
};
}
// Clear all caches
async clear(): Promise<void> {
try {
// Clear L1 cache
this.memoryCache.clear();
// Clear L2 cache (namespace pattern)
const pattern = this.getNamespacedKey("*");
const keys = await this.redis.keys(pattern);
if (keys.length > 0) {
await this.redis.del(...keys);
}
console.log("All caches cleared");
} catch (error) {
console.error("Cache clear error:", error);
}
}
// Close connections
async close(): Promise<void> {
try {
await this.redis.quit();
console.log("Cache manager connections closed");
} catch (error) {
console.error("Error closing cache connections:", error);
}
}
private getNamespacedKey(key: string): string {
return `${this.namespace}:${key}`;
}
}
// Distributed caching utilities
export class DistributedCache {
static createCacheKey(...parts: (string | number)[]): string {
return parts.join(":");
}
static createTimeBasedKey(baseKey: string, intervalMinutes = 5): string {
const now = new Date();
const interval =
Math.floor(now.getMinutes() / intervalMinutes) * intervalMinutes;
const timeKey = `${now.getFullYear()}-${now.getMonth() + 1}-${now.getDate()}-${now.getHours()}-${interval}`; // getMonth() is zero-based
return `${baseKey}:${timeKey}`;
}
static async cacheResult<T>(
cache: CacheManager,
key: string,
fn: () => Promise<T>,
options: CacheOptions = {}
): Promise<T> {
return cache.get(key, fn, options) as Promise<T>;
}
}
// Export singleton instance
export const cacheManager = new CacheManager();
Save this as src/utils/cacheManager.ts.
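In a route handler, the read-through pattern then looks like this. The Express app, the getUserFromDb helper, and the TTL below are illustrative:

import express from "express";
import { cacheManager } from "./utils/cacheManager";

const app = express();

// Hypothetical data-access helper standing in for a real database query
async function getUserFromDb(id: string): Promise<{ id: string } | null> {
  return { id };
}

app.get("/users/:id", async (req, res) => {
  // L1 -> L2 -> database; a miss caches the result for 10 minutes
  const user = await cacheManager.get(
    `user:${req.params.id}`,
    () => getUserFromDb(req.params.id),
    { ttl: 600 }
  );
  if (!user) return res.status(404).end();
  res.json(user);
});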
Step 5: Performance Monitoring and Metrics
Implement comprehensive performance monitoring:
import { performance, PerformanceObserver, monitorEventLoopDelay } from "perf_hooks";
import { EventEmitter } from "events";
import pidusage from "pidusage";
interface PerformanceMetrics {
timestamp: number;
cpu: {
usage: number;
user: number;
system: number;
};
memory: {
heapUsed: number;
heapTotal: number;
rss: number;
external: number;
heapUsedPercentage: number;
};
eventLoop: {
lag: number;
utilization: number;
};
gc: {
collections: number;
duration: number;
type?: string;
};
requests: {
total: number;
active: number;
avgResponseTime: number;
p95ResponseTime: number;
p99ResponseTime: number;
};
}
interface AlertThreshold {
metric: keyof PerformanceMetrics | string;
threshold: number;
comparison: "gt" | "lt" | "eq";
duration?: number; // How long the threshold must be exceeded
}
export class PerformanceMonitor extends EventEmitter {
private metrics: PerformanceMetrics[] = [];
private maxMetricsHistory = 1000;
private monitoringInterval: NodeJS.Timeout | null = null;
private isMonitoring = false;
private performanceObserver: PerformanceObserver | null = null;
private eventLoopDelay = monitorEventLoopDelay({ resolution: 20 });
private lastELU = performance.eventLoopUtilization();
private requestMetrics = {
total: 0,
active: 0,
responseTimes: [] as number[],
maxResponseTimes: 1000,
};
private gcMetrics = {
collections: 0,
totalDuration: 0,
};
private alertThresholds: AlertThreshold[] = [];
private alertStates = new Map<
string,
{ startTime: number; alerted: boolean }
>();
constructor() {
super();
this.setupPerformanceObserver();
this.setupDefaultThresholds();
this.eventLoopDelay.enable();
}
// Setup performance observer for GC and other events
private setupPerformanceObserver(): void {
this.performanceObserver = new PerformanceObserver(list => {
for (const entry of list.getEntries()) {
if (entry.entryType === "gc") {
this.gcMetrics.collections++;
this.gcMetrics.totalDuration += entry.duration;
}
}
});
this.performanceObserver.observe({
entryTypes: ["gc", "measure"], // "navigation" is a browser-only entry type
});
}
// Setup default alert thresholds
private setupDefaultThresholds(): void {
this.alertThresholds = [
{ metric: "cpu.usage", threshold: 80, comparison: "gt", duration: 30000 },
{
metric: "memory.heapUsedPercentage",
threshold: 85,
comparison: "gt",
duration: 10000,
},
{
metric: "eventLoop.lag",
threshold: 100,
comparison: "gt",
duration: 5000,
},
{
metric: "requests.p99ResponseTime",
threshold: 5000,
comparison: "gt",
duration: 10000,
},
];
}
// Start monitoring
startMonitoring(intervalMs = 5000): void {
if (this.isMonitoring) return;
this.isMonitoring = true;
// Initial metrics collection
this.collectMetrics();
// Set up periodic monitoring
this.monitoringInterval = setInterval(() => {
this.collectMetrics();
}, intervalMs);
console.log(`Performance monitoring started (interval: ${intervalMs}ms)`);
}
// Stop monitoring
stopMonitoring(): void {
if (this.monitoringInterval) {
clearInterval(this.monitoringInterval);
this.monitoringInterval = null;
}
if (this.performanceObserver) {
this.performanceObserver.disconnect();
}
this.isMonitoring = false;
console.log("Performance monitoring stopped");
}
// Collect comprehensive metrics
private async collectMetrics(): Promise<void> {
try {
const [cpuInfo, memoryUsage] = await Promise.all([
this.getCPUUsage(),
this.getMemoryUsage(),
]);
const eventLoopMetrics = this.getEventLoopMetrics();
const requestMetrics = this.getRequestMetrics();
const gcMetrics = this.getGCMetrics();
const metrics: PerformanceMetrics = {
timestamp: Date.now(),
cpu: cpuInfo,
memory: memoryUsage,
eventLoop: eventLoopMetrics,
gc: gcMetrics,
requests: requestMetrics,
};
// Store metrics
this.metrics.push(metrics);
if (this.metrics.length > this.maxMetricsHistory) {
this.metrics.shift();
}
// Check alerts
this.checkAlerts(metrics);
// Emit metrics event
this.emit("metrics", metrics);
} catch (error) {
console.error("Error collecting metrics:", error);
}
}
// Get CPU usage
private async getCPUUsage(): Promise<PerformanceMetrics["cpu"]> {
try {
const stats = await pidusage(process.pid);
return {
usage: stats.cpu,
user: 0, // pidusage doesn't provide user/system breakdown
system: 0,
};
} catch (error) {
return { usage: 0, user: 0, system: 0 };
}
}
// Get memory usage
private getMemoryUsage(): PerformanceMetrics["memory"] {
const memUsage = process.memoryUsage();
const heapUsedPercentage = (memUsage.heapUsed / memUsage.heapTotal) * 100;
return {
heapUsed: memUsage.heapUsed,
heapTotal: memUsage.heapTotal,
rss: memUsage.rss,
external: memUsage.external,
heapUsedPercentage,
};
}
// Get event loop metrics
private getEventLoopMetrics(): PerformanceMetrics["eventLoop"] {
// monitorEventLoopDelay reports nanoseconds; convert the mean delay to ms.
// (Measuring elapsed time between collections would only reproduce the
// monitoring interval, not the actual event loop lag.)
const lag = this.eventLoopDelay.mean / 1e6;
this.eventLoopDelay.reset();
// Event loop utilization over the window since the previous collection
const elu = performance.eventLoopUtilization(this.lastELU);
this.lastELU = performance.eventLoopUtilization();
return {
lag,
utilization: elu.utilization * 100,
};
}
// Get request metrics
private getRequestMetrics(): PerformanceMetrics["requests"] {
const responseTimes = this.requestMetrics.responseTimes.slice(-100); // Last 100 requests
const avgResponseTime =
responseTimes.length > 0
? responseTimes.reduce((a, b) => a + b, 0) / responseTimes.length
: 0;
const sortedTimes = responseTimes.slice().sort((a, b) => a - b);
const p95Index = Math.ceil(sortedTimes.length * 0.95) - 1;
const p99Index = Math.ceil(sortedTimes.length * 0.99) - 1;
return {
total: this.requestMetrics.total,
active: this.requestMetrics.active,
avgResponseTime,
p95ResponseTime: sortedTimes[p95Index] || 0,
p99ResponseTime: sortedTimes[p99Index] || 0,
};
}
// Get GC metrics
private getGCMetrics(): PerformanceMetrics["gc"] {
return {
collections: this.gcMetrics.collections,
duration: this.gcMetrics.totalDuration,
};
}
// Track request start
trackRequestStart(): string {
const requestId = `req_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
this.requestMetrics.active++;
return requestId;
}
// Track request end
trackRequestEnd(requestId: string, duration: number): void {
this.requestMetrics.total++;
this.requestMetrics.active--;
this.requestMetrics.responseTimes.push(duration);
// Keep only recent response times
if (
this.requestMetrics.responseTimes.length >
this.requestMetrics.maxResponseTimes
) {
this.requestMetrics.responseTimes.shift();
}
}
// Check alert thresholds
private checkAlerts(metrics: PerformanceMetrics): void {
for (const threshold of this.alertThresholds) {
const value = this.getMetricValue(metrics, threshold.metric);
const alertKey = `${threshold.metric}_${threshold.threshold}`;
const isThresholdExceeded = this.compareValue(
value,
threshold.threshold,
threshold.comparison
);
if (isThresholdExceeded) {
const alertState = this.alertStates.get(alertKey);
if (!alertState) {
// Start tracking this alert
this.alertStates.set(alertKey, {
startTime: Date.now(),
alerted: false,
});
} else if (!alertState.alerted && threshold.duration) {
// Check if duration threshold is met
const elapsed = Date.now() - alertState.startTime;
if (elapsed >= threshold.duration) {
this.emitAlert(threshold, value, metrics);
alertState.alerted = true;
}
} else if (!threshold.duration) {
// Immediate alert
this.emitAlert(threshold, value, metrics);
}
} else {
// Reset alert state
this.alertStates.delete(alertKey);
}
}
}
// Get nested metric value
private getMetricValue(metrics: PerformanceMetrics, path: string): number {
const keys = path.split(".");
let value: any = metrics;
for (const key of keys) {
value = value?.[key];
}
return typeof value === "number" ? value : 0;
}
// Compare values based on comparison type
private compareValue(
value: number,
threshold: number,
comparison: string
): boolean {
switch (comparison) {
case "gt":
return value > threshold;
case "lt":
return value < threshold;
case "eq":
return value === threshold;
default:
return false;
}
}
// Emit alert
private emitAlert(
threshold: AlertThreshold,
value: number,
metrics: PerformanceMetrics
): void {
const alert = {
metric: threshold.metric,
threshold: threshold.threshold,
currentValue: value,
comparison: threshold.comparison,
timestamp: Date.now(),
metrics,
};
this.emit("alert", alert);
console.warn(
`Performance Alert: ${threshold.metric} is ${value} (threshold: ${threshold.threshold})`
);
}
// Get performance summary
getPerformanceSummary(minutes = 5): {
timeRange: { start: Date; end: Date };
averages: Partial<PerformanceMetrics>;
peaks: Partial<PerformanceMetrics>;
alertCount: number;
} {
const cutoffTime = Date.now() - minutes * 60 * 1000;
const recentMetrics = this.metrics.filter(m => m.timestamp > cutoffTime);
if (recentMetrics.length === 0) {
return {
timeRange: { start: new Date(), end: new Date() },
averages: {},
peaks: {},
alertCount: 0,
};
}
const averages = this.calculateAverages(recentMetrics);
const peaks = this.calculatePeaks(recentMetrics);
return {
timeRange: {
start: new Date(recentMetrics[0].timestamp),
end: new Date(recentMetrics[recentMetrics.length - 1].timestamp),
},
averages,
peaks,
alertCount: this.alertStates.size,
};
}
// Calculate averages
private calculateAverages(
metrics: PerformanceMetrics[]
): Partial<PerformanceMetrics> {
if (metrics.length === 0) return {};
const sums = metrics.reduce(
(acc, metric) => {
acc.cpuUsage += metric.cpu.usage;
acc.heapUsed += metric.memory.heapUsed;
acc.heapUsedPercentage += metric.memory.heapUsedPercentage;
acc.eventLoopLag += metric.eventLoop.lag;
acc.avgResponseTime += metric.requests.avgResponseTime;
return acc;
},
{
cpuUsage: 0,
heapUsed: 0,
heapUsedPercentage: 0,
eventLoopLag: 0,
avgResponseTime: 0,
}
);
const count = metrics.length;
return {
cpu: { usage: sums.cpuUsage / count, user: 0, system: 0 },
memory: {
heapUsed: sums.heapUsed / count,
heapTotal: 0,
rss: 0,
external: 0,
heapUsedPercentage: sums.heapUsedPercentage / count,
},
eventLoop: { lag: sums.eventLoopLag / count, utilization: 0 },
requests: {
total: 0,
active: 0,
avgResponseTime: sums.avgResponseTime / count,
p95ResponseTime: 0,
p99ResponseTime: 0,
},
} as Partial<PerformanceMetrics>;
}
// Calculate peaks
private calculatePeaks(
metrics: PerformanceMetrics[]
): Partial<PerformanceMetrics> {
if (metrics.length === 0) return {};
const peaks = metrics.reduce(
(acc, metric) => {
acc.cpuUsage = Math.max(acc.cpuUsage, metric.cpu.usage);
acc.heapUsed = Math.max(acc.heapUsed, metric.memory.heapUsed);
acc.heapUsedPercentage = Math.max(
acc.heapUsedPercentage,
metric.memory.heapUsedPercentage
);
acc.eventLoopLag = Math.max(acc.eventLoopLag, metric.eventLoop.lag);
acc.p99ResponseTime = Math.max(
acc.p99ResponseTime,
metric.requests.p99ResponseTime
);
return acc;
},
{
cpuUsage: 0,
heapUsed: 0,
heapUsedPercentage: 0,
eventLoopLag: 0,
p99ResponseTime: 0,
}
);
return {
cpu: { usage: peaks.cpuUsage, user: 0, system: 0 },
memory: {
heapUsed: peaks.heapUsed,
heapTotal: 0,
rss: 0,
external: 0,
heapUsedPercentage: peaks.heapUsedPercentage,
},
eventLoop: { lag: peaks.eventLoopLag, utilization: 0 },
requests: {
total: 0,
active: 0,
avgResponseTime: 0,
p95ResponseTime: 0,
p99ResponseTime: peaks.p99ResponseTime,
},
} as Partial<PerformanceMetrics>;
}
// Add custom alert threshold
addAlertThreshold(threshold: AlertThreshold): void {
this.alertThresholds.push(threshold);
}
// Remove alert threshold
removeAlertThreshold(metric: string, thresholdValue: number): void {
const index = this.alertThresholds.findIndex(
t => t.metric === metric && t.threshold === thresholdValue
);
if (index > -1) {
this.alertThresholds.splice(index, 1);
}
}
// Get current metrics
getCurrentMetrics(): PerformanceMetrics | null {
return this.metrics[this.metrics.length - 1] || null;
}
// Get metrics history
getMetricsHistory(minutes?: number): PerformanceMetrics[] {
if (!minutes) return [...this.metrics];
const cutoffTime = Date.now() - minutes * 60 * 1000;
return this.metrics.filter(m => m.timestamp > cutoffTime);
}
}
// Export singleton instance
export const performanceMonitor = new PerformanceMonitor();
Save this as src/utils/performanceMonitor.ts.
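Wiring the monitor into an Express app ties request tracking and alerting together. The middleware shape and the log-only alert handler below are illustrative:

import express from "express";
import { performance } from "perf_hooks";
import { performanceMonitor } from "./utils/performanceMonitor";

const app = express();
performanceMonitor.startMonitoring(5000);

// Record per-request latency so the p95/p99 metrics have data
app.use((req, res, next) => {
  const start = performance.now();
  const requestId = performanceMonitor.trackRequestStart();
  res.on("finish", () => {
    performanceMonitor.trackRequestEnd(requestId, performance.now() - start);
  });
  next();
});

performanceMonitor.on("alert", alert => {
  console.warn(`Alert: ${alert.metric} = ${alert.currentValue}`);
});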
Best Practices Summary
- Memory Management: Use object pools, weak references, and monitor heap usage
- CPU Optimization: Implement clustering, distribute tasks, and avoid blocking operations
- Caching Strategies: Use multi-layer caching with Redis and memory caches
- Performance Monitoring: Track key metrics and set up alerting
- Event Loop Optimization: Avoid blocking operations and use async patterns
- Connection Pooling: Manage database and external service connections
- Resource Cleanup: Properly close connections and clear timers
- Graceful Shutdown: Handle SIGTERM and SIGINT signals properly (see the sketch after this list)
- Load Testing: Use tools like autocannon for performance testing
- Production Monitoring: Use APM tools and custom metrics
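For the graceful-shutdown item above, a single-process variant looks like the following sketch (the port and timeout are illustrative; ClusterManager already handles the clustered case):

import http from "http";

const server = http.createServer(/* request handler or Express app */);
server.listen(3000);

const shutdown = (signal: string) => {
  console.log(`${signal} received, draining connections...`);
  server.close(() => process.exit(0)); // stop accepting; finish in-flight requests
  setTimeout(() => process.exit(1), 10000).unref(); // force exit after 10s
};

process.on("SIGTERM", () => shutdown("SIGTERM"));
process.on("SIGINT", () => shutdown("SIGINT"));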
Development Commands
# Start with clustering
npm run start:cluster
# Run performance tests
npm run test:performance
# Monitor memory usage
npm run monitor:memory
# Profile CPU usage
npm run profile:cpu
# Load testing
npm run test:load
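These commands assume package.json scripts roughly like the following; the script names match the commands above, but the exact tool invocations and file paths are illustrative:

{
  "scripts": {
    "start:cluster": "node dist/cluster.js",
    "test:performance": "jest --testPathPattern=performance",
    "monitor:memory": "node --expose-gc dist/server.js",
    "profile:cpu": "clinic flame -- node dist/server.js",
    "test:load": "autocannon -c 100 -d 30 http://localhost:3000"
  }
}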
Your Node.js application is now optimized for production with comprehensive performance monitoring, memory management, clustering, and caching strategies!