Performance profiling helps identify bottlenecks in your Node.js applications.
## CPU Profiling
### Using Chrome DevTools
```bash
# Start with the inspector enabled
node --inspect app.js

# Or bind the inspector to a specific port
node --inspect=9229 app.js

# Activate the inspector on an already-running process
kill -USR1 <pid>
```

Open chrome://inspect and click "inspect" to attach DevTools.
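If you need to profile startup itself, use `node --inspect-brk app.js` instead: it pauses execution before the first line of application code runs, so you can start recording before anything executes.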
### Programmatic Profiling
```js
const { Session } = require('inspector');
const fs = require('fs');

const session = new Session();
session.connect();

// Start profiling
session.post('Profiler.enable');
session.post('Profiler.start');

// Your code runs here...

// Stop and save
session.post('Profiler.stop', (err, data) => {
  if (err) throw err;
  fs.writeFileSync('profile.cpuprofile', JSON.stringify(data.profile));
});
```
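The resulting `profile.cpuprofile` file can be loaded back into Chrome DevTools (or any `.cpuprofile` viewer, such as speedscope) for flame-chart analysis.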
### Using clinic.js

```bash
npm install -g clinic

# Diagnose overall performance issues
clinic doctor -- node app.js

# CPU flame graphs
clinic flame -- node app.js

# Visualize async activity and event loop delays
clinic bubbleprof -- node app.js
```

## Memory Profiling
### Heap Snapshots
```js
const v8 = require('v8');

function takeHeapSnapshot() {
  // writeHeapSnapshot() returns the name of the generated file
  const filename = v8.writeHeapSnapshot();
  console.log(`Heap snapshot written to ${filename}`);
}

// Take a snapshot on demand: kill -USR2 <pid>
process.on('SIGUSR2', takeHeapSnapshot);
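```

On recent Node versions you can get the same behavior without any code by starting the process with `--heapsnapshot-signal=SIGUSR2`. Load the resulting `.heapsnapshot` file into the Memory tab of Chrome DevTools; comparing two snapshots taken before and after a workload highlights what is being retained.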
### Detecting Memory Leaks

```js
// Log memory usage every 5 seconds; steady growth in heapUsed
// across GC cycles is the classic leak signature
setInterval(() => {
  const usage = process.memoryUsage();
  console.log({
    heapUsed: Math.round(usage.heapUsed / 1024 / 1024) + 'MB',
    heapTotal: Math.round(usage.heapTotal / 1024 / 1024) + 'MB',
    external: Math.round(usage.external / 1024 / 1024) + 'MB',
  });
}, 5000);
```
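Periodic logging only helps if someone is watching. A minimal sketch that pairs it with the snapshot helper above, writing a heap snapshot automatically once usage crosses a threshold (the 512 MB limit is an arbitrary illustration):

```js
const v8 = require('v8');

const THRESHOLD_MB = 512; // illustrative limit, tune for your workload
let snapshotTaken = false;

setInterval(() => {
  const heapUsedMb = process.memoryUsage().heapUsed / 1024 / 1024;
  if (heapUsedMb > THRESHOLD_MB && !snapshotTaken) {
    snapshotTaken = true; // capture only once to avoid filling the disk
    console.log(`Heap at ${Math.round(heapUsedMb)}MB, writing snapshot`);
    v8.writeHeapSnapshot();
  }
}, 60000);
```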
### Common Memory Leaks

```js
// ❌ Growing array (leak)
const cache = [];
function addToCache(item) {
  cache.push(item); // Never cleaned up
}

// ✅ Fixed with a bounded LRU cache that evicts old entries
const LRU = require('lru-cache'); // newer versions export { LRUCache } instead
const boundedCache = new LRU({ max: 500 });

// ❌ Event listener leak (assumes a long-lived shared EventEmitter `emitter`)
function handleRequest(req, res) {
  emitter.on('data', (data) => res.write(data)); // Never removed
}

// ✅ Fixed with cleanup when the response closes
function handleRequestFixed(req, res) {
  const handler = (data) => res.write(data);
  emitter.on('data', handler);
  res.on('close', () => emitter.off('data', handler));
}

// ❌ Closure retaining references
function createHandler(largeData) {
  return () => {
    console.log(largeData.id); // Retains all of largeData
  };
}

// ✅ Fixed by extracting only the needed value
function createHandlerFixed(largeData) {
  const id = largeData.id;
  return () => {
    console.log(id); // Only retains id
  };
}
```
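For caches keyed by objects, a `WeakMap` avoids this class of leak entirely, since entries are collected together with their keys. A minimal sketch:

```js
// Entries disappear automatically once the key object becomes unreachable
const requestMetadata = new WeakMap();

function tagRequest(req) {
  requestMetadata.set(req, { receivedAt: Date.now() });
}
```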
## Event Loop Monitoring

```js
const { monitorEventLoopDelay } = require('perf_hooks');

// Sample event loop delay with 20ms resolution
const histogram = monitorEventLoopDelay({ resolution: 20 });
histogram.enable();

setInterval(() => {
  // Histogram values are in nanoseconds; convert to milliseconds
  console.log({
    min: histogram.min / 1e6,
    max: histogram.max / 1e6,
    mean: histogram.mean / 1e6,
    percentile99: histogram.percentile(99) / 1e6,
  });
  histogram.reset();
}, 5000);
```
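Event loop delay is one lens; newer Node versions (14.10+) also report event loop utilization, the fraction of time the loop spends actively running callbacks rather than idling. A minimal sketch:

```js
const { performance } = require('perf_hooks');

let last = performance.eventLoopUtilization();
setInterval(() => {
  // Passing the previous reading yields the delta since that reading
  const delta = performance.eventLoopUtilization(last);
  console.log(`event loop utilization: ${(delta.utilization * 100).toFixed(1)}%`);
  last = performance.eventLoopUtilization();
}, 5000);
```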
## Async Operations

```js
const { performance } = require('perf_hooks');

// Measure an async operation's wall-clock duration
async function measureAsync(name, fn) {
  const start = performance.now();
  try {
    return await fn();
  } finally {
    const duration = performance.now() - start;
    console.log(`${name}: ${duration.toFixed(2)}ms`);
  }
}

// Usage (from inside an async function)
await measureAsync('database-query', async () => {
  return db.users.findMany();
});
```
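When you are timing many operations, `performance.mark()`/`performance.measure()` with a `PerformanceObserver` avoids sprinkling manual timers everywhere. A minimal sketch:

```js
const { performance, PerformanceObserver } = require('perf_hooks');

// Log every completed measure
const obs = new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    console.log(`${entry.name}: ${entry.duration.toFixed(2)}ms`);
  }
});
obs.observe({ entryTypes: ['measure'] });

performance.mark('query-start');
// ... do the work being measured ...
performance.mark('query-end');
performance.measure('database-query', 'query-start', 'query-end');
```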
## Optimization Techniques

### Avoid Blocking the Event Loop
```js
// ❌ Blocking: ties up the event loop for the whole array
function processLargeArray(items) {
  return items.map(item => heavyComputation(item));
}

// ✅ Non-blocking: process in batches, yielding between them
async function processLargeArrayBatched(items) {
  const results = [];
  const BATCH_SIZE = 100;

  for (let i = 0; i < items.length; i += BATCH_SIZE) {
    const batch = items.slice(i, i + BATCH_SIZE);
    results.push(...batch.map(heavyComputation));

    // Yield to the event loop so pending I/O can run
    await new Promise(resolve => setImmediate(resolve));
  }

  return results;
}
```

### Use Worker Threads for CPU-Intensive Work
```js
const { Worker, isMainThread, parentPort, workerData } = require('worker_threads');

if (isMainThread) {
  function runWorker(data) {
    return new Promise((resolve, reject) => {
      const worker = new Worker(__filename, { workerData: data });
      worker.on('message', resolve);
      worker.on('error', reject);
    });
  }

  // Offload heavy computation to a worker thread
  (async () => {
    const result = await runWorker({ numbers: largeArray });
    console.log(result);
  })();
} else {
  // Worker: compute and send the result back to the main thread
  const result = heavyComputation(workerData);
  parentPort.postMessage(result);
}
```
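Note that each `new Worker()` spawns a full JavaScript thread with nontrivial startup cost; for sustained CPU-bound load it is usually better to reuse a fixed pool of long-lived workers (for example via a library such as piscina) than to create one per task.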
### Connection Pooling

```js
// Reuse database connections instead of opening one per query
const { Pool } = require('pg');

const pool = new Pool({
  max: 20,                        // upper bound on open connections
  idleTimeoutMillis: 30000,       // close idle connections after 30s
  connectionTimeoutMillis: 2000,  // fail fast if no connection is free
});

// pool.query() checks out a connection, runs the query, and returns it
const result = await pool.query('SELECT * FROM users');
```
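For multi-statement transactions, every statement must run on the same connection, so check a client out of the pool explicitly. A sketch using pg's checkout API (the `accounts` table is illustrative):

```js
const client = await pool.connect();
try {
  await client.query('BEGIN');
  await client.query('UPDATE accounts SET balance = balance - $1 WHERE id = $2', [100, 1]);
  await client.query('UPDATE accounts SET balance = balance + $1 WHERE id = $2', [100, 2]);
  await client.query('COMMIT');
} catch (err) {
  await client.query('ROLLBACK');
  throw err;
} finally {
  client.release(); // Always return the connection to the pool
}
```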
## Monitoring in Production

```js
const client = require('prom-client');

// Collect default Node.js metrics (event loop lag, GC, memory, ...)
client.collectDefaultMetrics();

// Custom metric: HTTP request duration histogram
const httpDuration = new client.Histogram({
  name: 'http_request_duration_seconds',
  help: 'HTTP request duration',
  labelNames: ['method', 'route', 'status'],
});

app.use((req, res, next) => {
  const end = httpDuration.startTimer();
  res.on('finish', () => {
    end({ method: req.method, route: req.route?.path, status: res.statusCode });
  });
  next();
});
```
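The metrics still have to be exposed for Prometheus to scrape; a minimal endpoint, assuming the Express `app` from above:

```js
app.get('/metrics', async (req, res) => {
  res.set('Content-Type', client.register.contentType);
  res.end(await client.register.metrics());
});
```

Profile regularly, monitor in production, and optimize based on real data rather than assumptions.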