Performance Profiling

Patterns for identifying and fixing performance bottlenecks.

Overview

Profiling helps you find and fix performance issues systematically. This pattern covers:

  • React DevTools Profiler
  • Server-side profiling
  • Database query profiling
  • Custom performance metrics
  • Web Vitals monitoring

Prerequisites

npm install web-vitals @opentelemetry/api

Code Example

React DevTools Profiler

// components/ProfiledComponent.tsx
'use client'

import { Profiler, ProfilerOnRenderCallback } from 'react'

/**
 * Profiler callback: receives timing data for every commit of the wrapped
 * subtree, logs it locally, and forwards it to metrics in production.
 *
 * NOTE(review): `sendMetric` is assumed to be defined/imported elsewhere in
 * the app — confirm before copying this snippet verbatim.
 */
const handleRender: ProfilerOnRenderCallback = (
  id,
  phase,
  actualDuration,
  baseDuration,
  startTime,
  commitTime
) => {
  console.log({
    id,
    phase, // "mount" or "update"
    actualDuration, // Time spent rendering
    baseDuration, // Estimated time without memoization
    startTime,
    commitTime
  })

  // Only ship metrics from production builds.
  if (process.env.NODE_ENV === 'production') {
    sendMetric('react_render', {
      component: id,
      phase,
      duration: actualDuration
    })
  }
}

/** Wraps children in a React <Profiler> so every Dashboard commit is measured. */
export function ProfiledDashboard({ children }: { children: React.ReactNode }) {
  return (
    <Profiler id="Dashboard" onRender={handleRender}>
      {children}
    </Profiler>
  )
}

Custom Performance Hooks

1// hooks/usePerformance.ts 2'use client' 3 4import { useEffect, useRef } from 'react' 5 6export function useRenderCount(componentName: string) { 7 const renderCount = useRef(0) 8 9 useEffect(() => { 10 renderCount.current++ 11 12 if (process.env.NODE_ENV === 'development') { 13 console.log(`${componentName} rendered ${renderCount.current} times`) 14 } 15 }) 16 17 return renderCount.current 18} 19 20export function useMountTime(componentName: string) { 21 const mountTime = useRef(performance.now()) 22 23 useEffect(() => { 24 const duration = performance.now() - mountTime.current 25 26 if (duration > 100) { 27 console.warn(`${componentName} took ${duration.toFixed(2)}ms to mount`) 28 } 29 }, [componentName]) 30} 31 32export function useUpdateTime(componentName: string, deps: any[]) { 33 const lastUpdate = useRef(performance.now()) 34 35 useEffect(() => { 36 const now = performance.now() 37 const duration = now - lastUpdate.current 38 lastUpdate.current = now 39 40 if (duration > 16) { // More than one frame 41 console.warn(`${componentName} update took ${duration.toFixed(2)}ms`) 42 } 43 }, deps) 44}

Server-Side Performance Tracing

1// lib/performance.ts 2import { trace, SpanStatusCode } from '@opentelemetry/api' 3 4const tracer = trace.getTracer('my-app') 5 6export async function withSpan<T>( 7 name: string, 8 fn: () => Promise<T>, 9 attributes?: Record<string, string | number> 10): Promise<T> { 11 return tracer.startActiveSpan(name, async span => { 12 if (attributes) { 13 span.setAttributes(attributes) 14 } 15 16 try { 17 const result = await fn() 18 span.setStatus({ code: SpanStatusCode.OK }) 19 return result 20 } catch (error) { 21 span.setStatus({ 22 code: SpanStatusCode.ERROR, 23 message: error instanceof Error ? error.message : 'Unknown error' 24 }) 25 span.recordException(error as Error) 26 throw error 27 } finally { 28 span.end() 29 } 30 }) 31} 32 33// Usage 34const users = await withSpan( 35 'fetch-users', 36 () => prisma.user.findMany(), 37 { 'db.operation': 'findMany', 'db.table': 'users' } 38)

Database Query Profiling

1// lib/db-profiler.ts 2import { Prisma } from '@prisma/client' 3import { prisma } from '@/lib/db' 4 5// Query logging middleware 6export const queryLogger: Prisma.Middleware = async (params, next) => { 7 const before = Date.now() 8 9 const result = await next(params) 10 11 const after = Date.now() 12 const duration = after - before 13 14 // Log slow queries 15 if (duration > 100) { 16 console.warn('Slow query detected:', { 17 model: params.model, 18 action: params.action, 19 duration: `${duration}ms`, 20 args: JSON.stringify(params.args).slice(0, 200) 21 }) 22 } 23 24 // Send metrics 25 sendMetric('db_query', { 26 model: params.model ?? 'unknown', 27 action: params.action, 28 duration 29 }) 30 31 return result 32} 33 34// prisma.$use(queryLogger) 35 36// Query explain for debugging 37export async function explainQuery(query: string) { 38 const result = await prisma.$queryRawUnsafe(`EXPLAIN ANALYZE ${query}`) 39 return result 40}

Web Vitals Monitoring

// app/layout.tsx
import { WebVitals } from '@/components/WebVitals'

export default function RootLayout({
  children
}: {
  children: React.ReactNode
}) {
  return (
    <html lang="en">
      <body>
        {children}
        <WebVitals />
      </body>
    </html>
  )
}

// components/WebVitals.tsx
'use client'

import { useEffect } from 'react'
// onFID was removed in web-vitals v4; INP replaced FID as the responsiveness metric.
import { onCLS, onINP, onLCP, onFCP, onTTFB, type Metric } from 'web-vitals'

/**
 * Client component that reports Core Web Vitals to /api/vitals.
 * Renders nothing; mount it once (e.g. in the root layout).
 */
export function WebVitals() {
  useEffect(() => {
    function sendToAnalytics(metric: Metric) {
      const body = JSON.stringify({
        name: metric.name,
        value: metric.value,
        rating: metric.rating,
        delta: metric.delta,
        id: metric.id,
        navigationType: metric.navigationType
      })

      // Use sendBeacon for reliability (survives page unload);
      // fall back to a keepalive fetch where it is unavailable.
      if (navigator.sendBeacon) {
        navigator.sendBeacon('/api/vitals', body)
      } else {
        fetch('/api/vitals', { body, method: 'POST', keepalive: true })
      }
    }

    onCLS(sendToAnalytics) // Cumulative Layout Shift
    onINP(sendToAnalytics) // Interaction to Next Paint (replaces FID)
    onLCP(sendToAnalytics) // Largest Contentful Paint
    onFCP(sendToAnalytics) // First Contentful Paint
    onTTFB(sendToAnalytics) // Time to First Byte
  }, [])

  return null
}

Performance Budget

// lib/performance-budget.ts

/** Thresholds a build must stay under. */
interface PerformanceBudget {
  lcp: number // Largest Contentful Paint (ms)
  fid: number // First Input Delay (ms)
  cls: number // Cumulative Layout Shift
  ttfb: number // Time to First Byte (ms)
  bundleSize: number // JS bundle size (KB)
}

const BUDGET: PerformanceBudget = {
  lcp: 2500,
  fid: 100,
  cls: 0.1,
  ttfb: 800,
  bundleSize: 200
}

/**
 * Compares measured metrics against BUDGET.
 * Metrics that were not measured are skipped; a metric strictly above its
 * threshold produces a violation message.
 */
export function checkBudget(metrics: Partial<PerformanceBudget>) {
  const violations: string[] = []

  for (const [key, threshold] of Object.entries(BUDGET)) {
    const value = metrics[key as keyof PerformanceBudget]
    if (value !== undefined && value > threshold) {
      violations.push(
        `${key}: ${value} exceeds budget of ${threshold}`
      )
    }
  }

  return {
    passed: violations.length === 0,
    violations
  }
}

// CI integration
// scripts/check-performance.ts

// Erased ambient declaration: runLighthouse is provided by the CI script's
// own tooling — the original snippet referenced it without any declaration,
// so it did not compile standalone.
declare function runLighthouse(url: string): Promise<Partial<PerformanceBudget>>

/** Fails the CI process (exit code 1) when the budget is exceeded. */
async function checkPerformanceBudget() {
  const metrics = await runLighthouse('https://example.com')

  const result = checkBudget({
    lcp: metrics.lcp,
    cls: metrics.cls,
    ttfb: metrics.ttfb
  })

  if (!result.passed) {
    console.error('Performance budget exceeded:')
    result.violations.forEach(v => console.error(` - ${v}`))
    process.exit(1)
  }

  console.log('Performance budget passed!')
}

Custom Performance Marks

// lib/marks.ts

/**
 * Records the start of a custom performance measurement.
 * No-op where the Performance API is unavailable.
 */
export function startMark(name: string) {
  if (typeof performance !== 'undefined') {
    performance.mark(`${name}-start`)
  }
}

/**
 * Ends a measurement started with startMark(), logs its duration, and
 * cleans up the marks/measure it created.
 *
 * @returns duration in ms, or undefined when the Performance API is
 *          unavailable or the matching start mark was never recorded.
 */
export function endMark(name: string): number | undefined {
  if (typeof performance === 'undefined') {
    return undefined
  }

  performance.mark(`${name}-end`)

  try {
    performance.measure(name, `${name}-start`, `${name}-end`)

    const entries = performance.getEntriesByName(name)
    const duration = entries[entries.length - 1]?.duration

    console.log(`${name}: ${duration?.toFixed(2)}ms`)

    // Cleanup so repeated measurements don't accumulate entries.
    performance.clearMarks(`${name}-start`)
    performance.clearMarks(`${name}-end`)
    performance.clearMeasures(name)

    return duration
  } catch {
    // measure() throws when the start mark doesn't exist; remove the
    // dangling end mark (the original version leaked it) and report failure.
    performance.clearMarks(`${name}-end`)
    return undefined
  }
}

// Usage (inside an async function — the original snippet ran this at
// module top level with an undefined fetchData, which would throw):
// startMark('data-fetch')
// const data = await fetchData()
// endMark('data-fetch') // Logs: data-fetch: 123.45ms

Usage Instructions

  1. Use React DevTools Profiler to identify slow renders
  2. Add performance tracing to server-side operations
  3. Monitor database query performance
  4. Track Core Web Vitals in production
  5. Set and enforce performance budgets

Best Practices

  • Profile in production mode - Development mode adds overhead
  • Test on real devices - Use device lab or BrowserStack
  • Establish baselines - Know your starting point
  • Automate checks - Include performance tests in CI
  • Monitor trends - Watch for regressions over time
  • Focus on user impact - Prioritize visible performance issues
  • Use sampling - Don't log every request in production