Docker containers provide consistent environments from development to production. Here's how to build efficient, secure Node.js containers.
Multi-Stage Builds#
# Build stage
FROM node:20-alpine AS builder

WORKDIR /app

# Copy package files first for better caching
COPY package*.json ./
RUN npm ci

# Copy source and build
COPY . .
RUN npm run build

# Prune dev dependencies (--omit=dev replaces the deprecated --production flag)
RUN npm prune --omit=dev

# Production stage
FROM node:20-alpine AS production

WORKDIR /app

ENV NODE_ENV=production

# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001

# Copy only necessary files from the build stage.
# --from=builder is required: a plain COPY reads from the build context
# (the host), not from the builder stage, and these paths don't exist there.
COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nodejs:nodejs /app/package.json ./

USER nodejs

EXPOSE 3000

CMD ["node", "dist/index.js"]

Optimized Layer Caching#
FROM node:20-alpine

WORKDIR /app

# Dependencies change less frequently — copy manifests alone so this
# layer is cached until package*.json changes.
COPY package.json package-lock.json ./
# --omit=dev replaces the deprecated --only=production flag;
# clean the npm cache in the same layer so it doesn't bloat the image.
RUN npm ci --omit=dev && \
    npm cache clean --force

# Source changes more frequently
COPY . .

CMD ["node", "src/index.js"]

.dockerignore#
# Dependencies
node_modules
npm-debug.log
# Build artifacts
dist
build
coverage
# Development files
.git
.gitignore
.env*
.editorconfig
.eslintrc*
.prettierrc*
tsconfig.json
# Documentation
README.md
docs
# Tests
__tests__
*.test.js
*.spec.js
jest.config.js
# IDE
.vscode
.idea
# Docker files
Dockerfile*
docker-compose*
.dockerignore
Security Hardening#
FROM node:20-alpine

# Upgrade OS packages and install tini while we are still root.
# NOTE: the original installed tini AFTER `USER appuser`, which fails —
# apk needs root. --no-cache avoids leaving an apk index in the layer.
RUN apk --no-cache upgrade && \
    apk add --no-cache tini

WORKDIR /app

# Create non-root user early
RUN addgroup -g 1001 -S appgroup && \
    adduser -S appuser -u 1001 -G appgroup

# Copy package files
COPY package*.json ./

# Install dependencies as root, then switch
RUN npm ci --omit=dev && \
    npm cache clean --force

# Copy application (owned by the runtime user, not root)
COPY --chown=appuser:appgroup . .

# Switch to non-root user
USER appuser

# Don't run the app as PID 1 — tini forwards signals and reaps zombies
ENTRYPOINT ["/sbin/tini", "--"]

# Health check (wget ships with Alpine's busybox)
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1

EXPOSE 3000
CMD ["node", "src/index.js"]

Environment Variables#
FROM node:20-alpine

WORKDIR /app

# Stable build-time argument
ARG NODE_ENV=production
ENV NODE_ENV=$NODE_ENV

COPY package*.json ./
RUN npm ci --omit=dev

COPY . .

# APP_VERSION changes on every release — declare it as late as possible
# so bumping it doesn't invalidate the cached dependency layers above.
ARG APP_VERSION=unknown
ENV APP_VERSION=$APP_VERSION \
    PORT=3000

EXPOSE $PORT
CMD ["node", "src/index.js"]

# docker-compose.yml
version: '3.8'

services:
  app:
    build:
      context: .
      args:
        NODE_ENV: production
        APP_VERSION: ${APP_VERSION:-1.0.0}
    # Secrets are injected from the host environment / env file at
    # runtime — never baked into the image.
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=${REDIS_URL}
      - JWT_SECRET=${JWT_SECRET}
    env_file:
      - .env.production

Development Setup#
# docker-compose.yml
version: '3.8'

services:
  app:
    build:
      context: .
      target: development
    volumes:
      # Bind-mount source for hot reload; the anonymous volume keeps the
      # container's node_modules from being shadowed by the host mount.
      - .:/app
      - /app/node_modules
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=development
    command: npm run dev

  db:
    image: postgres:15-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: dev
      POSTGRES_PASSWORD: dev
      POSTGRES_DB: app_dev
    ports:
      - "5432:5432"

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

volumes:
  postgres_data:

# Multi-target Dockerfile
FROM node:20-alpine AS base
WORKDIR /app
COPY package*.json ./

FROM base AS development
RUN npm install
COPY . .
CMD ["npm", "run", "dev"]

FROM base AS builder
RUN npm ci
COPY . .
RUN npm run build
RUN npm prune --omit=dev

FROM node:20-alpine AS production
WORKDIR /app
# --from=builder is required: a plain COPY reads from the build context,
# not from the builder stage, so these paths would not be found.
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/node_modules ./node_modules
CMD ["node", "dist/index.js"]

Health Checks#
// src/health.ts
import express from 'express';

const router = express.Router();

// Liveness: the process is up and able to answer requests.
router.get('/health', (req, res) => {
  res.json({ status: 'ok' });
});

// Readiness: external dependencies are reachable; 503 until they are.
router.get('/ready', async (req, res) => {
  try {
    await db.$queryRaw`SELECT 1`; // database reachable
    await redis.ping();           // cache reachable
    res.json({ status: 'ready' });
  } catch (error) {
    res.status(503).json({ status: 'not ready', error: error.message });
  }
});

export default router;

HEALTHCHECK \
  CMD node -e "require('http').get('http://localhost:3000/health', (r) => process.exit(r.statusCode === 200 ? 0 : 1))"
Logging#
1// Don't write to files in containers
2const logger = pino({
3 level: process.env.LOG_LEVEL || 'info',
4 // Write to stdout/stderr
5 transport: process.env.NODE_ENV === 'development'
6 ? { target: 'pino-pretty' }
7 : undefined,
8});
9
10// Log to stdout, let Docker handle the rest1# docker-compose.yml
services:
  app:
    # Rotate container logs so they can't fill the host disk.
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

Resource Limits#
# docker-compose.yml
services:
  app:
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          cpus: '0.5'
          memory: 256M

# Set Node.js memory limit (Dockerfile) — keep the heap below the
# container limit so the kernel OOM killer doesn't fire first.
ENV NODE_OPTIONS="--max-old-space-size=450"

Graceful Shutdown#
1// Handle SIGTERM for graceful shutdown
2process.on('SIGTERM', async () => {
3 console.log('SIGTERM received, shutting down gracefully');
4
5 // Stop accepting new requests
6 server.close(async () => {
7 console.log('HTTP server closed');
8
9 // Close database connections
10 await db.$disconnect();
11
12 // Close Redis
13 await redis.quit();
14
15 process.exit(0);
16 });
17
18 // Force exit after timeout
19 setTimeout(() => {
20 console.error('Forced shutdown after timeout');
21 process.exit(1);
22 }, 30000);
23});Best Practices#
Image Size:
✓ Use Alpine-based images
✓ Multi-stage builds
✓ Remove dev dependencies
✓ Clean npm cache
Security:
✓ Run as non-root user
✓ Update base images regularly
✓ Scan for vulnerabilities
✓ Don't store secrets in images
Performance:
✓ Optimize layer caching
✓ Use .dockerignore
✓ Set resource limits
✓ Configure health checks
Development:
✓ Use docker-compose
✓ Mount source for hot reload
✓ Match production environment
✓ Use consistent versions
Conclusion#
Docker enables consistent deployments across environments. Use multi-stage builds for smaller images, run as non-root for security, and implement health checks for reliability. The extra setup time pays off in deployment confidence.