Continuous deployment means every change that passes automated tests goes to production. Here's how to do it safely and confidently.
Deployment Strategies#
Rolling Deployment#
# Kubernetes rolling update
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api-server
spec:
  replicas: 4
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1        # Add 1 new pod at a time
      maxUnavailable: 0  # Keep all existing pods running

# Process:
# 1. Start 1 new pod with v2
# 2. Wait for health check
# 3. Terminate 1 old pod
# 4. Repeat until all pods are v2
Blue-Green Deployment#
# Two identical environments
# Blue (current) ──▶ Load Balancer
# Green (new)

# Steps:
# 1. Deploy v2 to Green
# 2. Test Green internally
# 3. Switch load balancer to Green
# 4. Blue becomes standby

# Nginx configuration
upstream blue {
    server blue-app:3000;
}

upstream green {
    server green-app:3000;
}

server {
    location / {
        # Switch by changing this line
        proxy_pass http://green;  # or http://blue
    }
}
Canary Deployment#
# Gradual rollout to subset of users
# v1 (95%) ──┬──▶ Load Balancer
# v2 (5%)  ──┘

# Istio canary configuration
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: api-server
spec:
  hosts:
    - api.example.com
  http:
    - route:
        - destination:
            host: api-v1
          weight: 95
        - destination:
            host: api-v2
          weight: 5

# Gradually increase v2 percentage:
# 5% → 10% → 25% → 50% → 100%
Feature Flags#
Basic Implementation#
1interface FeatureFlags {
2 [key: string]: boolean | ((context: FlagContext) => boolean);
3}
4
5interface FlagContext {
6 userId?: string;
7 userEmail?: string;
8 userRole?: string;
9 percentage?: number;
10}
11
12const flags: FeatureFlags = {
13 newCheckout: true,
14 darkMode: false,
15 betaFeatures: ({ userRole }) => userRole === 'beta-tester',
16 gradualRollout: ({ userId }) => {
17 // 10% of users
18 return hashString(userId) % 100 < 10;
19 },
20};
21
22function isEnabled(flagName: string, context: FlagContext = {}): boolean {
23 const flag = flags[flagName];
24 if (typeof flag === 'function') {
25 return flag(context);
26 }
27 return !!flag;
28}
29
// Usage
if (isEnabled('newCheckout', { userId: user.id })) {
  renderNewCheckout();
} else {
  renderOldCheckout();
}
Feature Flag Service (LaunchDarkly-style)#
class FeatureFlagService {
  private flags: Map<string, Flag> = new Map();

  constructor(private apiKey: string) {
    // Fix: initialize() is async — calling it bare created a floating
    // promise whose rejection is unhandled (a crash in Node). Surface
    // failures explicitly; `void` marks deliberate fire-and-forget.
    void this.initialize().catch((err) => {
      console.error('Feature flag initialization failed', err);
    });
  }

  /** Fetch all flag definitions, then subscribe to streaming updates. */
  async initialize(): Promise<void> {
    const response = await fetch(`https://flags.example.com/api/flags`, {
      headers: { Authorization: `Bearer ${this.apiKey}` },
    });
    const flagsData = await response.json();

    for (const flag of flagsData) {
      this.flags.set(flag.key, flag);
    }

    // Subscribe to real-time updates
    // NOTE(review): subscribeToUpdates/matchesRule are not defined in this
    // excerpt — presumably implemented elsewhere; verify before reuse.
    this.subscribeToUpdates();
  }

  /**
   * Evaluate a flag for the given context.
   * Precedence: kill switch → targeting rules → percentage rollout →
   * default value. Unknown flags (including any evaluated before
   * initialize() resolves) return false.
   */
  isEnabled(key: string, context: FlagContext): boolean {
    const flag = this.flags.get(key);
    if (!flag) return false;

    // Check kill switch
    if (!flag.enabled) return false;

    // Check targeting rules
    for (const rule of flag.rules) {
      if (this.matchesRule(rule, context)) {
        return rule.serve;
      }
    }

    // Check percentage rollout
    if (flag.percentage !== undefined) {
      // Fix: context.userId is optional — the original passed a possibly
      // undefined value into getBucket(userId: string, ...), hashing the
      // literal "undefined" and putting every anonymous user in the same
      // bucket. Fall back to the flag's default instead.
      if (context.userId === undefined) return flag.defaultValue;
      const bucket = this.getBucket(context.userId, key);
      return bucket < flag.percentage;
    }

    return flag.defaultValue;
  }

  /** Deterministically map a (user, flag) pair to a bucket in [0, 100). */
  private getBucket(userId: string, flagKey: string): number {
    const hash = hashString(`${userId}:${flagKey}`);
    return hash % 100;
  }
}

// React hook: evaluates a flag for the currently signed-in user.
function useFeatureFlag(key: string): boolean {
  const flagService = useContext(FeatureFlagContext);
  const user = useCurrentUser();

  return flagService.isEnabled(key, {
    userId: user?.id,
    userEmail: user?.email,
    userRole: user?.role,
  });
}
Database Migrations#
Backward-Compatible Changes#
-- Always deploy database changes before code changes

-- Step 1: Add column (nullable)
ALTER TABLE users ADD COLUMN phone VARCHAR(20);

-- Step 2: Deploy code that writes to both old and new columns

-- Step 3: Backfill data
UPDATE users SET phone = legacy_phone WHERE phone IS NULL;

-- Step 4: Deploy code that reads from new column

-- Step 5: Add constraints
-- NOTE(review): this fails if any legacy_phone was NULL — confirm the
-- backfill left no NULL rows before enforcing.
ALTER TABLE users ALTER COLUMN phone SET NOT NULL;

-- Step 6: Remove old column (in a later deployment)
ALTER TABLE users DROP COLUMN legacy_phone;
Migration Tooling#
// Prisma migration workflow
// 1. Update schema
// 2. Generate migration
// 3. Test migration
// 4. Deploy migration
// 5. Deploy code

// prisma/schema.prisma
model User {
  id    Int     @id @default(autoincrement())
  email String  @unique
  phone String? // New field - nullable first
}

// Commands
// npx prisma migrate dev --name add_phone_column
// npx prisma migrate deploy
Rollback Strategies#
Instant Rollback#
# Kubernetes rollback
kubectl rollout undo deployment/api-server

# Or to specific revision
kubectl rollout undo deployment/api-server --to-revision=2

# Check history
kubectl rollout history deployment/api-server
Feature Flag Rollback#
/**
 * Incident response: kill a misbehaving feature instantly.
 * Flipping the flag off reverts every user to the old code path with no
 * deployment and no rollback required.
 */
async function handleIncident(flagKey: string): Promise<void> {
  await flagService.disable(flagKey);
}
Database Rollback#
-- Prepare rollback scripts before migration
-- rollback_add_phone_column.sql
ALTER TABLE users DROP COLUMN phone;

-- Keep scripts ready but prefer forward-fixes
-- Rolling back data changes is risky
Monitoring and Alerts#
Deployment Metrics#
// Track key metrics during deployment
interface DeploymentMetrics {
  errorRate: number;
  latencyP50: number;
  latencyP99: number;
  requestsPerSecond: number;
}

/**
 * Compare live metrics against the pre-deploy baseline.
 * Pages on-call and returns false on the first metric (error rate, then
 * p99 latency) exceeding baseline * threshold; returns true otherwise.
 */
async function monitorDeployment(
  baselineMetrics: DeploymentMetrics,
  threshold: number = 1.5,
): Promise<boolean> {
  const current = await getMetrics();

  // Checked in order; evaluation stops at the first degraded metric.
  const checks: Array<[keyof DeploymentMetrics, string]> = [
    ['errorRate', 'Error rate increased during deployment'],
    ['latencyP99', 'Latency increased during deployment'],
  ];

  for (const [metric, message] of checks) {
    if (current[metric] > baselineMetrics[metric] * threshold) {
      await alertOncall(message);
      return false;
    }
  }

  return true;
}
Automated Rollback#
# Argo Rollouts with automated analysis
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: api-server
spec:
  strategy:
    canary:
      steps:
        - setWeight: 10
        - pause: { duration: 5m }
        - analysis:
            templates:
              - templateName: success-rate
        - setWeight: 50
        - pause: { duration: 10m }
        - analysis:
            templates:
              - templateName: success-rate
        - setWeight: 100

---
apiVersion: argoproj.io/v1alpha1
kind: AnalysisTemplate
metadata:
  name: success-rate
spec:
  metrics:
    - name: success-rate
      provider:
        prometheus:
          address: http://prometheus:9090
          query: |
            sum(rate(http_requests_total{status=~"2.."}[5m])) /
            sum(rate(http_requests_total[5m])) * 100
      successCondition: result >= 99
      failureLimit: 3
CI/CD Pipeline#
# GitHub Actions
name: Deploy

on:
  push:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - run: npm ci
      - run: npm test
      - run: npm run build

  deploy-staging:
    needs: test
    runs-on: ubuntu-latest
    steps:
      # Fix: every job starts on a fresh runner. Without checkout + install,
      # neither k8s/staging/ nor the smoke-test scripts exist in this job.
      - uses: actions/checkout@v3
      - run: npm ci

      - name: Deploy to staging
        run: kubectl apply -f k8s/staging/

      - name: Run smoke tests
        run: npm run test:smoke -- --env=staging

  deploy-production:
    needs: deploy-staging
    runs-on: ubuntu-latest
    steps:
      # Same fix: fetch the repo and dependencies before using them.
      - uses: actions/checkout@v3
      - run: npm ci

      - name: Deploy canary (10%)
        run: |
          kubectl apply -f k8s/production/
          kubectl set image deployment/api-server api=api:${{ github.sha }}

      - name: Monitor canary
        run: |
          sleep 300
          npm run check-metrics -- --threshold=1.5

      - name: Full rollout
        # NOTE(review): `rollout resume` only acts on a paused rollout and
        # no earlier step pauses one — presumably this expects a paused
        # Deployment or an Argo Rollout; verify against the cluster setup.
        run: kubectl rollout resume deployment/api-server
Conclusion#
Continuous deployment requires confidence in your testing, monitoring, and rollback capabilities. Start with feature flags and rolling deployments, then add canary releases as your monitoring matures.
The goal is shipping safely, not shipping fast. With good practices, you get both.