OpenAI Integration Pattern

Integrate OpenAI's GPT models and APIs into your Next.js application for chat, completions, and embeddings.

Overview

OpenAI provides powerful language models including GPT-4 for text generation and text-embedding models for vector search. This pattern covers common integration scenarios in Next.js applications.

When to use:

  • Chat interfaces and conversational AI
  • Content generation
  • Text embeddings and semantic search
  • Code generation and assistance
  • Data extraction and analysis

Key features:

  • Chat completions API
  • Streaming responses
  • Function calling
  • Text embeddings
  • Error handling
  • Rate limiting awareness

Code Examples

Client Setup

```ts
// lib/openai.ts
import OpenAI from 'openai'

export const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
})
```

Basic Chat Completion

```ts
// lib/openai.ts
export async function chat(prompt: string) {
  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages: [{ role: 'user', content: prompt }],
    max_tokens: 1024
  })

  return response.choices[0].message.content
}
```

With System Prompt

```ts
// lib/openai.ts
export async function chatWithSystem(
  system: string,
  prompt: string
) {
  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages: [
      { role: 'system', content: system },
      { role: 'user', content: prompt }
    ],
    max_tokens: 1024
  })

  return response.choices[0].message.content
}
```

Streaming Response

```ts
// app/api/chat/route.ts
import { openai } from '@/lib/openai'
import { OpenAIStream, StreamingTextResponse } from 'ai'

export async function POST(req: Request) {
  const { messages } = await req.json()

  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages,
    stream: true
  })

  const stream = OpenAIStream(response)
  return new StreamingTextResponse(stream)
}
```
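Note: OpenAIStream and StreamingTextResponse are the streaming helpers from pre-4.0 releases of the ai package; current AI SDK versions replace them with streamText, shown in the Vercel AI SDK Integration section below.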

Function Calling

```ts
// lib/openai.ts
const functions = [
  {
    name: 'get_weather',
    description: 'Get current weather for a location',
    parameters: {
      type: 'object',
      properties: {
        location: {
          type: 'string',
          description: 'City and country, e.g., London, UK'
        },
        units: {
          type: 'string',
          enum: ['celsius', 'fahrenheit'],
          description: 'Temperature units'
        }
      },
      required: ['location']
    }
  }
]

export async function chatWithFunctions(prompt: string) {
  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages: [{ role: 'user', content: prompt }],
    functions,
    function_call: 'auto'
  })

  const message = response.choices[0].message

  if (message.function_call) {
    const functionName = message.function_call.name
    const args = JSON.parse(message.function_call.arguments)

    // Execute the function
    const result = await executeFunction(functionName, args)

    // Send the result back to the model
    const followUp = await openai.chat.completions.create({
      model: 'gpt-4-turbo-preview',
      messages: [
        { role: 'user', content: prompt },
        message,
        {
          role: 'function',
          name: functionName,
          content: JSON.stringify(result)
        }
      ]
    })

    return followUp.choices[0].message.content
  }

  return message.content
}

async function executeFunction(name: string, args: any) {
  switch (name) {
    case 'get_weather':
      // Call a real weather API here
      return { temperature: 22, condition: 'sunny' }
    default:
      throw new Error(`Unknown function: ${name}`)
  }
}
```

Text Embeddings

```ts
// lib/openai.ts
export async function getEmbedding(text: string) {
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: text
  })

  return response.data[0].embedding
}

export async function getEmbeddings(texts: string[]) {
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: texts
  })

  return response.data.map(d => d.embedding)
}
```
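To put embeddings to work, compare vectors with cosine similarity. A minimal semantic-search sketch building on the helpers above (cosineSimilarity and rankBySimilarity are illustrative, not part of the OpenAI SDK):

```ts
// lib/similarity.ts (illustrative helpers, not part of the OpenAI SDK)
import { getEmbedding, getEmbeddings } from './openai'

export function cosineSimilarity(a: number[], b: number[]) {
  let dot = 0
  let normA = 0
  let normB = 0
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i]
    normA += a[i] * a[i]
    normB += b[i] * b[i]
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB))
}

// Rank candidate texts against a query, highest similarity first
export async function rankBySimilarity(query: string, candidates: string[]) {
  const queryEmbedding = await getEmbedding(query)
  const candidateEmbeddings = await getEmbeddings(candidates)

  return candidates
    .map((text, i) => ({
      text,
      score: cosineSimilarity(queryEmbedding, candidateEmbeddings[i])
    }))
    .sort((a, b) => b.score - a.score)
}
```

OpenAI's embeddings are normalized to length 1, so a plain dot product yields the same ranking as full cosine similarity.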

Multi-Turn Conversation

```ts
// lib/openai.ts
interface Message {
  role: 'user' | 'assistant' | 'system'
  content: string
}

export async function conversation(
  messages: Message[],
  systemPrompt?: string
) {
  const allMessages: Message[] = systemPrompt
    ? [{ role: 'system', content: systemPrompt }, ...messages]
    : messages

  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages: allMessages,
    max_tokens: 1024
  })

  return response.choices[0].message.content
}
```
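A quick usage sketch: keep the running history in an array and append each assistant reply before the next turn (the history handling here is illustrative):

```ts
// Illustrative usage of conversation() across two turns
const history: Message[] = []

history.push({ role: 'user', content: 'What is Next.js?' })
const first = await conversation(history, 'You are a concise assistant.')
history.push({ role: 'assistant', content: first ?? '' })

history.push({ role: 'user', content: 'How does it differ from plain React?' })
const second = await conversation(history, 'You are a concise assistant.')
```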

Error Handling

```ts
// lib/openai.ts
import OpenAI from 'openai'

export async function safeChatCompletion(prompt: string) {
  try {
    const response = await openai.chat.completions.create({
      model: 'gpt-4-turbo-preview',
      messages: [{ role: 'user', content: prompt }]
    })

    return {
      success: true,
      text: response.choices[0].message.content
    }
  } catch (error) {
    if (error instanceof OpenAI.APIError) {
      // Handle specific error types
      if (error.status === 429) {
        return {
          success: false,
          error: 'Rate limit exceeded. Please try again later.',
          retryAfter: error.headers?.['retry-after']
        }
      }
      if (error.status === 401) {
        return {
          success: false,
          error: 'Invalid API key'
        }
      }
      return {
        success: false,
        error: error.message,
        status: error.status
      }
    }
    throw error
  }
}
```

API Route Handler

```ts
// app/api/chat/route.ts
import { openai } from '@/lib/openai'
import { NextRequest, NextResponse } from 'next/server'
import { auth } from '@/lib/auth'

export async function POST(request: NextRequest) {
  const { userId } = await auth()
  if (!userId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }

  try {
    const { messages, system } = await request.json()

    const response = await openai.chat.completions.create({
      model: 'gpt-4-turbo-preview',
      messages: [
        ...(system ? [{ role: 'system' as const, content: system }] : []),
        ...messages
      ],
      max_tokens: 1024
    })

    return NextResponse.json({
      response: response.choices[0].message.content
    })
  } catch (error) {
    console.error('OpenAI API error:', error)
    return NextResponse.json(
      { error: 'Failed to generate response' },
      { status: 500 }
    )
  }
}
```
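Calling this route from the browser is a plain fetch; the JSON body matches what the handler destructures (a hypothetical example):

```ts
// Client-side call to the route above
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    system: 'You are a helpful assistant.',
    messages: [{ role: 'user', content: 'Hello!' }]
  })
})

const data = await res.json()
if (res.ok) {
  console.log(data.response)
} else {
  console.error(data.error)
}
```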

Vercel AI SDK Integration

```ts
// app/api/chat/route.ts
import { openai } from '@ai-sdk/openai'
import { streamText } from 'ai'

export async function POST(req: Request) {
  const { messages } = await req.json()

  const result = streamText({
    model: openai('gpt-4-turbo-preview'),
    messages
  })

  return result.toDataStreamResponse()
}
```
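On the client, streams from either streaming route can be consumed with the AI SDK's useChat hook, which posts to /api/chat by default. A minimal chat page sketch (assuming @ai-sdk/react at the same AI SDK 4.x version as the streamText route above; the hook's surface differs across major versions):

```tsx
// app/chat/page.tsx
'use client'

import { useChat } from '@ai-sdk/react'

export default function ChatPage() {
  // useChat manages message state and streams tokens from /api/chat
  const { messages, input, handleInputChange, handleSubmit } = useChat()

  return (
    <div>
      {messages.map(m => (
        <p key={m.id}>
          <strong>{m.role}:</strong> {m.content}
        </p>
      ))}
      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={handleInputChange}
          placeholder="Ask something..."
        />
      </form>
    </div>
  )
}
```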

Usage Instructions

  1. Install dependencies: npm install openai ai @ai-sdk/openai
  2. Configure API key: Add OPENAI_API_KEY to your environment variables (see the validation sketch after this list)
  3. Create client: Initialize the OpenAI client with your key
  4. Make requests: Use chat completions, embeddings, or function calling
  5. Handle responses: Parse the response and handle errors
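For step 2, put the key in .env.local and consider failing fast when it is missing, rather than on the first request. A minimal variant of the client setup (the error message is illustrative):

```ts
// lib/openai.ts
import OpenAI from 'openai'

// Fail fast at startup if the key was never configured
if (!process.env.OPENAI_API_KEY) {
  throw new Error('OPENAI_API_KEY is not set; add it to .env.local')
}

export const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
})
```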

Best Practices

  1. Use environment variables - Never expose API keys in client code
  2. Implement rate limiting - Handle 429 errors gracefully
  3. Set token limits - Use max_tokens to control costs
  4. Stream long responses - Improve UX for content generation
  5. Cache when possible - Store embeddings and repeated queries
  6. Handle errors - Implement proper error handling and retries (see the backoff sketch after this list)
  7. Monitor usage - Track API calls and costs
  8. Use the right model - Choose model based on task complexity and cost
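For practices 2 and 6, a small wrapper with exponential backoff covers transient 429 and 5xx responses. This withRetry helper is an illustrative sketch, not part of the OpenAI SDK (the openai package also retries automatically, configurable via its maxRetries client option):

```ts
// lib/retry.ts (illustrative backoff wrapper)
import OpenAI from 'openai'

export async function withRetry<T>(
  fn: () => Promise<T>,
  maxAttempts = 3,
  baseDelayMs = 1000
): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn()
    } catch (error) {
      // Only retry rate limits and server errors, up to maxAttempts
      const retryable =
        error instanceof OpenAI.APIError &&
        (error.status === 429 || (error.status ?? 0) >= 500)
      if (!retryable || attempt >= maxAttempts) throw error

      // Exponential backoff: 1s, 2s, 4s, ...
      const delay = baseDelayMs * 2 ** (attempt - 1)
      await new Promise(resolve => setTimeout(resolve, delay))
    }
  }
}

// Usage: wrap any OpenAI call
// const response = await withRetry(() => openai.chat.completions.create({ ... }))
```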