OpenAI Integration
Integrate GPT-4 and other OpenAI models into your application.
Dependencies#
npm install openai

Environment Variables#
# .env.local
OPENAI_API_KEY=sk-...

Client Setup#
// lib/openai.ts
import OpenAI from 'openai';

// Shared singleton OpenAI client, configured from the environment.
// NOTE(review): OPENAI_API_KEY must only be read server-side — never
// ship this module to the browser.
export const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

Basic Chat Completion#
// lib/ai/chat.ts
import { openai } from '@/lib/openai';

/**
 * Send a chat conversation to OpenAI and return the assistant's reply.
 *
 * @param messages - Conversation history in OpenAI chat format.
 * @param options - Optional model name, sampling temperature, and token cap.
 * @returns The assistant's reply text (null if the model returned no content).
 */
export async function chat(
  messages: Array<{ role: 'user' | 'assistant' | 'system'; content: string }>,
  options?: {
    model?: string;
    temperature?: number;
    maxTokens?: number;
  }
) {
  const response = await openai.chat.completions.create({
    model: options?.model || 'gpt-4-turbo-preview',
    messages,
    // `??` (not `||`) so callers can legitimately pass temperature: 0.
    temperature: options?.temperature ?? 0.7,
    max_tokens: options?.maxTokens ?? 1000,
  });

  return response.choices[0].message.content;
}

Streaming Response#
// app/api/chat/route.ts
import { openai } from '@/lib/openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';

// Run on the Edge runtime for low-latency token streaming.
export const runtime = 'edge';

/**
 * Streams a chat completion back to the client as it is generated.
 * Expects a JSON body of shape `{ messages: [...] }`.
 */
export async function POST(request: Request) {
  const { messages } = await request.json();

  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages,
    stream: true,
  });

  // Adapt OpenAI's async token stream into a web ReadableStream the
  // browser (and `useChat`) can consume incrementally.
  const stream = OpenAIStream(response);

  return new StreamingTextResponse(stream);
}

Chat Component with Streaming#
1// components/Chat.tsx
2'use client';
3
4import { useState, useRef, useEffect } from 'react';
5import { useChat } from 'ai/react';
6import { Send, Loader2 } from 'lucide-react';
7
8export function Chat() {
9 const { messages, input, handleInputChange, handleSubmit, isLoading } =
10 useChat({
11 api: '/api/chat',
12 });
13
14 const messagesEndRef = useRef<HTMLDivElement>(null);
15
16 useEffect(() => {
17 messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
18 }, [messages]);
19
20 return (
21 <div className="flex flex-col h-[600px] border rounded-xl">
22 {/* Messages */}
23 <div className="flex-1 overflow-y-auto p-4 space-y-4">
24 {messages.map((message) => (
25 <div
26 key={message.id}
27 className={`flex ${
28 message.role === 'user' ? 'justify-end' : 'justify-start'
29 }`}
30 >
31 <div
32 className={`max-w-[80%] rounded-lg px-4 py-2 ${
33 message.role === 'user'
34 ? 'bg-brand-600 text-white'
35 : 'bg-gray-100 dark:bg-gray-800'
36 }`}
37 >
38 {message.content}
39 </div>
40 </div>
41 ))}
42 <div ref={messagesEndRef} />
43 </div>
44
45 {/* Input */}
46 <form
47 onSubmit={handleSubmit}
48 className="border-t p-4 flex items-center gap-2"
49 >
50 <input
51 value={input}
52 onChange={handleInputChange}
53 placeholder="Type a message..."
54 className="flex-1 px-4 py-2 border rounded-lg focus:outline-none focus:ring-2 focus:ring-brand-500"
55 />
56 <button
57 type="submit"
58 disabled={isLoading || !input.trim()}
59 className="p-2 bg-brand-600 text-white rounded-lg disabled:opacity-50"
60 >
61 {isLoading ? (
62 <Loader2 className="w-5 h-5 animate-spin" />
63 ) : (
64 <Send className="w-5 h-5" />
65 )}
66 </button>
67 </form>
68 </div>
69 );
70}Function Calling#
// lib/ai/functions.ts
import { openai } from '@/lib/openai';
import type {
  ChatCompletionMessageParam,
  ChatCompletionTool,
} from 'openai/resources/chat/completions';

// Tool (function) schemas advertised to the model.
const tools: ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city and state, e.g. San Francisco, CA',
          },
        },
        required: ['location'],
      },
    },
  },
  {
    type: 'function',
    function: {
      name: 'search_products',
      description: 'Search for products in the catalog',
      parameters: {
        type: 'object',
        properties: {
          query: {
            type: 'string',
            description: 'The search query',
          },
          category: {
            type: 'string',
            enum: ['electronics', 'clothing', 'books'],
          },
        },
        required: ['query'],
      },
    },
  },
];

// Function implementations
async function getWeather(location: string) {
  // TODO: call a real weather API — stubbed for this example.
  return { temperature: 72, condition: 'sunny' };
}

async function searchProducts(query: string, category?: string) {
  // TODO: query the product database — stubbed for this example.
  return [{ id: '1', name: 'Product 1', price: 29.99 }];
}

// Dispatch a single tool call to its implementation.
async function runTool(name: string, args: any): Promise<unknown> {
  switch (name) {
    case 'get_weather':
      return getWeather(args.location);
    case 'search_products':
      return searchProducts(args.query, args.category);
    default:
      throw new Error(`Unknown function: ${name}`);
  }
}

/**
 * One round of tool-augmented chat: ask the model, execute any tool
 * calls it requests, then return its final answer.
 */
export async function chatWithFunctions(userMessage: string) {
  const messages: ChatCompletionMessageParam[] = [
    { role: 'user', content: userMessage },
  ];

  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages,
    tools,
    tool_choice: 'auto',
  });

  const message = response.choices[0].message;

  // Check if function calls are requested
  if (message.tool_calls) {
    messages.push(message);

    // Execute EVERY requested tool call — the model may issue several in
    // parallel, and each one must be answered with a matching tool_call_id
    // or the follow-up request is rejected.
    for (const toolCall of message.tool_calls) {
      const args = JSON.parse(toolCall.function.arguments);
      const result = await runTool(toolCall.function.name, args);

      messages.push({
        role: 'tool',
        tool_call_id: toolCall.id,
        content: JSON.stringify(result),
      });
    }

    // Continue conversation with the function results.
    const finalResponse = await openai.chat.completions.create({
      model: 'gpt-4-turbo-preview',
      messages,
    });

    return finalResponse.choices[0].message.content;
  }

  return message.content;
}

Embeddings for Search#
// lib/ai/embeddings.ts
import { openai } from '@/lib/openai';
import { prisma } from '@/lib/prisma';

/** Embed a piece of text with OpenAI's small embedding model. */
export async function createEmbedding(text: string): Promise<number[]> {
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: text,
  });

  return response.data[0].embedding;
}

/**
 * Return the `limit` documents most similar to `query`, using pgvector
 * cosine distance (`<=>`); similarity = 1 - distance.
 */
export async function searchSimilar(
  query: string,
  limit: number = 5
): Promise<any[]> {
  const queryEmbedding = await createEmbedding(query);
  // pgvector expects the '[x,y,...]' text form for the ::vector cast —
  // a raw JS array parameter does not cast cleanly.
  const vector = JSON.stringify(queryEmbedding);

  const results = await prisma.$queryRaw`
    SELECT id, content,
           1 - (embedding <=> ${vector}::vector) as similarity
    FROM documents
    ORDER BY embedding <=> ${vector}::vector
    LIMIT ${limit}
  `;

  return results as any[];
}

/**
 * (Re)compute and store the embedding for a document.
 * NOTE(review): Prisma's typed client cannot write pgvector columns
 * (they are `Unsupported("vector")` in the schema), so raw SQL is used.
 * TODO confirm the table/column names match the Prisma schema mapping.
 */
export async function indexDocument(id: string, content: string) {
  const embedding = await createEmbedding(content);

  await prisma.$executeRaw`
    UPDATE documents
    SET embedding = ${JSON.stringify(embedding)}::vector,
        "indexedAt" = NOW()
    WHERE id = ${id}
  `;
}

Image Generation#
// lib/ai/images.ts
import { toFile } from 'openai';
import { openai } from '@/lib/openai';

/** Generate one 1024x1024 DALL-E 3 image; returns a short-lived URL. */
export async function generateImage(prompt: string) {
  const response = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    n: 1,
    size: '1024x1024',
    quality: 'standard',
  });

  return response.data[0].url;
}

/**
 * Inpaint `imageUrl` where `mask` is transparent, guided by `prompt`.
 * The API requires named file uploads — bare blobs without a filename
 * are rejected, so wrap them with the SDK's `toFile` helper.
 */
export async function editImage(
  imageUrl: string,
  mask: string,
  prompt: string
) {
  // Fetch both inputs in parallel; they are independent downloads.
  const [imageBlob, maskBlob] = await Promise.all([
    fetch(imageUrl).then((r) => r.blob()),
    fetch(mask).then((r) => r.blob()),
  ]);

  const response = await openai.images.edit({
    image: await toFile(imageBlob, 'image.png'),
    mask: await toFile(maskBlob, 'mask.png'),
    prompt,
    n: 1,
    size: '1024x1024',
  });

  return response.data[0].url;
}

Text-to-Speech#
// lib/ai/speech.ts
import { openai } from '@/lib/openai';
import { writeFile } from 'fs/promises';

/**
 * Synthesize `text` with the 'alloy' voice and write the audio (MP3 by
 * default for tts-1) to `outputPath`.
 *
 * @returns the path written, for convenient chaining.
 */
export async function textToSpeech(text: string, outputPath: string) {
  const response = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: text,
  });

  // The SDK returns a fetch-style Response; buffer the bytes to disk.
  const buffer = Buffer.from(await response.arrayBuffer());
  await writeFile(outputPath, buffer);

  return outputPath;
}

Rate Limiting#
// lib/ai/rate-limit.ts
import { Ratelimit } from '@upstash/ratelimit';
import { Redis } from '@upstash/redis';

// Sliding window: at most 10 AI requests per user per minute.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, '1 m'), // 10 requests per minute
});

/**
 * Throw if `userId` has exhausted their AI quota for the current window.
 * @returns how many requests remain in the window.
 */
export async function checkAIRateLimit(userId: string) {
  const { success, remaining, reset } = await ratelimit.limit(
    `ai:${userId}`
  );

  if (!success) {
    // `reset` is the epoch-ms timestamp when the window resets, NOT a
    // duration — convert it before telling the caller how long to wait.
    const retryAfterMs = Math.max(0, reset - Date.now());
    throw new Error(`Rate limit exceeded. Try again in ${retryAfterMs}ms`);
  }

  return { remaining };
}

Error Handling#
1// lib/ai/errors.ts
2import OpenAI from 'openai';
3
4export async function safeChat(
5 messages: any[],
6 options?: any
7): Promise<string | null> {
8 try {
9 const response = await openai.chat.completions.create({
10 model: options?.model || 'gpt-4-turbo-preview',
11 messages,
12 ...options,
13 });
14
15 return response.choices[0].message.content;
16 } catch (error) {
17 if (error instanceof OpenAI.APIError) {
18 switch (error.status) {
19 case 429:
20 console.error('Rate limit exceeded');
21 break;
22 case 500:
23 console.error('OpenAI server error');
24 break;
25 case 503:
26 console.error('OpenAI service unavailable');
27 break;
28 default:
29 console.error('OpenAI API error:', error.message);
30 }
31 }
32 throw error;
33 }
34}