Getting Started
Prerequisites
- Node.js (v18+)
- npm or yarn
- An OpenAI account
- An API key from the OpenAI platform
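Before creating the project, you can confirm that your Node.js version meets the requirement:
node --version
This should print v18.0.0 or higher.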
Project Setup
First, create a new Node.js project:
mkdir openai-project
cd openai-project
npm init -y
Installation and Setup
Install the OpenAI package:
npm install openai dotenv
Create a .env file for your API key:
OPENAI_API_KEY=your-api-key-here
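If the project is under git, add the .env file to .gitignore right away so the key is never committed:
echo ".env" >> .gitignore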
Create an index.js file with the basic setup:
require('dotenv').config();
const OpenAI = require('openai');

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});
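This setup uses CommonJS require. If your project is configured for ES modules ("type": "module" in package.json), the equivalent setup would look like this:

import 'dotenv/config';
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});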
Basic Usage
Making Your First API Call
async function quickStart() {
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [
        { role: "user", content: "Hello, how can you help me today?" }
      ],
    });
    console.log(completion.choices[0].message.content);
  } catch (error) {
    console.error('Error:', error);
  }
}

quickStart();
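Run the script from the project directory:
node index.js
If your key is valid, the model's reply is printed to the console.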
Different Types of Requests
// Chat completion with a system message
async function chatWithSystem() {
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "What's the weather like?" }
      ],
      temperature: 0.7, // 0 = focused and deterministic, higher = more varied
      max_tokens: 150   // caps the length of the generated reply
    });
    console.log(completion.choices[0].message.content);
  } catch (error) {
    console.error('Error:', error);
  }
}
// Image generation
async function generateImage() {
  try {
    const response = await openai.images.generate({
      model: "dall-e-3",
      prompt: "A sunset over mountains",
      size: "1024x1024",
      quality: "standard",
      n: 1,
    });
    console.log(response.data[0].url);
  } catch (error) {
    console.error('Error:', error);
  }
}
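The API returns a temporary URL rather than the image itself, so download the file if you want to keep it. Here is a minimal sketch using the global fetch available in Node 18+ (the saveImage name and sunset.png filename are just examples):

const fs = require('fs');

async function saveImage(url, filename = 'sunset.png') {
  const res = await fetch(url);                        // Node 18+ ships a global fetch
  const buffer = Buffer.from(await res.arrayBuffer()); // read the body as binary
  fs.writeFileSync(filename, buffer);                  // write the image to disk
}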
Advanced Features
Managing Conversations
class ConversationManager {
  constructor() {
    this.messages = [
      { role: "system", content: "You are a helpful assistant." }
    ];
  }

  async sendMessage(userMessage) {
    try {
      // Add the user message to the conversation
      this.messages.push({ role: "user", content: userMessage });

      // Get a response from the API
      const completion = await openai.chat.completions.create({
        model: "gpt-3.5-turbo",
        messages: this.messages
      });

      const assistantResponse = completion.choices[0].message.content;

      // Add the assistant response to the conversation
      this.messages.push({ role: "assistant", content: assistantResponse });
      return assistantResponse;
    } catch (error) {
      console.error('Error:', error);
      throw error;
    }
  }

  getConversationHistory() {
    return this.messages;
  }
}
// Usage example:
const conversation = new ConversationManager();

async function demo() {
  await conversation.sendMessage("Hi there!");
  await conversation.sendMessage("What can you help me with?");
  console.log(conversation.getConversationHistory());
}
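One caveat: the messages array grows with every exchange, and a long conversation will eventually exceed the model's context window. A simple mitigation is to drop the oldest non-system messages once the history gets long; here is a rough sketch of a trimHistory method you could add to the class (the limit of 20 is an arbitrary example):

// Call this inside sendMessage() before each API request
trimHistory(maxMessages = 20) {
  const [system, ...rest] = this.messages; // always keep the system prompt
  if (rest.length > maxMessages) {
    this.messages = [system, ...rest.slice(-maxMessages)];
  }
}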
Streaming Responses
async function streamCompletion() {
  try {
    const stream = await openai.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Write a story about a dragon" }],
      stream: true,
    });

    // Print each token as it arrives instead of waiting for the full response
    for await (const chunk of stream) {
      const content = chunk.choices[0]?.delta?.content || '';
      process.stdout.write(content);
    }
  } catch (error) {
    console.error('Error:', error);
  }
}
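If you need to cancel a stream early (for example, when a user navigates away), the openai package accepts request options, including an AbortSignal, as a second argument to create. A sketch, assuming the v4 SDK (streamWithCancel and the 5-second timeout are just examples):

async function streamWithCancel() {
  const controller = new AbortController();
  setTimeout(() => controller.abort(), 5000); // cancel after 5 seconds, for example

  try {
    const stream = await openai.chat.completions.create(
      {
        model: "gpt-3.5-turbo",
        messages: [{ role: "user", content: "Write a story about a dragon" }],
        stream: true,
      },
      { signal: controller.signal }
    );
    for await (const chunk of stream) {
      process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
  } catch (error) {
    console.error('Stream cancelled or failed:', error.message);
  }
}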
Best Practices
Rate Limiting and Batching
class RateLimiter {
  constructor(requestsPerMinute) {
    this.requestsPerMinute = requestsPerMinute;
    this.queue = [];
    this.processing = false;
  }

  async addRequest(prompt) {
    return new Promise((resolve, reject) => {
      this.queue.push({ prompt, resolve, reject });
      if (!this.processing) {
        this.processQueue();
      }
    });
  }

  async processQueue() {
    this.processing = true;
    while (this.queue.length > 0) {
      const { prompt, resolve, reject } = this.queue.shift();
      try {
        const response = await openai.chat.completions.create({
          model: "gpt-3.5-turbo",
          messages: [{ role: "user", content: prompt }]
        });
        resolve(response.choices[0].message.content);
      } catch (error) {
        reject(error);
      }
      // Space requests evenly: 60s / requestsPerMinute, in milliseconds
      await new Promise(resolve => setTimeout(resolve, (60 / this.requestsPerMinute) * 1000));
    }
    this.processing = false;
  }
}
// Usage
const limiter = new RateLimiter(60); // 60 requests per minute
const prompts = ["Hello", "How are you", "What's the weather"];

Promise.all(prompts.map(prompt => limiter.addRequest(prompt)))
  .then(responses => console.log(responses));
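Note that Promise.all rejects as soon as any single request fails. If you would rather collect partial results, Promise.allSettled is a drop-in alternative:

Promise.allSettled(prompts.map(prompt => limiter.addRequest(prompt)))
  .then(results => {
    results.forEach((result, i) => {
      if (result.status === 'fulfilled') {
        console.log(prompts[i], '->', result.value);
      } else {
        console.error(prompts[i], 'failed:', result.reason);
      }
    });
  });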
Error Handling
class OpenAIHandler {
  constructor() {
    this.maxRetries = 3;
    this.baseDelay = 1000; // 1 second
  }

  async makeRequest(prompt, retryCount = 0) {
    try {
      const response = await openai.chat.completions.create({
        model: "gpt-3.5-turbo",
        messages: [{ role: "user", content: prompt }]
      });
      return response.choices[0].message.content;
    } catch (error) {
      if (retryCount < this.maxRetries) {
        // Exponential backoff: 1s, 2s, 4s, ...
        const delay = this.baseDelay * Math.pow(2, retryCount);
        console.log(`Retrying after ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
        return this.makeRequest(prompt, retryCount + 1);
      }
      throw error;
    }
  }
}
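As written, this retries every failure, including ones that will never succeed (such as an invalid API key). In production you would typically retry only transient errors. Here is a sketch of a helper you could call at the top of the catch block, assuming the v4 openai package, which exposes OpenAI.APIError with an HTTP status (isRetryable is a hypothetical name):

// Hypothetical helper: decide whether an error is worth retrying
function isRetryable(error) {
  return error instanceof OpenAI.APIError &&
    (error.status === 429 || error.status >= 500); // rate limits and server errors
}

// Inside the catch block, before scheduling a retry:
// if (!isRetryable(error)) throw error;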
Cost Management
This example uses the third-party gpt3-tokenizer package (install it with npm install gpt3-tokenizer) to estimate token counts and cost:

const GPT3Tokenizer = require('gpt3-tokenizer').default;

class TokenCounter {
  constructor() {
    this.tokenizer = new GPT3Tokenizer({ type: 'gpt3' });
  }

  countTokens(text) {
    const encoded = this.tokenizer.encode(text);
    return encoded.bpe.length;
  }

  estimateCost(tokens, model = 'gpt-3.5-turbo') {
    // Approximate USD per token; prices change, so check OpenAI's pricing page
    const costs = {
      'gpt-3.5-turbo': 0.002 / 1000,
      'gpt-4': 0.03 / 1000,
    };
    return tokens * costs[model];
  }
}
// Usage tracking helper
function trackUsage(response) {
  const usage = response.usage;
  console.log({
    promptTokens: usage.prompt_tokens,
    completionTokens: usage.completion_tokens,
    totalTokens: usage.total_tokens,
    estimatedCost: new TokenCounter().estimateCost(usage.total_tokens)
  });
}
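To wire this in, pass the raw completion object to trackUsage after any of the earlier calls, for example (demoUsageTracking is just an example name):

async function demoUsageTracking() {
  const completion = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Hello!" }]
  });
  trackUsage(completion); // logs token counts and a rough cost estimate
}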
Remember to:
- Never commit your .env file
- Use appropriate error handling in production
- Implement proper rate limiting
- Monitor your token usage and costs
- Keep your API key secure
- Use environment variables for sensitive data