Real-world examples of integrating SuperPrompts into your Node.js applications.
Build a REST API with Express that uses SuperPrompts for AI interactions:
import express from 'express';
import { createPromptClient } from 'superprompts';
import OpenAI from 'openai';

const app = express();
const promptClient = createPromptClient(process.env.SUPERPROMPTS_API_KEY);
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

app.use(express.json());

/**
 * POST /api/chat
 * Body: { message: string; promptId: string }
 *
 * Fetches a system prompt from SuperPrompts, forwards the user message to
 * OpenAI, and responds with { response: string }.
 */
app.post('/api/chat', async (req, res) => {
  try {
    const { message, promptId } = req.body;

    // Validate input up front: without this, a missing field flows into the
    // SuperPrompts/OpenAI calls and surfaces as an opaque 500.
    if (typeof message !== 'string' || typeof promptId !== 'string') {
      res.status(400).json({ error: 'message and promptId are required' });
      return;
    }

    // Get structured prompt from SuperPrompts
    const systemPrompt = await promptClient.prompt(promptId);

    // Use with OpenAI
    const completion = await openai.chat.completions.create({
      model: 'gpt-4',
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: message }
      ]
    });

    res.json({ response: completion.choices[0].message.content });
  } catch (error) {
    console.error('Error:', error);
    res.status(500).json({ error: 'Failed to process request' });
  }
});
app.listen(3000, () => {
console.log('Server running on port 3000');
});
Use SuperPrompts in Next.js 14+ with the App Router:
// app/api/chat/route.ts
import { createPromptClient } from 'superprompts';
import { NextRequest, NextResponse } from 'next/server';
import OpenAI from 'openai';

const promptClient = createPromptClient(process.env.SUPERPROMPTS_API_KEY!);
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

/**
 * POST /api/chat — App Router route handler.
 * Expects a JSON body { message: string; promptId: string } and returns
 * { response: string }, or an error payload with a 400/500 status.
 */
export async function POST(request: NextRequest) {
  try {
    const { message, promptId } = await request.json();

    // Validate the parsed body so bad requests get a 400, not a 500.
    if (typeof message !== 'string' || typeof promptId !== 'string') {
      return NextResponse.json(
        { error: 'message and promptId are required' },
        { status: 400 }
      );
    }

    // Get structured prompt
    const systemPrompt = await promptClient.prompt(promptId);

    const completion = await openai.chat.completions.create({
      model: 'gpt-4',
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: message }
      ]
    });

    return NextResponse.json({
      response: completion.choices[0].message.content
    });
  } catch (error) {
    // Log server-side before responding — the original swallowed the error
    // entirely, making failures impossible to diagnose.
    console.error('POST /api/chat failed:', error);
    return NextResponse.json(
      { error: 'Failed to process request' },
      { status: 500 }
    );
  }
}
Use SuperPrompts with Anthropic's Claude:
import { createPromptClient } from 'superprompts';
import Anthropic from '@anthropic-ai/sdk';

const promptClient = createPromptClient(process.env.SUPERPROMPTS_API_KEY);
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

/**
 * Sends one user message to Claude, using the SuperPrompts 'assistant'
 * prompt as the system prompt, and returns the text of the reply.
 *
 * @throws if the first content block of the reply is not a text block.
 */
async function chat(userMessage: string): Promise<string> {
  // Get structured prompt
  const systemPrompt = await promptClient.prompt('assistant');

  const message = await anthropic.messages.create({
    model: 'claude-3-opus-20240229',
    max_tokens: 1024,
    system: systemPrompt,
    messages: [
      { role: 'user', content: userMessage }
    ]
  });

  // `content` is an array of typed blocks (text, tool_use, ...). The
  // original `message.content[0].text` assumes a text block at index 0,
  // which does not type-check against the SDK's content-block union —
  // narrow explicitly instead.
  const firstBlock = message.content[0];
  if (!firstBlock || firstBlock.type !== 'text') {
    throw new Error('Expected a text response from Claude');
  }
  return firstBlock.text;
}

// Usage
const response = await chat('Hello, how can you help me?');
console.log(response);
Implement caching to reduce API calls:
import { createPromptClient } from 'superprompts';

const promptClient = createPromptClient(process.env.SUPERPROMPTS_API_KEY);

const cache = new Map<string, { content: string; timestamp: number }>();
// Tracks requests currently on the wire so concurrent callers for the same
// prompt share one fetch instead of each hitting the API.
const inFlight = new Map<string, Promise<string>>();
const CACHE_TTL = 5 * 60 * 1000; // 5 minutes

/**
 * Returns the content for `promptId`, serving from an in-memory cache for
 * up to CACHE_TTL ms. On a cache miss, concurrent callers are deduplicated
 * onto a single outstanding request (the original issued one request per
 * caller, causing a thundering herd on expiry).
 */
async function getCachedPrompt(promptId: string): Promise<string> {
  const cached = cache.get(promptId);
  const now = Date.now();

  // Return cached if valid
  if (cached && (now - cached.timestamp) < CACHE_TTL) {
    return cached.content;
  }

  // Piggyback on an already-running fetch for this id, if any.
  const pending = inFlight.get(promptId);
  if (pending) {
    return pending;
  }

  // Fetch fresh prompt; cache on success, and always clear the in-flight
  // slot so a failed fetch can be retried by the next caller.
  const request = promptClient
    .prompt(promptId)
    .then(content => {
      cache.set(promptId, { content, timestamp: Date.now() });
      return content;
    })
    .finally(() => {
      inFlight.delete(promptId);
    });

  inFlight.set(promptId, request);
  return request;
}

// Usage
const prompt = await getCachedPrompt('my-prompt');
console.log(prompt);
Implement robust error handling:
import { createPromptClient } from 'superprompts';

const promptClient = createPromptClient(process.env.SUPERPROMPTS_API_KEY);

/**
 * Fetches a prompt, retrying failed attempts with exponential backoff
 * (1s, 2s, 4s, ...).
 *
 * @param promptId   SuperPrompts prompt identifier.
 * @param maxRetries Total number of attempts; must be >= 1.
 * @returns the prompt content from the first successful attempt.
 * @throws if maxRetries < 1, or if every attempt fails (message includes
 *         the last underlying error).
 */
async function getPromptWithRetry(
  promptId: string,
  maxRetries: number = 3
): Promise<string> {
  // Guard: with maxRetries <= 0 the loop body never runs, and the
  // original's `lastError!` would throw a confusing TypeError below.
  if (maxRetries < 1) {
    throw new Error('maxRetries must be at least 1');
  }

  // undefined until the first failure — declared as `Error | undefined`
  // instead of the original's unsound definite-assignment + `!`.
  let lastError: Error | undefined;

  for (let i = 0; i < maxRetries; i++) {
    try {
      return await promptClient.prompt(promptId);
    } catch (error) {
      // Normalize non-Error throwables rather than asserting `as Error`.
      lastError = error instanceof Error ? error : new Error(String(error));
      console.error(`Attempt ${i + 1} failed:`, error);

      // Wait before retrying (exponential backoff)
      if (i < maxRetries - 1) {
        await new Promise(resolve =>
          setTimeout(resolve, Math.pow(2, i) * 1000)
        );
      }
    }
  }

  throw new Error(
    `Failed to fetch prompt after ${maxRetries} attempts: ${lastError?.message}`
  );
}

// Usage
try {
  const prompt = await getPromptWithRetry('my-prompt');
  console.log(prompt);
} catch (error) {
  console.error('Failed to fetch prompt:', error);
}
Fetch multiple prompts efficiently:
import { createPromptClient } from 'superprompts';

const promptClient = createPromptClient(process.env.SUPERPROMPTS_API_KEY);

/**
 * Fetches several prompts concurrently and returns them keyed by id.
 * Fails as a whole if any single fetch rejects (Promise.all semantics).
 */
async function getMultiplePrompts(promptIds: string[]) {
  // Kick off every request at once; Promise.all preserves input order.
  const contents = await Promise.all(
    promptIds.map(id => promptClient.prompt(id))
  );

  // Zip ids with their fetched contents and assemble the result object.
  const entries = promptIds.map((id, index) => [id, contents[index]] as const);
  return Object.fromEntries(entries) as Record<string, string>;
}

// Usage
const prompts = await getMultiplePrompts([
  'customer-support',
  'sales-assistant',
  'technical-support'
]);
console.log(prompts['customer-support']);
console.log(prompts['sales-assistant']);
Add type safety to your integration:
import { createPromptClient } from 'superprompts';

// Define your prompt IDs as types
type PromptId =
  | 'customer-support'
  | 'sales-assistant'
  | 'technical-support';

/**
 * Thin wrapper around the SuperPrompts client that restricts prompt ids to
 * the known PromptId union and memoizes fetched prompt content in memory.
 */
class PromptService {
  private readonly client: ReturnType<typeof createPromptClient>;
  private readonly cache = new Map<PromptId, string>();

  constructor(apiKey: string) {
    this.client = createPromptClient(apiKey);
  }

  /** Returns the prompt text for `promptId`, fetching it on first use. */
  async getPrompt(promptId: PromptId): Promise<string> {
    // Serve from the memo if present.
    const hit = this.cache.get(promptId);
    if (hit) {
      return hit;
    }

    // First access: fetch, remember, return.
    const content = await this.client.prompt(promptId);
    this.cache.set(promptId, content);
    return content;
  }

  /** Drops all memoized prompts so subsequent calls re-fetch. */
  clearCache(): void {
    this.cache.clear();
  }
}

// Usage
const service = new PromptService(process.env.SUPERPROMPTS_API_KEY!);
const prompt = await service.getPrompt('customer-support');
// TypeScript will error if you use an invalid prompt ID