# Best Practices

Essential guidelines for using the Electron Hub API effectively.
# Good: Environment variable
export ELECTRONHUB_API_KEY="your_api_key_here"

# Bad: Hardcoded in source
const apiKey = "ek-1234567890abcdef"; // Never do this!
// Good: Batch embedding requests — one HTTP round trip for many inputs.
// Note: a JSON body must be sent with a Content-Type header, and the API
// requires an Authorization header; the original example omitted both.
const embeddings = await fetch('/v1/embeddings', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.ELECTRONHUB_API_KEY}`,
  },
  body: JSON.stringify({
    input: ['text1', 'text2', 'text3'], // Multiple inputs in a single request
    model: 'text-embedding-3-small',
  }),
});

// Less efficient: one HTTP round trip per input.
for (const text of texts) {
  const embedding = await fetch('/v1/embeddings', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.ELECTRONHUB_API_KEY}`,
    },
    body: JSON.stringify({ input: text, model: 'text-embedding-3-small' }),
  });
}
// Response cache, bounded so a long-running process cannot grow memory
// without limit (the original Map was never evicted).
const cache = new Map();
const MAX_CACHE_ENTRIES = 1000;

/**
 * Return a chat completion, serving repeated identical requests from cache.
 * @param {Array<object>} messages - Chat messages for the completion request.
 * @param {string} model - Model identifier.
 * @returns {Promise<object>} The (possibly cached) completion response.
 */
async function getChatCompletion(messages, model) {
  const cacheKey = JSON.stringify({ messages, model });
  if (cache.has(cacheKey)) {
    return cache.get(cacheKey);
  }
  const response = await electronhub.chat.completions.create({ messages, model });
  // Evict the oldest entry once full (Map iterates in insertion order).
  if (cache.size >= MAX_CACHE_ENTRIES) {
    cache.delete(cache.keys().next().value);
  }
  cache.set(cacheKey, response);
  return response;
}
/**
 * Execute `requestFn` with automatic retries.
 *
 * Retries on HTTP 429 (with exponential backoff) and on 5xx server errors
 * (immediately). All other errors — and any error on the final attempt —
 * are rethrown to the caller.
 *
 * Bug fixed: the original `continue`d on a 429 even on the last attempt,
 * so the loop fell through and the function silently resolved to
 * `undefined` (after a pointless backoff sleep) instead of throwing.
 *
 * @param {() => Promise<any>} requestFn - The API call to execute.
 * @param {number} [maxRetries=3] - Maximum number of attempts.
 * @returns {Promise<any>} Resolves with `requestFn`'s result.
 * @throws The last error when retries are exhausted or the error is not retryable.
 */
async function makeAPICall(requestFn, maxRetries = 3) {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await requestFn();
    } catch (error) {
      const retriesLeft = attempt < maxRetries;
      if (error.status === 429 && retriesLeft) {
        // Rate limit — exponential backoff before the next attempt.
        const delay = Math.pow(2, attempt) * 1000;
        await new Promise((resolve) => setTimeout(resolve, delay));
        continue;
      }
      if (error.status >= 500 && retriesLeft) {
        // Transient server error — retry immediately.
        continue;
      }
      // Non-retryable error, or retries exhausted: surface to the caller.
      throw error;
    }
  }
}
// Good: Clear, specific prompt
{
  "model": "gpt-3.5-turbo",
  "messages": [
    {
      "role": "system",
      "content": "You are a helpful assistant that summarizes articles in exactly 3 bullet points."
    },
    { "role": "user", "content": "Summarize this article: [article text]" }
  ]
}

// Less effective: Vague prompt
{
  "model": "gpt-3.5-turbo",
  "messages": [
    { "role": "user", "content": "Tell me about this article: [article text]" }
  ]
}
// Structured logging: emit one JSON line per API call so entries can be
// parsed by log aggregators.
const logger = {
  /**
   * Emit a structured log entry for a single API call.
   * @param {string} endpoint - API endpoint path (e.g. 'chat/completions').
   * @param {string} model - Model identifier used for the request.
   * @param {number} tokens - Total tokens consumed (0 on failure).
   * @param {number} responseTime - Wall-clock duration in milliseconds.
   * @param {string} status - 'success' or 'error'.
   * @param {object} [extra] - Optional extra fields (e.g. error details).
   */
  logAPICall: (endpoint, model, tokens, responseTime, status, extra = {}) => {
    console.log(JSON.stringify({
      timestamp: new Date().toISOString(),
      endpoint,
      model,
      tokens,
      responseTime,
      status,
      environment: process.env.NODE_ENV,
      ...extra,
    }));
  },
};

// Usage
const startTime = Date.now();
try {
  const response = await electronhub.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: messages,
  });
  logger.logAPICall(
    'chat/completions',
    'gpt-3.5-turbo',
    response.usage.total_tokens,
    Date.now() - startTime,
    'success'
  );
} catch (error) {
  logger.logAPICall(
    'chat/completions',
    'gpt-3.5-turbo',
    0,
    Date.now() - startTime,
    'error',
    { errorMessage: error.message } // record WHY the call failed
  );
  // Rethrow: logging is not handling — the original swallowed the error,
  // leaving callers unaware that the request failed.
  throw error;
}