91 lines
2.3 KiB
TypeScript
91 lines
2.3 KiB
TypeScript
import OpenAI from 'openai';
|
|
|
|
// Module-level cache for the lazily-created OVHcloud client (populated by getClient()).
let ovhAI: OpenAI | null = null;
|
|
|
|
function getClient(): OpenAI {
|
|
if (!ovhAI) {
|
|
ovhAI = new OpenAI({
|
|
apiKey: process.env.OVHCLOUD_API_KEY,
|
|
baseURL: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1'
|
|
});
|
|
}
|
|
return ovhAI;
|
|
}
|
|
|
|
/** Result of a single call to {@link queryAi}. */
interface AiResponse {
  /** The model's response text; '' when the API returned no content. */
  text: string;
  /** Total tokens used: prompt tokens + completion tokens as reported by the API. */
  tokensUsed: number;
}
|
|
|
|
// Maximum number of attempts per query before giving up (see queryAi's retry loop).
const MAX_RETRIES = 3;
|
|
|
|
export async function queryAi(text: string): Promise<AiResponse> {
|
|
const requestId = Math.random().toString(36).substring(7);
|
|
const startTime = Date.now();
|
|
|
|
console.info(`[AI-${requestId}] Starting OVH AI request`, {
|
|
promptLength: text.length,
|
|
timestamp: new Date().toISOString()
|
|
});
|
|
|
|
let lastError: Error | null = null;
|
|
|
|
for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
|
|
try {
|
|
console.info(
|
|
`[AI-${requestId}] OVH AI request attempt ${attempt}/${MAX_RETRIES}`
|
|
);
|
|
|
|
const completion = await getClient().chat.completions.create({
|
|
model: 'Meta-Llama-3_3-70B-Instruct',
|
|
temperature: 0.7,
|
|
max_tokens: 4096,
|
|
messages: [
|
|
{
|
|
role: 'user',
|
|
content: text
|
|
}
|
|
]
|
|
});
|
|
|
|
const responseText = completion.choices[0]?.message?.content || '';
|
|
const tokensUsed =
|
|
(completion.usage?.prompt_tokens || 0) +
|
|
(completion.usage?.completion_tokens || 0);
|
|
|
|
const duration = Date.now() - startTime;
|
|
console.info(
|
|
`[AI-${requestId}] OVH AI response received in ${duration}ms`,
|
|
{
|
|
responseLength: responseText.length,
|
|
tokensUsed,
|
|
usage: completion.usage
|
|
}
|
|
);
|
|
|
|
return {
|
|
text: responseText,
|
|
tokensUsed
|
|
};
|
|
} catch (error) {
|
|
const duration = Date.now() - startTime;
|
|
console.error(
|
|
`[AI-${requestId}] OVH AI attempt ${attempt} failed after ${duration}ms:`,
|
|
{
|
|
error: error instanceof Error ? error.message : String(error),
|
|
promptLength: text.length
|
|
}
|
|
);
|
|
lastError = error as Error;
|
|
|
|
if (attempt < MAX_RETRIES) {
|
|
const delay = 1000 * attempt;
|
|
console.info(`[AI-${requestId}] Retrying in ${delay}ms...`);
|
|
await new Promise(resolve => setTimeout(resolve, delay));
|
|
}
|
|
}
|
|
}
|
|
|
|
throw lastError || new Error('OVH AI error: all retry attempts failed.');
|
|
}
|