This commit is contained in:
@@ -1,54 +1,90 @@
|
||||
import { generateText } from 'ai';
|
||||
import OpenAI from 'openai';
|
||||
|
||||
interface AiGatewayResponse {
|
||||
let ovhAI: OpenAI | null = null;
|
||||
|
||||
function getClient(): OpenAI {
|
||||
if (!ovhAI) {
|
||||
ovhAI = new OpenAI({
|
||||
apiKey: process.env.OVHCLOUD_API_KEY,
|
||||
baseURL: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1'
|
||||
});
|
||||
}
|
||||
return ovhAI;
|
||||
}
|
||||
|
||||
/** Normalized result of one AI completion request. */
interface AiResponse {
  // Model output text; empty string when the provider returns no content.
  text: string;
  // Sum of prompt and completion tokens reported by the provider (0 when absent).
  tokensUsed: number;
}
|
||||
|
||||
export async function queryAiGateway(
|
||||
text: string,
|
||||
model: string
|
||||
): Promise<AiGatewayResponse> {
|
||||
const MAX_RETRIES = 3;
|
||||
|
||||
export async function queryAi(text: string): Promise<AiResponse> {
|
||||
const requestId = Math.random().toString(36).substring(7);
|
||||
const startTime = Date.now();
|
||||
|
||||
console.info(`[AI-${requestId}] Starting Vercel Gateway AI request`, {
|
||||
console.info(`[AI-${requestId}] Starting OVH AI request`, {
|
||||
promptLength: text.length,
|
||||
model,
|
||||
timestamp: new Date().toISOString()
|
||||
});
|
||||
|
||||
try {
|
||||
const response = await generateText({
|
||||
model,
|
||||
prompt: text
|
||||
});
|
||||
let lastError: Error | null = null;
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
const tokensUsed = response.usage?.totalTokens || 0;
|
||||
for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
|
||||
try {
|
||||
console.info(
|
||||
`[AI-${requestId}] OVH AI request attempt ${attempt}/${MAX_RETRIES}`
|
||||
);
|
||||
|
||||
console.info(
|
||||
`[AI-${requestId}] Vercel Gateway AI response received in ${duration}ms`,
|
||||
{
|
||||
responseLength: response.text.length,
|
||||
tokensUsed,
|
||||
usage: response.usage
|
||||
const completion = await getClient().chat.completions.create({
|
||||
model: 'Meta-Llama-3_3-70B-Instruct',
|
||||
temperature: 0.7,
|
||||
max_tokens: 4096,
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: text
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
const responseText = completion.choices[0]?.message?.content || '';
|
||||
const tokensUsed =
|
||||
(completion.usage?.prompt_tokens || 0) +
|
||||
(completion.usage?.completion_tokens || 0);
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
console.info(
|
||||
`[AI-${requestId}] OVH AI response received in ${duration}ms`,
|
||||
{
|
||||
responseLength: responseText.length,
|
||||
tokensUsed,
|
||||
usage: completion.usage
|
||||
}
|
||||
);
|
||||
|
||||
return {
|
||||
text: responseText,
|
||||
tokensUsed
|
||||
};
|
||||
} catch (error) {
|
||||
const duration = Date.now() - startTime;
|
||||
console.error(
|
||||
`[AI-${requestId}] OVH AI attempt ${attempt} failed after ${duration}ms:`,
|
||||
{
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
promptLength: text.length
|
||||
}
|
||||
);
|
||||
lastError = error as Error;
|
||||
|
||||
if (attempt < MAX_RETRIES) {
|
||||
const delay = 1000 * attempt;
|
||||
console.info(`[AI-${requestId}] Retrying in ${delay}ms...`);
|
||||
await new Promise(resolve => setTimeout(resolve, delay));
|
||||
}
|
||||
);
|
||||
|
||||
return {
|
||||
text: response.text,
|
||||
tokensUsed
|
||||
};
|
||||
} catch (error) {
|
||||
const duration = Date.now() - startTime;
|
||||
console.error(
|
||||
`[AI-${requestId}] Vercel Gateway AI error after ${duration}ms:`,
|
||||
{
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
promptLength: text.length
|
||||
}
|
||||
);
|
||||
throw new Error(`Vercel Gateway AI error: ${JSON.stringify(error)}.`);
|
||||
}
|
||||
}
|
||||
|
||||
throw lastError || new Error('OVH AI error: all retry attempts failed.');
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user