import { GoogleGenAI } from "@google/genai"; import type { GenerateContentResponse } from "@google/genai"; import type { ProcessingMode, ModelConfig, SuggestedParamsResponse, ModelParameterGuidance, ParameterAdvice, StaticAiModelDetails, IterateProductResult } from "../types"; let API_KEY: string | undefined = undefined; try { if (typeof process !== 'undefined' && process.env && typeof process.env.API_KEY === 'string') { API_KEY = process.env.API_KEY; } else if (typeof process !== 'undefined' && process.env && process.env.API_KEY === undefined) { console.warn("process.env.API_KEY is undefined. Gemini API calls will be disabled if a key is not found by other means."); } } catch (e) { console.warn("Could not access process.env.API_KEY. This might be expected in some browser environments. Gemini API calls will be disabled if a key is not found.", e); } let ai: GoogleGenAI | null = null; let apiKeyAvailable = false; try { if (API_KEY) { if (typeof GoogleGenAI === 'function') { ai = new GoogleGenAI({ apiKey: API_KEY }); apiKeyAvailable = true; } else { console.error("GoogleGenAI constructor is not available from @google/genai module. Gemini API calls will be disabled."); apiKeyAvailable = false; } } else { console.warn("API_KEY was not found or is not configured. Gemini API calls will be disabled."); apiKeyAvailable = false; } } catch (error) { console.error("Error during GoogleGenAI initialization. Gemini API calls will be disabled.", error); ai = null; apiKeyAvailable = false; } const MODEL_NAME = 'gemini-2.5-flash-preview-04-17'; export const EXPLORATORY_MODE_DEFAULTS: ModelConfig = { temperature: 0.75, topP: 0.95, topK: 60 }; export const REFINEMENT_MODE_DEFAULTS: ModelConfig = { temperature: 0.5, topP: 0.9, topK: 40 }; export const DISTILLATION_MODE_DEFAULTS: ModelConfig = { temperature: 0.25, topP: 0.85, topK: 20 }; // Constants for text analysis const MIN_WORDS_FOR_ANALYSIS = 30; const SHORT_SENTENCE_THRESHOLD = 12; // words const LONG_SENTENCE_THRESHOLD = 25; // words const LOW_DIVERSITY_THRESHOLD = 0.45; const HIGH_DIVERSITY_THRESHOLD = 0.7; interface TextStats { wordCount: number; sentenceCount: number; avgSentenceLength: number; uniqueWordCount: number; lexicalDiversity: number; } function analyzeTextComplexity(text: string): TextStats { if (!text || !text.trim()) { return { wordCount: 0, sentenceCount: 0, avgSentenceLength: 0, uniqueWordCount: 0, lexicalDiversity: 0 }; } const words = text.toLowerCase().match(/\b(\w+)\b/g) || []; const wordCount = words.length; const sentences = text.split(/[.?!](?=\s+|$)/g).filter(s => s.trim().length > 0); const sentenceCount = Math.max(1, sentences.length); const uniqueWords = new Set(words); const uniqueWordCount = uniqueWords.size; const lexicalDiversity = wordCount > 0 ? uniqueWordCount / wordCount : 0; const avgSentenceLength = wordCount > 0 ? wordCount / sentenceCount : 0; return { wordCount, sentenceCount, avgSentenceLength, uniqueWordCount, lexicalDiversity, }; } export const getStaticModelDetails = (): StaticAiModelDetails => { return { modelName: MODEL_NAME, tools: "None", }; }; export const isApiKeyAvailable = (): boolean => apiKeyAvailable; export const suggestModelParameters = ( promptText: string, currentMode: ProcessingMode ): SuggestedParamsResponse => { const isPromptReallyProvided = promptText && promptText.trim().length > 0; let baseConfig: ModelConfig; let finalRationales: string[] = []; switch (currentMode) { case 'exploratory': baseConfig = { ...EXPLORATORY_MODE_DEFAULTS }; break; case 'refinement': baseConfig = { ...REFINEMENT_MODE_DEFAULTS }; break; case 'distillation': baseConfig = { ...DISTILLATION_MODE_DEFAULTS }; break; default: // Should not happen with typed modes console.warn(`Unknown processing mode: ${currentMode}. Falling back to exploratory defaults.`); baseConfig = { ...EXPLORATORY_MODE_DEFAULTS }; break; } if (!isPromptReallyProvided) { finalRationales.push(`No input prompt. Using default settings for '${currentMode}' mode.`); return { config: baseConfig, rationales: finalRationales }; } const stats = analyzeTextComplexity(promptText); if (stats.wordCount < MIN_WORDS_FOR_ANALYSIS) { finalRationales.push(`Input is short (${stats.wordCount} words). Using default settings for '${currentMode}' mode as detailed analysis is less reliable.`); return { config: baseConfig, rationales: finalRationales }; } let nextTemperature = baseConfig.temperature; let nextTopP = baseConfig.topP; let nextTopK = baseConfig.topK; let modified = false; const adjustmentReasons: string[] = []; if (currentMode === 'exploratory') { if (stats.avgSentenceLength < SHORT_SENTENCE_THRESHOLD && stats.lexicalDiversity < LOW_DIVERSITY_THRESHOLD) { nextTemperature += 0.08; nextTopP += 0.02; nextTopK += 8; adjustmentReasons.push("input appears concise with focused vocabulary; adjusting for broader creative exploration"); modified = true; } else if (stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD || stats.lexicalDiversity > HIGH_DIVERSITY_THRESHOLD) { nextTemperature -= 0.05; nextTopP -= 0.02; nextTopK -= 5; const detailReason = stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD && stats.lexicalDiversity > HIGH_DIVERSITY_THRESHOLD ? "input has long sentences and high lexical diversity" : stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD ? "input features longer sentences" : "input shows high lexical diversity"; adjustmentReasons.push(`${detailReason}; adjusting to maintain coherence during broad expansion`); modified = true; } } else if (currentMode === 'refinement') { if (stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD && stats.lexicalDiversity > HIGH_DIVERSITY_THRESHOLD) { nextTemperature -= 0.03; nextTopP -= 0.02; nextTopK -= 3; adjustmentReasons.push("input is complex; subtly adjusting for focused refinement and structural improvements"); modified = true; } } else { // distillation mode if (stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD || stats.lexicalDiversity > HIGH_DIVERSITY_THRESHOLD) { nextTemperature -= 0.05; nextTopP -= 0.03; nextTopK -= 5; const detailReason = stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD && stats.lexicalDiversity > HIGH_DIVERSITY_THRESHOLD ? "input has long sentences and high lexical diversity" : stats.avgSentenceLength > LONG_SENTENCE_THRESHOLD ? "input features longer sentences" : "input shows high lexical diversity"; adjustmentReasons.push(`${detailReason}; further tightening parameters for precise distillation`); modified = true; } } if (modified) { finalRationales.push(`Parameters adjusted for '${currentMode}' mode based on input analysis:`); adjustmentReasons.forEach(reason => finalRationales.push(`- ${reason}.`)); } else { finalRationales.push(`Input analysis suggests default settings for '${currentMode}' mode are a suitable starting point.`); } const finalConfig: ModelConfig = { temperature: Math.max(0.0, Math.min(2.0, parseFloat(nextTemperature.toFixed(2)))), topP: Math.max(0.01, Math.min(1.0, parseFloat(nextTopP.toFixed(2)))), topK: Math.max(1, Math.min(128, Math.round(nextTopK))), }; const configIsEffectivelyBase = finalConfig.temperature === baseConfig.temperature && finalConfig.topP === baseConfig.topP && finalConfig.topK === baseConfig.topK; if (configIsEffectivelyBase && !modified) { finalRationales = [`Using default settings for '${currentMode}' mode. Input analysis indicates these are appropriate.`]; } else if (configIsEffectivelyBase && modified) { finalRationales = [`Parameters for '${currentMode}' mode refined based on input analysis, resulting in values close to defaults:`]; adjustmentReasons.forEach(reason => finalRationales.push(`- ${reason}.`)); finalRationales.push(`Final settings align with mode defaults after adjustments.`); } return { config: finalConfig, rationales: Array.from(new Set(finalRationales)) }; }; export const getModelParameterGuidance = (config: ModelConfig): ModelParameterGuidance => { const warnings: string[] = []; const advice: ParameterAdvice = {}; if (config.temperature === 0) { advice.temperature = "Deterministic output; usually used with Top-K=1 for greedy decoding."; } else if (config.temperature < 0.3) { advice.temperature = "Very focused and less random output. Good for precision tasks, factual recall (e.g., Distillation)."; } else if (config.temperature < 0.7) { advice.temperature = "Balanced output. Good for factual responses or controlled creativity (e.g., Refinement)."; } else if (config.temperature <= 1.0) { advice.temperature = "More creative and diverse output. Good for brainstorming or idea generation (e.g., Exploratory)."; } else if (config.temperature <= 1.5) { advice.temperature = "Highly creative; may start to introduce more randomness or less coherence. Higher risk of hallucination."; } else { advice.temperature = "Extremely creative/random; high chance of unexpected or less coherent output. Not recommended for focused tasks."; warnings.push("Very high temperature ( > 1.5) might lead to highly random or incoherent output and increased hallucination risk."); } if (config.topP === 0 && config.temperature === 0) { advice.topP = "Not typically used when temperature is 0 (greedy decoding is active)."; } else if (config.topP === 0 && config.temperature > 0) { advice.topP = "Top-P is 0 but temperature > 0. This is an unusual setting; typically topP > 0.8 for nucleus sampling. May result in no tokens being selected if not handled carefully by the model."; warnings.push("Top-P = 0 with Temperature > 0 is an unusual setting and might lead to unexpected behavior or no output."); } else if (config.topP < 0.3 && config.topP > 0) { advice.topP = "Very restrictive selection of tokens. Output may be limited. Useful for high precision if combined with low temperature."; } else if (config.topP < 0.7) { advice.topP = "Moderately selective. Balances focus with some diversity."; } else if (config.topP < 0.9) { advice.topP = "Less restrictive, allowing for more diverse token choices. Good for Distillation or focused Refinement tasks."; } else if (config.topP <= 1.0) { advice.topP = "Allows a wider range of tokens, increasing diversity. Values around 0.9-0.95 are common for balanced creative tasks (e.g., Exploratory, broad Refinement)."; } if (config.topK === 1) { advice.topK = "Greedy decoding. Always picks the single most likely next token. Best for factual, deterministic output."; } else if (config.topK < 10) { advice.topK = "Very limited token choices. Output may be repetitive. Good for specific answers when combined with low temperature."; } else if (config.topK < 40) { advice.topK = "Moderately limited token choices. Good for focused output (e.g., Distillation, precise Refinement) with some variety."; } else if (config.topK < 80) { advice.topK = "Broader token selection. Good for Exploratory tasks or Refinement requiring creative yet focused exploration."; } else { advice.topK = "Very broad token selection. Relies more on Temperature and Top-P for filtering. Can increase randomness or lack of focus if not carefully managed."; } if (config.temperature > 0.8 && config.topK < 20 && config.topK > 1) { warnings.push("Higher temperature with low Top-K (< 20, not 1) can be unpredictable and may produce lower-quality results or limit creativity unexpectedly."); } if (config.topP < 0.7 && config.topP > 0 && config.temperature > 0.7) { warnings.push("Low Top-P (< 0.7) with moderate/high temperature can be restrictive and may cause erratic behavior by cutting off too many good options early."); } if (config.topP === 1.0 && config.topK === 1 && config.temperature > 0) { warnings.push("Using Top-P=1 and Top-K=1 simultaneously with temperature > 0 is unusual. Top-K=1 implies greedy decoding (if temp=0), making Top-P less relevant."); } if (config.topK < 10 && config.topK > 1 && config.temperature < 0.5) { warnings.push("Very low Top-K (< 10, not 1) with low temperature might overly restrict word choice, leading to repetitive or simplistic output."); } if (config.temperature == 0.0 && config.topK > 1) { warnings.push("Temperature of 0.0 typically implies greedy decoding (Top-K=1). Using Top-K > 1 with Temp=0.0 might yield inconsistent behavior; Top-K=1 is preferred here."); } if (config.temperature > 0.8 && config.topP > 0.95 && config.topK > 60) { warnings.push("High values for Temperature, Top-P, and Top-K together can lead to very random, less coherent, or potentially off-topic outputs. Consider reducing one or more for better focus."); } return { warnings, advice }; }; const getUserPromptComponents = ( originalPrompt: string, processingMode: ProcessingMode, iterationNumber: number, maxIterations: number, ): { systemInstruction: string; coreUserInstructions: string } => { let systemInstruction: string; let coreUserInstructions: string; switch (processingMode) { case 'exploratory': systemInstruction = `You are an AI process engine in 'EXPLORATORY' mode. Your goal is to iteratively evolve a "product" by creatively generating new ideas, expanding upon existing concepts, and exploring diverse facets related to the original user input. In each iteration, focus on adding distinct, novel information or elaborating imaginatively on existing points. Prioritize breadth of relevant ideas and creative connections. This mode encourages divergent thinking. Only if no further meaningful novel expansion or creative addition is possible without becoming trivial, redundant, or off-topic, prefix your response with "CONVERGED:". If your response is truncated, you will be prompted to continue.`; coreUserInstructions = ` 1. **Identify Areas for Creative Expansion**: Analyze the "Current State of Product". Brainstorm novel concepts, new related sections, or creative elaborations that build upon the "Original User Input". 2. **Generate Diverse and Novel Content**: For the identified area(s), provide fresh, imaginative, and detailed content. Aim for originality and a broad exploration of possibilities. 3. **Integrate Creatively**: Weave these new ideas into an enriched version of the product. 4. **Output Format**: Provide ONLY the new, expanded product. Do NOT include preambles. If converged, prefix with "CONVERGED:".`; break; case 'refinement': systemInstruction = `You are an AI process engine in 'REFINEMENT' mode. Your goal is to iteratively improve and optimize a "product" based on an original user input. Focus on enhancing clarity, correctness, conciseness (where appropriate), structure, efficiency, or overall quality. You might refactor, rewrite, or restructure elements. If the product is significantly improved and further refinement yields diminishing returns or is no longer substantially enhancing quality, you MAY prefix your response with "CONVERGED:". If your response is truncated, you will be prompted to continue.`; coreUserInstructions = ` 5. **Identify Areas for Improvement**: Analyze the "Current State of Product" for specific aspects that can be improved based on the "Original User Input" (e.g., clarity, conciseness, structure, code efficiency, argument strength, factual accuracy). 6. **Implement Targeted Enhancements**: Rewrite, refactor, add, or remove targeted content to improve the identified areas. The goal is to make the product measurably better in quality or function. 7. **Integrate into an Improved Product**: Formulate a new version of the product that incorporates these enhancements seamlessly. 8. **Output Format**: Provide ONLY the new, refined product. If converged, you may prefix with "CONVERGED:". Do NOT include other preambles.`; break; case 'distillation': systemInstruction = `You are an AI process engine in 'DISTILLATION' mode. Your goal is to iteratively distill a "product" based on an original user input down to its most essential, minimal, core representation. Focus on identifying the absolute fundamental concepts and removing elaborations, examples, or redundant information. Simplify phrasing and ensure factual accuracy and core meaning are preserved. If the product is maximally distilled and no further meaningful reduction is possible without losing its core identity, you MUST prefix your response with "CONVERGED:". If your response is truncated, you will be prompted to continue.`; coreUserInstructions = ` 9. **Identify Core Essence**: Analyze the "Current State of Product" for its fundamental concepts in relation to the "Original User Input." 10. **Distill and Reduce**: Remove elaborations, simplify phrasing, and consolidate ideas to their most direct and concise form, prioritizing the preservation of core meaning and factual accuracy. 11. **Integrate into Minimal Form**: Formulate a new, significantly more concise product representing this distilled essence. 12. **Output Format**: Provide ONLY the new, distilled product. If converged, prefix with "CONVERGED:". Do NOT include other preambles.`; break; default: // Should not happen systemInstruction = "Error: Unknown processing mode. Please select a valid mode."; coreUserInstructions = "No instructions due to unknown mode."; break; } return { systemInstruction, coreUserInstructions }; }; const buildFullUserPrompt = ( originalPrompt: string, productToProcess: string, iterationNumber: number, maxIterations: number, processingMode: ProcessingMode, coreUserInstructions: string, isContinuation: boolean ): string => { return ` Original User Input: "${originalPrompt}" ${isContinuation ? `PARTIAL PRODUCT CONTENT (GENERATED SO FAR FOR THIS ITERATION ${iterationNumber}):` : `Current State of Product (after Iteration ${iterationNumber - 1}):`} \`\`\` ${productToProcess} \`\`\` This is Iteration ${iterationNumber} of a maximum of ${maxIterations} iterations in ${processingMode.toUpperCase()} mode. ${isContinuation ? `\nIMPORTANT: YOU ARE CONTINUING A PREVIOUSLY TRUNCATED RESPONSE FOR THIS ITERATION. Your previous output was cut short.` : ''} Instructions for this iteration ${isContinuation ? '(CONTINUING PREVIOUS RESPONSE)' : ''}: ${coreUserInstructions} ${isContinuation ? `Please continue generating the product text EXACTLY where you left off from the "PARTIAL PRODUCT CONTENT" shown above. DO NOT repeat any part of the "PARTIAL PRODUCT CONTENT". DO NOT add any preambles, apologies, or explanations for continuing. DO NOT restart the whole product. Simply provide the NEXT CHUNK of the product text. If you believe the product is complete or converged (and you would have prefixed with "CONVERGED:"), ensure that prefix is at the very start of THIS CHUNK if it's the final part.` : `Provide the next version of the product (or the converged product with the prefix):`} `; }; export const iterateProduct = async ( previousProduct: string, iterationNumber: number, maxIterations: number, originalPrompt: string, processingMode: ProcessingMode, modelConfig: ModelConfig, onChunkReceived: (chunkText: string, isInitialChunkOfIteration: boolean) => void, isHaltSignalReceived: () => boolean ): Promise<IterateProductResult> => { if (!ai) { throw new Error("Gemini API client not initialized. API Key might be missing or client setup failed."); } const { systemInstruction, coreUserInstructions } = getUserPromptComponents(originalPrompt, processingMode, iterationNumber, maxIterations); let accumulatedIterationText = ""; let productContentForNextPrompt = previousProduct; let currentCallIsContinuation = false; let callCount = 0; const MAX_CONTINUATION_CALLS = 5; let isFirstChunkOfThisLogicalIteration = true; while (callCount < MAX_CONTINUATION_CALLS) { callCount++; if (currentCallIsContinuation && callCount > 1) { console.log(`Iteration ${iterationNumber}: Delaying for 1000ms before continuation call #${callCount}`); await new Promise(resolve => setTimeout(resolve, 1000)); // 1 second delay } if (isHaltSignalReceived()) { console.log(`Iteration ${iterationNumber}: Halt signal received before API call #${callCount}.`); return { product: accumulatedIterationText, status: 'HALTED' }; } const userPromptForThisCall = buildFullUserPrompt( originalPrompt, productContentForNextPrompt, iterationNumber, maxIterations, processingMode, coreUserInstructions, currentCallIsContinuation ); console.log(`Gemini API Stream Call #${callCount} for Iteration ${iterationNumber}. Mode: ${processingMode}. Continuation: ${currentCallIsContinuation}`); let textFromThisStreamCall = ""; let streamFinishReason: string | undefined; let safetyRatingsInfo: any = null; try { const stream = await ai.models.generateContentStream({ model: MODEL_NAME, contents: [{ role: 'user', parts: [{ text: userPromptForThisCall }] }], config: { systemInstruction: systemInstruction, temperature: modelConfig.temperature, topP: modelConfig.topP, topK: modelConfig.topK, }, }); for await (const chunk of stream) { if (isHaltSignalReceived()) { console.log(`Iteration ${iterationNumber}, API Call ${callCount}: Halt signal received during stream.`); streamFinishReason = "HALTED_BY_USER_DURING_STREAM"; // Custom internal reason // Ensure any received text in this partial chunk is added before halting const currentChunkText = chunk.text; if (typeof currentChunkText === 'string' && currentChunkText.length > 0) { textFromThisStreamCall += currentChunkText; onChunkReceived(currentChunkText, isFirstChunkOfThisLogicalIteration); isFirstChunkOfThisLogicalIteration = false; } break; } const chunkText = chunk.text; streamFinishReason = chunk.candidates?.[0]?.finishReason; if (chunk.candidates?.[0]?.safetyRatings) { safetyRatingsInfo = chunk.candidates[0].safetyRatings; } if (typeof chunkText === 'string' && chunkText.length > 0) { textFromThisStreamCall += chunkText; onChunkReceived(chunkText, isFirstChunkOfThisLogicalIteration); isFirstChunkOfThisLogicalIteration = false; } } accumulatedIterationText += textFromThisStreamCall; if (streamFinishReason === "HALTED_BY_USER_DURING_STREAM") { return { product: accumulatedIterationText, status: 'HALTED' }; } if (!textFromThisStreamCall && callCount === 1 && !currentCallIsContinuation && !accumulatedIterationText && !isHaltSignalReceived()) { let errorMessage = `Received no text content from Gemini API on initial stream attempt for Iteration ${iterationNumber}.`; if (streamFinishReason === "SAFETY") { errorMessage = `Generation stopped by API due to safety policy during Iteration ${iterationNumber}. Review input/product or adjust settings. Safety Ratings: ${JSON.stringify(safetyRatingsInfo)}`; } else if (streamFinishReason === "RECITATION") { errorMessage = `Generation stopped by API due to recitation policy during Iteration ${iterationNumber}.`; } else if (streamFinishReason) { errorMessage = `Generation failed with API reason: '${streamFinishReason}' during Iteration ${iterationNumber} on initial attempt.`; } console.warn(errorMessage, "Full diagnostic streamFinishReason:", streamFinishReason, "SafetyRatings:", safetyRatingsInfo); throw new Error(errorMessage); } const isTruncated = (streamFinishReason === "MAX_TOKENS" || streamFinishReason === "OTHER"); if (isTruncated && textFromThisStreamCall.length > 0 && !accumulatedIterationText.startsWith("CONVERGED:")) { currentCallIsContinuation = true; productContentForNextPrompt = accumulatedIterationText; if (callCount >= MAX_CONTINUATION_CALLS) { console.warn(`Iteration ${iterationNumber}: Max continuation calls (${MAX_CONTINUATION_CALLS}) reached. Returning possibly truncated content.`); break; } console.log(`Iteration ${iterationNumber}: Content stream truncated (reason: ${streamFinishReason}). Attempting continuation stream call ${callCount + 1}.`); } else { if (isTruncated && textFromThisStreamCall.length === 0 && !accumulatedIterationText.startsWith("CONVERGED:") && callCount < MAX_CONTINUATION_CALLS) { console.warn(`Iteration ${iterationNumber}: Content stream reported as truncated (reason: ${streamFinishReason}) but no new text was generated in this stream call. Stopping continuation for this iteration.`); } if (streamFinishReason === "SAFETY" || streamFinishReason === "RECITATION") { console.warn(`Iteration ${iterationNumber}, Call ${callCount}: Stream ended due to ${streamFinishReason}. Accumulated text so far: "${accumulatedIterationText.substring(0,100)}..." Safety: ${JSON.stringify(safetyRatingsInfo)}`); } break; } } catch (error) { // If a halt signal is received during error processing, prioritize halt. if (isHaltSignalReceived()) { console.warn(`Iteration ${iterationNumber}, API Call ${callCount}: Halt signal received during error handling. Prioritizing HALT. Original error:`, error); return { product: accumulatedIterationText, status: 'HALTED' }; } let errorMessage = `An unexpected error occurred during API stream call for Iteration ${iterationNumber}, API Call #${callCount}.`; if (error instanceof Error) { if (error.message.includes("429") || error.message.toUpperCase().includes("RESOURCE_EXHAUSTED")) { errorMessage = `API rate limit (429 RESOURCE_EXHAUSTED) hit during Iteration ${iterationNumber}. Please wait a minute and try again or reduce request frequency. Details: ${error.message}`; } else if (error.message.includes("Received no text content") || error.message.includes("safety policy") || error.message.includes("recitation policy")){ errorMessage = error.message; // Use specific message } else { errorMessage = `Error during API call for Iteration ${iterationNumber}: ${error.message}`; } } else { errorMessage = `Unknown error during API call for Iteration ${iterationNumber}: ${String(error)}`; } console.error(errorMessage, error); throw new Error(errorMessage); // Let App.tsx handle this thrown error } if (isHaltSignalReceived()) { console.log(`Iteration ${iterationNumber}: Halt signal received after API call #${callCount} processing.`); return { product: accumulatedIterationText, status: 'HALTED' }; } } if (isHaltSignalReceived()) { console.warn(`Iteration ${iterationNumber}: Halt signal received. Process HALTED.`); return { product: accumulatedIterationText, status: 'HALTED' }; } if (callCount >= MAX_CONTINUATION_CALLS && !accumulatedIterationText.startsWith("CONVERGED:")) { console.warn(`Iteration ${iterationNumber}: Finished due to max continuation calls. Product might be incomplete.`); } if (accumulatedIterationText.startsWith("CONVERGED:")) { const product = accumulatedIterationText.substring("CONVERGED:".length).trimStart(); return { product, status: 'CONVERGED' }; } return { product: accumulatedIterationText, status: 'COMPLETED' }; };