Compare commits


3 commits

Author              SHA1         Message                                             Date
Aditya Raj Prasad   0dc43e9477   Merge fa833b3574 into bcca1d6d7d                    2025-12-04 18:28:31 +05:30
Aditya Raj Prasad   fa833b3574   Merge branch 'main' into fix-ai-no-response-error   2025-11-28 15:26:58 +05:30
Aditya Raj          5cdb8fefc5   fixed the #633 bug                                  2025-11-27 11:59:07 +05:30


@@ -42,7 +42,9 @@ export const createAgentStream = async ({
searchScopeRepoNames,
});
-const stream = streamText({
+let stream;
+try {
+stream = streamText({
model,
providerOptions,
system: baseSystemPrompt,
@@ -56,51 +58,35 @@ export const createAgentStream = async ({
[toolNames.listAllRepos]: listAllReposTool,
},
prepareStep: async ({ stepNumber }) => {
-// The first step attaches any mentioned sources to the system prompt.
if (stepNumber === 0 && inputSources.length > 0) {
const fileSources = inputSources.filter((source) => source.type === 'file');
const resolvedFileSources = (
await Promise.all(fileSources.map(resolveFileSource)))
.filter((source) => source !== undefined)
const fileSourcesSystemPrompt = await createFileSourcesSystemPrompt({
files: resolvedFileSources
});
return {
system: `${baseSystemPrompt}\n\n${fileSourcesSystemPrompt}`
}
}
if (stepNumber === env.SOURCEBOT_CHAT_MAX_STEP_COUNT - 1) {
return {
system: `**CRITICAL**: You have reached the maximum number of steps!! YOU MUST PROVIDE YOUR FINAL ANSWER NOW. DO NOT KEEP RESEARCHING.\n\n${answerInstructions}`,
activeTools: [],
}
}
return undefined;
},
temperature: env.SOURCEBOT_CHAT_MODEL_TEMPERATURE,
stopWhen: [
stepCountIsGTE(env.SOURCEBOT_CHAT_MAX_STEP_COUNT),
],
toolChoice: "auto", // Let the model decide when to use tools
toolChoice: "auto",
onStepFinish: ({ toolResults }) => {
-// This takes care of extracting any sources that the LLM has seen as part of
-// the tool calls it made.
toolResults.forEach(({ toolName, output, dynamic }) => {
-// we don't care about dynamic tool results here.
-if (dynamic) {
-return;
-}
-if (isServiceError(output)) {
-// is there something we want to do here?
-return;
-}
+if (dynamic) return;
+if (isServiceError(output)) return;
if (toolName === toolNames.readFiles) {
output.forEach((file) => {
onWriteSource({
@@ -112,8 +98,7 @@ export const createAgentStream = async ({
name: file.path.split('/').pop() ?? file.path,
})
})
-}
-else if (toolName === toolNames.searchCode) {
+} else if (toolName === toolNames.searchCode) {
output.files.forEach((file) => {
onWriteSource({
type: 'file',
@@ -124,8 +109,7 @@ export const createAgentStream = async ({
name: file.fileName.split('/').pop() ?? file.fileName,
})
})
-}
-else if (toolName === toolNames.findSymbolDefinitions || toolName === toolNames.findSymbolReferences) {
+} else if (toolName === toolNames.findSymbolDefinitions || toolName === toolNames.findSymbolReferences) {
output.forEach((file) => {
onWriteSource({
type: 'file',
@@ -139,7 +123,6 @@ export const createAgentStream = async ({
}
})
},
-// Only enable langfuse traces in cloud environments.
experimental_telemetry: {
isEnabled: clientEnv.NEXT_PUBLIC_SOURCEBOT_CLOUD_ENVIRONMENT !== undefined,
metadata: {
@@ -150,7 +133,12 @@ export const createAgentStream = async ({
logger.error(error);
},
});
+} catch (err) {
+if (model?.providerId === 'openai-compatible') {
+throw new Error('The selected AI provider does not support codebase tool calls. Please use a provider that supports function/tool calls for codebase-related questions.');
+}
+throw err;
+}
return stream;
}
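
For readers skimming the compare view, the change in 5cdb8fefc5 boils down to one pattern: wrap the streamText(...) call in a try/catch and, when the configured model comes from an 'openai-compatible' provider (which may not support function/tool calls), rethrow a clearer, user-facing error instead of the raw provider failure. The sketch below reproduces that shape in isolation; ProviderInfo and createStreamSafely are illustrative names for this example only and are not part of the Sourcebot codebase or the AI SDK.

```typescript
// Minimal standalone sketch of the error-handling pattern from the diff above.
// `ProviderInfo` and `createStreamSafely` are illustrative, not real Sourcebot APIs.

interface ProviderInfo {
    providerId: string;
}

function createStreamSafely<T>(model: ProviderInfo | undefined, createStream: () => T): T {
    try {
        return createStream();
    } catch (err) {
        if (model?.providerId === 'openai-compatible') {
            // Surface a message the chat UI can show directly to the user.
            throw new Error(
                'The selected AI provider does not support codebase tool calls. ' +
                'Please use a provider that supports function/tool calls for codebase-related questions.'
            );
        }
        // Any other failure is rethrown unchanged so upstream handling still applies.
        throw err;
    }
}

// Example: a provider whose endpoint rejects tool definitions.
const model: ProviderInfo = { providerId: 'openai-compatible' };

try {
    createStreamSafely(model, () => {
        throw new Error('400: tools are not supported by this endpoint');
    });
} catch (err) {
    console.error((err as Error).message);
}
```

Rethrowing unrecognised errors unchanged, as the diff does with `throw err;`, keeps the existing logging and error handling for every failure that is not a provider-capability problem.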