Compare commits

...

5 commits

Author SHA1 Message Date
Aditya Raj Prasad
b5b01d9589
Merge fa833b3574 into 7c72578765
2025-12-04 17:11:36 -08:00
bkellam
7c72578765
sourcebot v4.10.2
2025-12-04 10:41:41 -08:00
Brendan Kellam
483b433aab
fix(web): Respect disable telemetry flag for web server side events (#657)
* fix

* changelog
2025-12-04 10:32:32 -08:00
Aditya Raj Prasad
fa833b3574
Merge branch 'main' into fix-ai-no-response-error
2025-11-28 15:26:58 +05:30
Aditya Raj
5cdb8fefc5
fixed the #633 bug
2025-11-27 11:59:07 +05:30
3 changed files with 105 additions and 108 deletions

View file

@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]

+## [4.10.2] - 2025-12-04
+
+### Fixed
+- Fixed issue where the disable telemetry flag was not being respected for web server telemetry. [#657](https://github.com/sourcebot-dev/sourcebot/pull/657)
+
 ## [4.10.1] - 2025-12-03

 ### Added

View file

@@ -42,115 +42,103 @@ export const createAgentStream = async ({
         searchScopeRepoNames,
     });

-    const stream = streamText({
-        model,
-        providerOptions,
-        system: baseSystemPrompt,
-        messages: inputMessages,
-        tools: {
-            [toolNames.searchCode]: createCodeSearchTool(searchScopeRepoNames),
-            [toolNames.readFiles]: readFilesTool,
-            [toolNames.findSymbolReferences]: findSymbolReferencesTool,
-            [toolNames.findSymbolDefinitions]: findSymbolDefinitionsTool,
-            [toolNames.searchRepos]: searchReposTool,
-            [toolNames.listAllRepos]: listAllReposTool,
-        },
-        prepareStep: async ({ stepNumber }) => {
-            // The first step attaches any mentioned sources to the system prompt.
-            if (stepNumber === 0 && inputSources.length > 0) {
-                const fileSources = inputSources.filter((source) => source.type === 'file');
-                const resolvedFileSources = (
-                    await Promise.all(fileSources.map(resolveFileSource)))
-                    .filter((source) => source !== undefined)
-                const fileSourcesSystemPrompt = await createFileSourcesSystemPrompt({
-                    files: resolvedFileSources
-                });
-                return {
-                    system: `${baseSystemPrompt}\n\n${fileSourcesSystemPrompt}`
-                }
-            }
-            if (stepNumber === env.SOURCEBOT_CHAT_MAX_STEP_COUNT - 1) {
-                return {
-                    system: `**CRITICAL**: You have reached the maximum number of steps!! YOU MUST PROVIDE YOUR FINAL ANSWER NOW. DO NOT KEEP RESEARCHING.\n\n${answerInstructions}`,
-                    activeTools: [],
-                }
-            }
-            return undefined;
-        },
-        temperature: env.SOURCEBOT_CHAT_MODEL_TEMPERATURE,
-        stopWhen: [
-            stepCountIsGTE(env.SOURCEBOT_CHAT_MAX_STEP_COUNT),
-        ],
-        toolChoice: "auto", // Let the model decide when to use tools
-        onStepFinish: ({ toolResults }) => {
-            // This takes care of extracting any sources that the LLM has seen as part of
-            // the tool calls it made.
-            toolResults.forEach(({ toolName, output, dynamic }) => {
-                // we don't care about dynamic tool results here.
-                if (dynamic) {
-                    return;
-                }
-                if (isServiceError(output)) {
-                    // is there something we want to do here?
-                    return;
-                }
-                if (toolName === toolNames.readFiles) {
-                    output.forEach((file) => {
-                        onWriteSource({
-                            type: 'file',
-                            language: file.language,
-                            repo: file.repository,
-                            path: file.path,
-                            revision: file.revision,
-                            name: file.path.split('/').pop() ?? file.path,
-                        })
-                    })
-                }
-                else if (toolName === toolNames.searchCode) {
-                    output.files.forEach((file) => {
-                        onWriteSource({
-                            type: 'file',
-                            language: file.language,
-                            repo: file.repository,
-                            path: file.fileName,
-                            revision: file.revision,
-                            name: file.fileName.split('/').pop() ?? file.fileName,
-                        })
-                    })
-                }
-                else if (toolName === toolNames.findSymbolDefinitions || toolName === toolNames.findSymbolReferences) {
-                    output.forEach((file) => {
-                        onWriteSource({
-                            type: 'file',
-                            language: file.language,
-                            repo: file.repository,
-                            path: file.fileName,
-                            revision: file.revision,
-                            name: file.fileName.split('/').pop() ?? file.fileName,
-                        })
-                    })
-                }
-            })
-        },
-        // Only enable langfuse traces in cloud environments.
-        experimental_telemetry: {
-            isEnabled: clientEnv.NEXT_PUBLIC_SOURCEBOT_CLOUD_ENVIRONMENT !== undefined,
-            metadata: {
-                langfuseTraceId: traceId,
-            },
-        },
-        onError: (error) => {
-            logger.error(error);
-        },
-    });
+    let stream;
+    try {
+        stream = streamText({
+            model,
+            providerOptions,
+            system: baseSystemPrompt,
+            messages: inputMessages,
+            tools: {
+                [toolNames.searchCode]: createCodeSearchTool(searchScopeRepoNames),
+                [toolNames.readFiles]: readFilesTool,
+                [toolNames.findSymbolReferences]: findSymbolReferencesTool,
+                [toolNames.findSymbolDefinitions]: findSymbolDefinitionsTool,
+                [toolNames.searchRepos]: searchReposTool,
+                [toolNames.listAllRepos]: listAllReposTool,
+            },
+            prepareStep: async ({ stepNumber }) => {
+                if (stepNumber === 0 && inputSources.length > 0) {
+                    const fileSources = inputSources.filter((source) => source.type === 'file');
+                    const resolvedFileSources = (
+                        await Promise.all(fileSources.map(resolveFileSource)))
+                        .filter((source) => source !== undefined)
+                    const fileSourcesSystemPrompt = await createFileSourcesSystemPrompt({
+                        files: resolvedFileSources
+                    });
+                    return {
+                        system: `${baseSystemPrompt}\n\n${fileSourcesSystemPrompt}`
+                    }
+                }
+                if (stepNumber === env.SOURCEBOT_CHAT_MAX_STEP_COUNT - 1) {
+                    return {
+                        system: `**CRITICAL**: You have reached the maximum number of steps!! YOU MUST PROVIDE YOUR FINAL ANSWER NOW. DO NOT KEEP RESEARCHING.\n\n${answerInstructions}`,
+                        activeTools: [],
+                    }
+                }
+                return undefined;
+            },
+            temperature: env.SOURCEBOT_CHAT_MODEL_TEMPERATURE,
+            stopWhen: [
+                stepCountIsGTE(env.SOURCEBOT_CHAT_MAX_STEP_COUNT),
+            ],
+            toolChoice: "auto",
+            onStepFinish: ({ toolResults }) => {
+                toolResults.forEach(({ toolName, output, dynamic }) => {
+                    if (dynamic) return;
+                    if (isServiceError(output)) return;
+                    if (toolName === toolNames.readFiles) {
+                        output.forEach((file) => {
+                            onWriteSource({
+                                type: 'file',
+                                language: file.language,
+                                repo: file.repository,
+                                path: file.path,
+                                revision: file.revision,
+                                name: file.path.split('/').pop() ?? file.path,
+                            })
+                        })
+                    } else if (toolName === toolNames.searchCode) {
+                        output.files.forEach((file) => {
+                            onWriteSource({
+                                type: 'file',
+                                language: file.language,
+                                repo: file.repository,
+                                path: file.fileName,
+                                revision: file.revision,
+                                name: file.fileName.split('/').pop() ?? file.fileName,
+                            })
+                        })
+                    } else if (toolName === toolNames.findSymbolDefinitions || toolName === toolNames.findSymbolReferences) {
+                        output.forEach((file) => {
+                            onWriteSource({
+                                type: 'file',
+                                language: file.language,
+                                repo: file.repository,
+                                path: file.fileName,
+                                revision: file.revision,
+                                name: file.fileName.split('/').pop() ?? file.fileName,
+                            })
+                        })
+                    }
+                })
+            },
+            experimental_telemetry: {
+                isEnabled: clientEnv.NEXT_PUBLIC_SOURCEBOT_CLOUD_ENVIRONMENT !== undefined,
+                metadata: {
+                    langfuseTraceId: traceId,
+                },
+            },
+            onError: (error) => {
+                logger.error(error);
+            },
+        });
+    } catch (err) {
+        if (model?.providerId === 'openai-compatible') {
+            throw new Error('The selected AI provider does not support codebase tool calls. Please use a provider that supports function/tool calls for codebase-related questions.');
+        }
+        throw err;
+    }

     return stream;
 }
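
Read on its own, the change above is a guard around the streamText call: the stream construction is wrapped in try/catch, and if it throws while the configured model's providerId is 'openai-compatible', the opaque failure is replaced with an explicit message telling the user to pick a provider with function/tool-call support (the branch name fix-ai-no-response-error suggests this is the "no response" symptom reported in #633). Below is a minimal sketch of the same pattern, with hypothetical stand-ins (Provider, callWithTools) for the module's real model and streamText wiring, assuming the underlying call throws synchronously when tool-call fields are rejected:

    // Sketch only: illustrates the try/catch fallback in the diff above, not Sourcebot's exact code.
    // `Provider` and `callWithTools` are hypothetical stand-ins for `model` and `streamText`.
    interface Provider {
        providerId: string;
    }

    async function callWithTools(provider: Provider, prompt: string): Promise<string> {
        // Assumed behavior: the backing API rejects tool/function-call fields by throwing.
        if (provider.providerId === 'openai-compatible') {
            throw new Error('provider rejected the tool-calling request');
        }
        return `answer for: ${prompt}`;
    }

    export async function createStreamSafely(provider: Provider, prompt: string): Promise<string> {
        let stream: string;
        try {
            stream = await callWithTools(provider, prompt);
        } catch (err) {
            // Mirror the diff: rewrite the error only for the generic openai-compatible
            // adapter, where missing tool-call support is the likely cause.
            if (provider.providerId === 'openai-compatible') {
                throw new Error(
                    'The selected AI provider does not support codebase tool calls. ' +
                    'Please use a provider that supports function/tool calls for codebase-related questions.'
                );
            }
            throw err;
        }
        return stream;
    }

    // Example: createStreamSafely({ providerId: 'openai-compatible' }, 'where is auth handled?')
    // now rejects with the actionable message instead of the raw provider error.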

View file

@@ -48,6 +48,10 @@ const getPostHogCookie = (cookieStore: Pick<RequestCookies, 'get'>): PostHogCook
 }

 export async function captureEvent<E extends PosthogEvent>(event: E, properties: PosthogEventMap[E]) {
+    if (env.SOURCEBOT_TELEMETRY_DISABLED === 'true') {
+        return;
+    }
+
     const cookieStore = await cookies();
     const cookie = getPostHogCookie(cookieStore);
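
The guard added above makes the server-side captureEvent a no-op when SOURCEBOT_TELEMETRY_DISABLED is 'true', so disabling telemetry now also skips cookie parsing and the PostHog call on the web server (the behavior the 4.10.2 changelog entry describes). A standalone sketch of the same early-return pattern, assuming the posthog-node client; the client setup and distinctId handling here are illustrative, not Sourcebot's actual wiring:

    // Sketch only: an early-return telemetry guard, assuming the `posthog-node` package.
    // The env flag name matches the diff; everything else is illustrative.
    import { PostHog } from 'posthog-node';

    const client = process.env.POSTHOG_API_KEY
        ? new PostHog(process.env.POSTHOG_API_KEY, { host: 'https://us.i.posthog.com' })
        : undefined;

    export async function captureEvent(event: string, properties: Record<string, unknown>) {
        // Respect the opt-out before touching cookies or the network,
        // mirroring the guard added in the diff above.
        if (process.env.SOURCEBOT_TELEMETRY_DISABLED === 'true' || !client) {
            return;
        }

        client.capture({
            distinctId: 'anonymous', // the real code derives this from the PostHog cookie
            event,
            properties,
        });
    }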