mirror of
https://github.com/sourcebot-dev/sourcebot.git
synced 2025-12-12 04:15:30 +00:00
Clean up successful and failed jobs (#343)
* clean up * annoying log nit * feedback
This commit is contained in:
parent
9be5522c7b
commit
1ea377e7d6
7 changed files with 20 additions and 3 deletions
|
|
@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
### Fixed
|
||||
- Text highlighting clarity. [#342](https://github.com/sourcebot-dev/sourcebot/pull/342)
|
||||
- Clean up successful and failed jobs in Redis queues. [#343](https://github.com/sourcebot-dev/sourcebot/pull/343)
|
||||
|
||||
## [4.2.0] - 2025-06-09
|
||||
|
||||
|
|
|
|||
|
|
@ -23,6 +23,8 @@ The following environment variables allow you to configure your Sourcebot deploy
|
|||
| `EMAIL_FROM_ADDRESS` | `-` | <p>The email address that transactional emails will be sent from. See [this doc](/docs/configuration/transactional-emails) for more info.</p> |
|
||||
| `REDIS_DATA_DIR` | `$DATA_CACHE_DIR/redis` | <p>The data directory for the default Redis instance.</p> |
|
||||
| `REDIS_URL` | `redis://localhost:6379` | <p>Connection string of your Redis instance. By default, a Redis database is automatically provisioned at startup within the container.</p> |
|
||||
| `REDIS_REMOVE_ON_COMPLETE` | `0` | <p>Controls how many completed jobs are allowed to remain in Redis queues.</p> |
|
||||
| `REDIS_REMOVE_ON_FAIL` | `100` | <p>Controls how many failed jobs are allowed to remain in Redis queues.</p> |
|
||||
| `SHARD_MAX_MATCH_COUNT` | `10000` | <p>The maximum shard count per query.</p> |
|
||||
| `SMTP_CONNECTION_URL` | `-` | <p>The url to the SMTP service used for sending transactional emails. See [this doc](/docs/configuration/transactional-emails) for more info.</p> |
|
||||
| `SOURCEBOT_ENCRYPTION_KEY` | Automatically generated at startup if no value is provided. Generated using `openssl rand -base64 24` | <p>Used to encrypt connection secrets and generate API keys.</p> |
|
||||
|
|
|
|||
|
|
@ -64,6 +64,9 @@ export class ConnectionManager implements IConnectionManager {
|
|||
connectionName: connection.name,
|
||||
orgId: connection.orgId,
|
||||
config: connectionConfig,
|
||||
}, {
|
||||
removeOnComplete: env.REDIS_REMOVE_ON_COMPLETE,
|
||||
removeOnFail: env.REDIS_REMOVE_ON_FAIL,
|
||||
});
|
||||
this.logger.info(`Added job to queue for connection ${connection.name} (id: ${connection.id})`);
|
||||
}).catch((err: unknown) => {
|
||||
|
|
|
|||
|
|
@ -35,6 +35,8 @@ export const env = createEnv({
|
|||
FALLBACK_GITEA_CLOUD_TOKEN: z.string().optional(),
|
||||
|
||||
REDIS_URL: z.string().url().default("redis://localhost:6379"),
|
||||
REDIS_REMOVE_ON_COMPLETE: numberSchema.default(0),
|
||||
REDIS_REMOVE_ON_FAIL: numberSchema.default(100),
|
||||
|
||||
NEXT_PUBLIC_SENTRY_BACKEND_DSN: z.string().optional(),
|
||||
NEXT_PUBLIC_SENTRY_ENVIRONMENT: z.string().optional(),
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import { existsSync, readdirSync, promises } from 'fs';
|
|||
import { indexGitRepository } from "./zoekt.js";
|
||||
import { PromClient } from './promClient.js';
|
||||
import * as Sentry from "@sentry/node";
|
||||
import { env } from './env.js';
|
||||
|
||||
interface IRepoManager {
|
||||
validateIndexedReposHaveShards: () => Promise<void>;
|
||||
|
|
@ -106,8 +107,10 @@ export class RepoManager implements IRepoManager {
|
|||
name: 'repoIndexJob',
|
||||
data: { repo },
|
||||
opts: {
|
||||
priority: priority
|
||||
}
|
||||
priority: priority,
|
||||
removeOnComplete: env.REDIS_REMOVE_ON_COMPLETE,
|
||||
removeOnFail: env.REDIS_REMOVE_ON_FAIL,
|
||||
},
|
||||
})));
|
||||
|
||||
// Increment pending jobs counter for each repo added
|
||||
|
|
@ -396,6 +399,10 @@ export class RepoManager implements IRepoManager {
|
|||
await this.gcQueue.addBulk(repos.map(repo => ({
|
||||
name: 'repoGarbageCollectionJob',
|
||||
data: { repo },
|
||||
opts: {
|
||||
removeOnComplete: env.REDIS_REMOVE_ON_COMPLETE,
|
||||
removeOnFail: env.REDIS_REMOVE_ON_FAIL,
|
||||
}
|
||||
})));
|
||||
|
||||
logger.info(`Added ${repos.length} jobs to gcQueue`);
|
||||
|
|
|
|||
|
|
@ -67,6 +67,9 @@ export const env = createEnv({
|
|||
STRIPE_WEBHOOK_SECRET: z.string().optional(),
|
||||
STRIPE_ENABLE_TEST_CLOCKS: booleanSchema.default('false'),
|
||||
|
||||
LOGTAIL_TOKEN: z.string().optional(),
|
||||
LOGTAIL_HOST: z.string().url().optional(),
|
||||
|
||||
// Misc
|
||||
CONFIG_MAX_REPOS_NO_TOKEN: numberSchema.default(Number.MAX_SAFE_INTEGER),
|
||||
NODE_ENV: z.enum(["development", "test", "production"]),
|
||||
|
|
|
|||
|
|
@ -79,7 +79,6 @@ export const getPlan = (): Plan => {
|
|||
|
||||
return licenseKey.seats === SOURCEBOT_UNLIMITED_SEATS ? "self-hosted:enterprise-unlimited" : "self-hosted:enterprise";
|
||||
} else {
|
||||
logger.info(`No valid license key found. Falling back to oss plan.`);
|
||||
return "oss";
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in a new issue