chore: Remove unused env vars (#27596)

This commit is contained in:
Ben White
2025-01-17 10:15:14 +01:00
committed by GitHub
parent 8980b61920
commit 080bef59fa
13 changed files with 5 additions and 35 deletions

View File

@@ -14,7 +14,6 @@
<env name="SESSION_RECORDING_KAFKA_BATCH_SIZE" value="200" />
<env name="SESSION_RECORDING_MAX_BUFFER_AGE_SECONDS" value="180" />
<env name="SESSION_RECORDING_SUMMARY_INGESTION_ENABLED_TEAMS" value="all" />
<env name="WORKER_CONCURRENCY" value="2" />
<env name="SESSION_RECORDING_KAFKA_QUEUE_SIZE" value="600" />
</envs>
<method v="2" />

View File

@@ -11,11 +11,9 @@
set -e -o pipefail
export WORKER_CONCURRENCY=1
export KAFKA_MAX_MESSAGE_BATCH_SIZE=0
export APP_METRICS_FLUSH_FREQUENCY_MS=0 # Reduce the potential for spurious errors in tests that wait for metrics
export APP_METRICS_GATHERED_FOR_ALL=true
export PLUGINS_DEFAULT_LOG_LEVEL=0 # All logs, as debug logs are used in synchronization barriers
export PLUGINS_DEFAULT_LOG_LEVEL=0 # All logs, as debug logs are used in synchronization barriers
export NODE_ENV=production-functional-tests
export PLUGIN_SERVER_MODE=functional-tests # running all capabilities is too slow

View File

@@ -35,7 +35,6 @@ export function getDefaultConfig(): PluginsServerConfig {
CLICKHOUSE_PASSWORD: null,
CLICKHOUSE_CA: null,
CLICKHOUSE_SECURE: false,
CLICKHOUSE_DISABLE_EXTERNAL_SCHEMAS: true,
EVENT_OVERFLOW_BUCKET_CAPACITY: 1000,
EVENT_OVERFLOW_BUCKET_REPLENISH_RATE: 1.0,
SKIP_UPDATE_EVENT_AND_PROPERTIES_STEP: false,
@@ -75,7 +74,6 @@ export function getDefaultConfig(): PluginsServerConfig {
POSTHOG_REDIS_PORT: 6379,
BASE_DIR: '.',
PLUGINS_RELOAD_PUBSUB_CHANNEL: 'reload-plugins',
WORKER_CONCURRENCY: 1,
TASK_TIMEOUT: 30,
TASKS_PER_WORKER: 10,
INGESTION_CONCURRENCY: 10,
@@ -108,7 +106,6 @@ export function getDefaultConfig(): PluginsServerConfig {
HEALTHCHECK_MAX_STALE_SECONDS: 2 * 60 * 60, // 2 hours
SITE_URL: null,
KAFKA_PARTITIONS_CONSUMED_CONCURRENTLY: 1,
CLICKHOUSE_DISABLE_EXTERNAL_SCHEMAS_TEAMS: '',
CLICKHOUSE_JSON_EVENTS_KAFKA_TOPIC: KAFKA_EVENTS_JSON,
CLICKHOUSE_HEATMAPS_KAFKA_TOPIC: KAFKA_CLICKHOUSE_HEATMAP_EVENTS,
EXCEPTIONS_SYMBOLIFICATION_KAFKA_TOPIC: KAFKA_EXCEPTION_SYMBOLIFICATION_EVENTS,
@@ -124,7 +121,6 @@ export function getDefaultConfig(): PluginsServerConfig {
PLUGIN_SERVER_EVENTS_INGESTION_PIPELINE: null,
PLUGIN_LOAD_SEQUENTIALLY: false,
KAFKAJS_LOG_LEVEL: 'WARN',
APP_METRICS_GATHERED_FOR_ALL: isDevEnv() ? true : false,
MAX_TEAM_ID_TO_BUFFER_ANONYMOUS_EVENTS_FOR: 0,
USE_KAFKA_FOR_SCHEDULED_TASKS: true,
CLOUD_DEPLOYMENT: null,

View File

@@ -67,7 +67,7 @@ export async function eachBatchAppsOnEventHandlers(
payload,
(teamId) => queue.pluginsServer.pluginConfigsPerTeam.has(teamId),
(event) => eachMessageAppsOnEventHandlers(event, queue),
queue.pluginsServer.WORKER_CONCURRENCY * queue.pluginsServer.TASKS_PER_WORKER,
queue.pluginsServer.TASKS_PER_WORKER,
'on_event'
)
}

View File

@@ -82,7 +82,7 @@ export async function startPluginsServer(
}
status.updatePrompt(serverConfig.PLUGIN_SERVER_MODE)
status.info('', `${serverConfig.WORKER_CONCURRENCY} workers, ${serverConfig.TASKS_PER_WORKER} tasks per worker`)
status.info('', `${serverConfig.TASKS_PER_WORKER} tasks per worker`)
runStartupProfiles(serverConfig)
// Used to trigger reloads of plugin code/config

View File

@@ -127,7 +127,6 @@ export type CdpConfig = {
}
export interface PluginsServerConfig extends CdpConfig {
WORKER_CONCURRENCY: number // number of concurrent worker threads
TASKS_PER_WORKER: number // number of parallel tasks per worker thread
INGESTION_CONCURRENCY: number // number of parallel event ingestion queues per batch
INGESTION_BATCH_SIZE: number // kafka consumer batch size
@@ -150,8 +149,6 @@ export interface PluginsServerConfig extends CdpConfig {
CLICKHOUSE_PASSWORD: string | null
CLICKHOUSE_CA: string | null // ClickHouse CA certs
CLICKHOUSE_SECURE: boolean // whether to secure ClickHouse connection
CLICKHOUSE_DISABLE_EXTERNAL_SCHEMAS: boolean // whether to disallow external schemas like protobuf for clickhouse kafka engine
CLICKHOUSE_DISABLE_EXTERNAL_SCHEMAS_TEAMS: string // (advanced) a comma separated list of teams to disable clickhouse external schemas for
CLICKHOUSE_JSON_EVENTS_KAFKA_TOPIC: string // (advanced) topic to send events for clickhouse ingestion
CLICKHOUSE_HEATMAPS_KAFKA_TOPIC: string // (advanced) topic to send heatmap data for clickhouse ingestion
EXCEPTIONS_SYMBOLIFICATION_KAFKA_TOPIC: string // (advanced) topic to send exception event data for stack trace processing
@@ -233,7 +230,6 @@ export interface PluginsServerConfig extends CdpConfig {
PLUGIN_SERVER_EVENTS_INGESTION_PIPELINE: string | null // TODO: shouldn't be a string probably
PLUGIN_LOAD_SEQUENTIALLY: boolean // could help with reducing memory usage spikes on startup
KAFKAJS_LOG_LEVEL: 'NOTHING' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'
APP_METRICS_GATHERED_FOR_ALL: boolean // whether to gather app metrics for all teams
MAX_TEAM_ID_TO_BUFFER_ANONYMOUS_EVENTS_FOR: number
USE_KAFKA_FOR_SCHEDULED_TASKS: boolean // distribute scheduled tasks across the scheduler workers
EVENT_OVERFLOW_BUCKET_CAPACITY: number

View File

@@ -13,7 +13,6 @@ jest.setTimeout(60000) // 60 sec timeout
const extraServerConfig: Partial<PluginsServerConfig> = {
TASK_TIMEOUT: 2,
WORKER_CONCURRENCY: 2,
KAFKA_CONSUMPTION_TOPIC: KAFKA_EVENTS_PLUGIN_INGESTION,
LOG_LEVEL: LogLevel.Log,
}

View File

@@ -31,13 +31,7 @@ describe('http server', () => {
await resetTestDatabase(testCode)
const pluginsServer = await startPluginsServer(
{
WORKER_CONCURRENCY: 0,
},
makePiscina,
{ http: true }
)
const pluginsServer = await startPluginsServer({}, makePiscina, { http: true })
await new Promise((resolve) =>
http.get(`http://localhost:${DEFAULT_HTTP_SERVER_PORT}/_health`, (res) => {
@@ -59,13 +53,7 @@ describe('http server', () => {
await resetTestDatabase(testCode)
const pluginsServer = await startPluginsServer(
{
WORKER_CONCURRENCY: 0,
},
makePiscina,
{ http: true, ingestion: true }
)
const pluginsServer = await startPluginsServer({}, makePiscina, { http: true, ingestion: true })
await new Promise((resolve) =>
http.get(`http://localhost:${DEFAULT_HTTP_SERVER_PORT}/_ready`, (res) => {

View File

@@ -134,7 +134,6 @@ describe('eachBatchX', () => {
queue = {
bufferSleep: jest.fn(),
pluginsServer: {
WORKER_CONCURRENCY: 1,
TASKS_PER_WORKER: 10,
INGESTION_CONCURRENCY: 4,
kafkaProducer: {

View File

@@ -20,7 +20,6 @@ jest.mock('../../../src/utils/status')
jest.setTimeout(70000) // 70 sec timeout
const extraServerConfig: Partial<PluginsServerConfig> = {
WORKER_CONCURRENCY: 1,
KAFKA_CONSUMPTION_TOPIC: KAFKA_EVENTS_PLUGIN_INGESTION,
LOG_LEVEL: LogLevel.Log,
}

View File

@@ -56,7 +56,6 @@ describe('teardown', () => {
const { hub, stop } = await startPluginsServer(
{
WORKER_CONCURRENCY: 2,
LOG_LEVEL: LogLevel.Log,
},
makePiscina,
@@ -102,7 +101,6 @@ describe('teardown', () => {
`)
const { hub, stop } = await startPluginsServer(
{
WORKER_CONCURRENCY: 2,
LOG_LEVEL: LogLevel.Log,
},
makePiscina,

View File

@@ -22,7 +22,6 @@ describe('server', () => {
function createPluginServer(config: Partial<PluginsServerConfig>, capabilities: PluginServerCapabilities) {
return startPluginsServer(
{
WORKER_CONCURRENCY: 2,
LOG_LEVEL: LogLevel.Debug,
...config,
},

View File

@@ -20,7 +20,6 @@ jest.mock('../../../src/utils/status')
jest.setTimeout(60000) // 60 sec timeout
const extraServerConfig: Partial<PluginsServerConfig> = {
WORKER_CONCURRENCY: 2,
LOG_LEVEL: LogLevel.Log,
}