feat(infra): add dual-mode support for Granian and Unit servers (#40847)

Author: Julian Bez (committed via GitHub)
Date: 2025-11-12 11:31:57 +01:00
Parent: 610cb85850
Commit: 73635368bb
10 changed files with 181 additions and 312 deletions

View File

@@ -329,8 +329,7 @@ COPY --from=frontend-build --chown=posthog:posthog /code/frontend/dist /code/fro
# Copy the GeoLite2-City database from the fetch-geoip-db stage.
COPY --from=fetch-geoip-db --chown=posthog:posthog /code/share/GeoLite2-City.mmdb /code/share/GeoLite2-City.mmdb
# Add in the Gunicorn config, custom bin files and Django deps.
COPY --chown=posthog:posthog gunicorn.config.py ./
# Add in custom bin files and Django deps.
COPY --chown=posthog:posthog ./bin ./bin/
COPY --chown=posthog:posthog manage.py manage.py
COPY --chown=posthog:posthog posthog posthog/
@@ -339,9 +338,6 @@ COPY --chown=posthog:posthog common/hogvm common/hogvm/
COPY --chown=posthog:posthog dags dags/
COPY --chown=posthog:posthog products products/
# Keep server command backwards compatible
RUN cp ./bin/docker-server-unit ./bin/docker-server
# Setup ENV.
ENV NODE_ENV=production \
CHROME_BIN=/usr/bin/chromium \

View File

@@ -1,27 +1,4 @@
#!/bin/bash
set -e
./bin/migrate-check
# To ensure we are able to expose metrics from multiple processes, we need to
# provide a directory for `prometheus_client` to store a shared registry.
export PROMETHEUS_MULTIPROC_DIR=$(mktemp -d)
trap 'rm -rf "$PROMETHEUS_MULTIPROC_DIR"' EXIT
export PROMETHEUS_METRICS_EXPORT_PORT=8001
export STATSD_PORT=${STATSD_PORT:-8125}
exec gunicorn posthog.wsgi \
--config gunicorn.config.py \
--bind 0.0.0.0:8000 \
--log-file - \
--log-level info \
--access-logfile - \
--worker-tmp-dir /dev/shm \
--workers=2 \
--threads=8 \
--keep-alive=60 \
--backlog=${GUNICORN_BACKLOG:-1000} \
--worker-class=gthread \
${STATSD_HOST:+--statsd-host $STATSD_HOST:$STATSD_PORT} \
--limit-request-line=16384 $@
# Wrapper script for backward compatibility
# Calls the dual-mode server script (Granian/Unit)
exec "$(dirname "$0")/docker-server-unit" "$@"

View File

@@ -7,15 +7,48 @@ set -e
# provide a directory for `prometheus_client` to store a shared registry.
export PROMETHEUS_MULTIPROC_DIR=$(mktemp -d)
chmod -R 777 $PROMETHEUS_MULTIPROC_DIR
trap 'rm -rf "$PROMETHEUS_MULTIPROC_DIR"' EXIT
export PROMETHEUS_METRICS_EXPORT_PORT=8001
export STATSD_PORT=${STATSD_PORT:-8125}
export NGINX_UNIT_PYTHON_PROTOCOL=${NGINX_UNIT_PYTHON_PROTOCOL:-wsgi}
export NGINX_UNIT_APP_PROCESSES=${NGINX_UNIT_APP_PROCESSES:-4}
envsubst < /docker-entrypoint.d/unit.json.tpl > /docker-entrypoint.d/unit.json
# Dual-mode support: USE_GRANIAN env var switches between Granian and Unit (default)
if [ "${USE_GRANIAN:-false}" = "true" ]; then
echo "🚀 Starting with Granian ASGI server (opt-in via USE_GRANIAN=true)..."
# We need to run as --user root so that nginx unit can proxy the control socket for stats
# However each application is run as "nobody"
exec /usr/local/bin/docker-entrypoint.sh unitd --no-daemon --user root
# Granian configuration
export GRANIAN_WORKERS=${GRANIAN_WORKERS:-4}
export GRANIAN_THREADS=2
# Start metrics HTTP server in background on port 8001
python ./bin/granian_metrics.py &
METRICS_PID=$!
# Combined trap: kill metrics server and cleanup temp directory
trap 'kill $METRICS_PID 2>/dev/null; rm -rf "$PROMETHEUS_MULTIPROC_DIR"' EXIT
exec granian \
--interface asgi \
posthog.asgi:application \
--workers $GRANIAN_WORKERS \
--runtime-threads $GRANIAN_THREADS \
--runtime-mode mt \
--loop uvloop \
--host 0.0.0.0 \
--port 8000 \
--log-level warning \
--access-log \
--respawn-failed-workers
else
echo "🔧 Starting with Nginx Unit server (default, stable)..."
# Cleanup temp directory on exit
trap 'rm -rf "$PROMETHEUS_MULTIPROC_DIR"' EXIT
export NGINX_UNIT_PYTHON_PROTOCOL=${NGINX_UNIT_PYTHON_PROTOCOL:-wsgi}
export NGINX_UNIT_APP_PROCESSES=${NGINX_UNIT_APP_PROCESSES:-4}
envsubst < /docker-entrypoint.d/unit.json.tpl > /docker-entrypoint.d/unit.json
# We need to run as --user root so that nginx unit can proxy the control socket for stats
# However each application is run as "nobody"
exec /usr/local/bin/docker-entrypoint.sh unitd --no-daemon --user root
fi
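For context: both branches depend on prometheus_client's multiprocess mode, which is why PROMETHEUS_MULTIPROC_DIR is exported before either server starts. Each worker writes its metric values to per-PID files in that directory, and a separate process merges them on scrape. A minimal standalone sketch of that mechanism (illustrative only, not code from this commit):

import os
import tempfile

# Must be set before prometheus_client is first imported, mirroring the
# `export PROMETHEUS_MULTIPROC_DIR=$(mktemp -d)` at the top of the script.
os.environ["PROMETHEUS_MULTIPROC_DIR"] = tempfile.mkdtemp()

from prometheus_client import CollectorRegistry, Counter, generate_latest, multiprocess

# In a worker process, metrics are used as normal; values land in per-PID
# files under PROMETHEUS_MULTIPROC_DIR instead of process-local memory.
requests = Counter("demo_requests", "Requests handled")
requests.inc()

# Any process (the metrics server, in this commit) can then merge all
# workers' files into one registry and serve the aggregate.
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
print(generate_latest(registry).decode())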

bin/granian_metrics.py (new executable file, 66 lines)
View File

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
Metrics HTTP server for Granian multi-process setup.

Serves Prometheus metrics on port 8001 (configurable via PROMETHEUS_METRICS_EXPORT_PORT).
Aggregates metrics from all Granian worker processes using prometheus_client multiprocess mode.
Exposes Granian-equivalent metrics to maintain dashboard compatibility with the previous Gunicorn setup.
"""
import os
import time
import logging

from prometheus_client import CollectorRegistry, Gauge, multiprocess, start_http_server

logger = logging.getLogger(__name__)


def create_granian_metrics(registry: CollectorRegistry) -> None:
    """
    Create Granian-equivalent metrics to maintain compatibility with existing Grafana dashboards.

    These metrics mirror the Gunicorn metrics that were previously exposed, allowing existing
    dashboards and alerts to continue working with minimal changes.
    """
    # Read Granian configuration from environment
    workers = int(os.environ.get("GRANIAN_WORKERS", 4))
    threads = int(os.environ.get("GRANIAN_THREADS", 2))

    # Expose static configuration as gauges
    # These provide equivalent metrics to what gunicorn/unit previously exposed
    max_worker_threads = Gauge(
        "granian_max_worker_threads",
        "Maximum number of threads per worker",
        registry=registry,
    )
    max_worker_threads.set(threads)

    total_workers = Gauge(
        "granian_workers_total",
        "Total number of Granian workers configured",
        registry=registry,
    )
    total_workers.set(workers)


def main():
    """Start HTTP server to expose Prometheus metrics from all workers."""
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)

    # Create Granian-specific metrics for dashboard compatibility
    create_granian_metrics(registry)

    port = int(os.environ.get("PROMETHEUS_METRICS_EXPORT_PORT", 8001))
    logger.info(f"Starting Prometheus metrics server on port {port}")
    start_http_server(port=port, registry=registry)

    # Keep the server running
    while True:
        time.sleep(3600)


if __name__ == "__main__":
    main()
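A quick way to sanity-check this exporter once it is running (a hypothetical smoke test, not part of this commit; assumes the default port 8001 is reachable from wherever it runs):

#!/usr/bin/env python3
# Smoke test for bin/granian_metrics.py: fetch /metrics and check the gauges above.
from urllib.request import urlopen

body = urlopen("http://localhost:8001/metrics", timeout=5).read().decode()
for metric in ("granian_workers_total", "granian_max_worker_threads"):
    assert metric in body, f"missing {metric} in /metrics output"
print("exporter OK")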

View File

@@ -25,4 +25,13 @@ else
echo "🐧 Linux detected, binding to Docker bridge gateway: $HOST_BIND"
fi
python ${DEBUG:+ -m debugpy --listen 127.0.0.1:5678} -m uvicorn --reload posthog.asgi:application --host $HOST_BIND --log-level debug --reload-include "posthog/" --reload-include "ee/" --reload-include "products/"
python ${DEBUG:+ -m debugpy --listen 127.0.0.1:5678} -m granian \
--interface asgi \
posthog.asgi:application \
--reload \
--reload-paths ./posthog \
--reload-paths ./ee \
--reload-paths ./products \
--host $HOST_BIND \
--log-level debug \
--workers 1

View File

@@ -302,7 +302,7 @@ docker:
hidden: true
docker:server:
bin_script: docker-server
description: Run gunicorn application server with Prometheus metrics support
description: Run dual-mode server (Granian/Unit) with Prometheus metrics support
hidden: true
docker:server-unit:
bin_script: docker-server-unit
@@ -433,6 +433,10 @@ tools:
bin_script: create-notebook-node.sh
description: Create a new NotebookNode file and update types and editor references
hidden: true
tool:granian-metrics:
bin_script: granian_metrics.py
description: HTTP server that aggregates Prometheus metrics from Granian workers
hidden: true
sync:storage:
bin_script: sync-storage
description: 'TODO: add description for sync-storage'

View File

@@ -1,253 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import time
import socket
import struct
import logging
import threading
import structlog
from prometheus_client import CollectorRegistry, Gauge, multiprocess, start_http_server
loglevel = "error"
keepalive = 120
# Set the timeout to something lower than any downstream's, such that if the
# timeout is hit, the worker will be killed and respawned; the new worker will
# then be able to pick up any connections that were pending on the socket and
# serve those requests before the downstream timeout fires.
timeout = 15
graceful_timeout = 120
METRICS_UPDATE_INTERVAL_SECONDS = int(os.getenv("GUNICORN_METRICS_UPDATE_SECONDS", 5))
def when_ready(server):
"""
    So that we can keep the /metrics endpoint hidden in production, we serve
    the metrics on a separate port, using the prometheus_client.multiprocess
    Collector to pull in data from the worker processes.
"""
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
port = int(os.environ.get("PROMETHEUS_METRICS_EXPORT_PORT", 8001))
start_http_server(port=port, registry=registry)
# Start a thread in the Arbiter that will monitor the backlog on the sockets
# Gunicorn is listening on.
socket_monitor = SocketMonitor(server=server, registry=registry)
socket_monitor.start()
def post_fork(server, worker):
"""
Within each worker process, start a thread that will monitor the thread and
connection pool.
"""
worker_monitor = WorkerMonitor(worker=worker)
worker_monitor.start()
def worker_exit(server, worker):
"""
Ensure that we mark workers as dead with the prometheus_client such that
any cleanup can happen.
"""
multiprocess.mark_process_dead(worker.pid)
class SocketMonitor(threading.Thread):
"""
We have enabled the statsd collector for Gunicorn, but this doesn't include
the backlog due to concerns over portability, see
https://github.com/benoitc/gunicorn/pull/2407
Instead, we expose to Prometheus a gauge that will report the backlog size.
We can then:
1. use this to monitor how well the Gunicorn instances are keeping up with
requests.
2. use this metric to handle HPA scaling e.g. in Kubernetes
"""
def __init__(self, server, registry):
super().__init__()
self.daemon = True
self.server = server
self.registry = registry
def run(self):
"""
        Every METRICS_UPDATE_INTERVAL_SECONDS, check how many connections are
        pending on each server socket.
        We label each listener separately, since limits such as `--backlog`
        apply to each socket individually.
"""
if sys.platform != "linux":
# We use the assumption that we are on Linux to be able to get the
# socket backlog, so if we're not on Linux, we return immediately.
return
backlog_gauge = Gauge(
"gunicorn_pending_connections",
"The number of pending connections on all sockets. Linux only.",
registry=self.registry,
labelnames=["listener"],
)
while True:
for sock in self.server.LISTENERS:
backlog = self.get_backlog(sock=sock)
backlog_gauge.labels(listener=str(sock)).set(backlog)
time.sleep(METRICS_UPDATE_INTERVAL_SECONDS)
def get_backlog(self, sock):
# tcp_info struct from include/uapi/linux/tcp.h
fmt = "B" * 8 + "I" * 24
tcp_info_struct = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 104)
# 12 is tcpi_unacked
return struct.unpack(fmt, tcp_info_struct)[12]
class WorkerMonitor(threading.Thread):
"""
    Gunicorn has statsd logger support that allows us to gather metrics,
    e.g. on the number of workers, requests, and request duration. See
    https://docs.gunicorn.org/en/stable/instrumentation.html for details.
    To get a better picture of pool utilization and the number of accepted
    connections, we start a thread in each worker to report these via
    Prometheus metrics.
"""
def __init__(self, worker):
super().__init__()
self.daemon = True
self.worker = worker
def run(self):
"""
        Every METRICS_UPDATE_INTERVAL_SECONDS, check the status of the thread
        pool and the worker's connection counts.
"""
active_worker_connections = Gauge(
"gunicorn_active_worker_connections",
"Number of active connections.",
labelnames=["pid"],
)
max_worker_connections = Gauge(
"gunicorn_max_worker_connections",
"Maximum worker connections.",
labelnames=["pid"],
)
total_threads = Gauge(
"gunicorn_max_worker_threads",
"Size of the thread pool per worker.",
labelnames=["pid"],
)
active_threads = Gauge(
"gunicorn_active_worker_threads",
"Number of threads actively processing requests.",
labelnames=["pid"],
)
pending_requests = Gauge(
"gunicorn_pending_requests",
"Number of requests that have been read from a connection but have not completed yet",
labelnames=["pid"],
)
max_worker_connections.labels(pid=self.worker.pid).set(self.worker.cfg.worker_connections)
total_threads.labels(pid=self.worker.pid).set(self.worker.cfg.threads)
while True:
active_worker_connections.labels(pid=self.worker.pid).set(self.worker.nr_conns)
active_threads.labels(pid=self.worker.pid).set(min(self.worker.cfg.threads, len(self.worker.futures)))
pending_requests.labels(pid=self.worker.pid).set(len(self.worker.futures))
time.sleep(METRICS_UPDATE_INTERVAL_SECONDS)
LOGGING_FORMATTER_NAME = os.getenv("LOGGING_FORMATTER_NAME", "default")
# Setup stdlib logging to be handled by Structlog
def add_pid_and_tid(
logger: logging.Logger, method_name: str, event_dict: structlog.types.EventDict
) -> structlog.types.EventDict:
event_dict["pid"] = os.getpid()
event_dict["tid"] = threading.get_ident()
return event_dict
pre_chain = [
# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
add_pid_and_tid,
structlog.processors.TimeStamper(fmt="iso"),
]
# This is a copy of the default logging config for Gunicorn, with additions to:
#
# 1. not propagate loggers to the root handlers (otherwise we get duplicate log
#    lines)
# 2. use structlog for processing of log records
#
# See
# https://github.com/benoitc/gunicorn/blob/0b953b803786997d633d66c0f7c7b290df75e07c/gunicorn/glogging.py#L48
# for the default log settings.
logconfig_dict = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"default": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.dev.ConsoleRenderer(colors=True),
"foreign_pre_chain": pre_chain,
},
"json": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.processors.JSONRenderer(),
"foreign_pre_chain": pre_chain,
},
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"gunicorn.error": {
"level": "INFO",
"handlers": ["error_console"],
"propagate": False,
"qualname": "gunicorn.error",
},
"gunicorn.access": {
"level": "INFO",
"handlers": ["console"],
"propagate": False,
"qualname": "gunicorn.access",
},
},
"handlers": {
"error_console": {
"class": "logging.StreamHandler",
"formatter": LOGGING_FORMATTER_NAME,
"stream": "ext://sys.stderr",
},
"console": {
"class": "logging.StreamHandler",
"formatter": LOGGING_FORMATTER_NAME,
"stream": "ext://sys.stdout",
},
},
}
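The backlog probe above is worth keeping as a recipe even though the file is deleted: on Linux, getsockopt(TCP_INFO) on a LISTEN socket returns the tcp_info struct, whose tcpi_unacked field (index 12, after the eight leading byte-sized fields) reports the accept-queue depth. A standalone, Linux-only sketch of the same trick (illustration, not part of this commit):

import socket
import struct

def listen_backlog(sock: socket.socket) -> int:
    # Same layout as SocketMonitor.get_backlog above: 8 unsigned bytes then
    # 24 unsigned ints (104 bytes total); index 12 is tcpi_unacked, which on
    # LISTEN sockets holds the number of connections waiting to be accepted.
    fmt = "B" * 8 + "I" * 24
    tcp_info = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 104)
    return struct.unpack(fmt, tcp_info)[12]

if __name__ == "__main__":
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("127.0.0.1", 0))
    srv.listen(8)
    # Complete a handshake but never accept(), so the connection stays queued.
    cli = socket.create_connection(srv.getsockname())
    print("pending connections:", listen_backlog(srv))  # expected: 1
    cli.close()
    srv.close()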

View File

@@ -247,9 +247,9 @@ urlpatterns = [
if settings.DEBUG:
# If we have DEBUG=1 set, then let's expose the metrics for debugging. Note
# that in production we expose these metrics on a separate port, to ensure
# external clients cannot see them. See the gunicorn setup for details on
# what we do.
# that in production we expose these metrics on a separate port (8001), to ensure
# external clients cannot see them. See bin/granian_metrics.py and bin/unit_metrics.py
# for details on the production metrics setup.
urlpatterns.append(path("_metrics", ExportToDjangoView))
# Temporal codec server endpoint for UI decryption - locally only for now
urlpatterns.append(path("decode", decode_payloads, name="temporal_decode"))

View File

@@ -57,8 +57,8 @@ dependencies = [
"google-ads==26.1.0",
"google-cloud-bigquery==3.26",
"google-genai==1.46.0",
"granian[uvloop,reload,pname]==2.5.5",
"gspread==6.2.1",
"gunicorn==20.1.0",
"hogql-parser==1.2.11",
"infi-clickhouse-orm",
"jsonref==1.1.0",
@@ -358,7 +358,6 @@ python-version = "3.12"
[tool.ty.src]
exclude = [
"bin/**",
"gunicorn.config.py",
"manage.py",
"posthog/hogql/grammar/**",
]

uv.lock (generated)
View File

@@ -2404,6 +2404,38 @@ requests = [
{ name = "requests-toolbelt" },
]
[[package]]
name = "granian"
version = "2.5.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
]
sdist = { url = "https://files.pythonhosted.org/packages/07/85/3f5a1258567718c75719f5206d33457f7bd2b091b0fee0a618a395fda758/granian-2.5.5.tar.gz", hash = "sha256:da785fae71cb45e92ce3fbb8633dc48b12f6a5055a7358226d78176967a5d2c9", size = 112143, upload-time = "2025-10-07T17:39:38.751Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9b/92/e4ea9ba8d04d6e2196db3524caa9b58c4739e36c4e9dab69b0db7e5cbc2a/granian-2.5.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:4b146e435799aa09cd9ccc46498d217757f76b77c59961d17e0d933e7b54469a", size = 2833889, upload-time = "2025-10-07T17:37:39.496Z" },
{ url = "https://files.pythonhosted.org/packages/d5/80/4d21a80f988a72173389663e2974417cc89bb00bdec11c23e53846478604/granian-2.5.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:909168f74eccee90e7909b0396ae407f6ec8cc7e793b8fe5ce84f522a3ef6b77", size = 2517773, upload-time = "2025-10-07T17:37:41.098Z" },
{ url = "https://files.pythonhosted.org/packages/e4/69/16218292c97dbee42b1a38cb0db37866a90f5cafffd1ddf84648b39bb9f1/granian-2.5.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4a853e3d6fc1ea8eb80c00bd794a887c885163e08d01848dd07aa6ffe68926f", size = 3010194, upload-time = "2025-10-07T17:37:42.783Z" },
{ url = "https://files.pythonhosted.org/packages/09/48/ec988c6a5d025e1433d50f828893050d5228f4153a2c46b8d5967741c17f/granian-2.5.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6baa556ea84a078f5bb444615792032cfcfd2b6764e07915ecec0aec53f272f3", size = 2834463, upload-time = "2025-10-07T17:37:44.491Z" },
{ url = "https://files.pythonhosted.org/packages/c8/92/2acfc39b45089702098c647e3417b9c05af4698e2f0d9b53292e56bf8eb9/granian-2.5.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce38e5cbb50d3098c8a79362e2b9e598d56001595860060185aa46f94a73776d", size = 3117696, upload-time = "2025-10-07T17:37:46.058Z" },
{ url = "https://files.pythonhosted.org/packages/7b/43/80ff0139cc0973787433f6bfbe0b6ecb5a700ea39d8c85c1b9eca13b7e2b/granian-2.5.5-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a40757947e422bed1c14703bbcb424417f1c2f9a27c74d19456b7b7af265992b", size = 2918702, upload-time = "2025-10-07T17:37:47.461Z" },
{ url = "https://files.pythonhosted.org/packages/d7/49/a2fda46a999d97330a22de1e1b2213635b5e4a97e1ebd646ca2c74a6ef50/granian-2.5.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:806f8bb7b1483552a1a21820b242b395675d5011764dd0fabebc695f5d9f4bee", size = 2981833, upload-time = "2025-10-07T17:37:48.821Z" },
{ url = "https://files.pythonhosted.org/packages/6c/13/7318be6322e0c4c5d196db44ff425df1e0574f224934507aa1093b872424/granian-2.5.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:bd670dc87c2d09d7373473b9d3330897207900e86c17a8220c4abec78ef4e4a7", size = 3146729, upload-time = "2025-10-07T17:37:50.557Z" },
{ url = "https://files.pythonhosted.org/packages/35/34/eec8a8b57de273c0eb1593b607d443d311b6df2eb46db8c493b4ae039718/granian-2.5.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bdf7f68283c4449253f9bc57ac69d63216eacd463a97501915b5636386d12175", size = 3208409, upload-time = "2025-10-07T17:37:52.801Z" },
{ url = "https://files.pythonhosted.org/packages/f3/2b/8455add059d45514d952bf9cf110ce3b3a9c0ecfaa63e2de07d994b40ed1/granian-2.5.5-cp312-cp312-win_amd64.whl", hash = "sha256:32e4a39f8850298f1fe6900a871db2a1440aba0208b39363e7ca96e81ef2340f", size = 2179015, upload-time = "2025-10-07T17:37:54.594Z" },
]
[package.optional-dependencies]
pname = [
{ name = "setproctitle" },
]
reload = [
{ name = "watchfiles" },
]
uvloop = [
{ name = "uvloop", marker = "platform_python_implementation == 'CPython' and sys_platform != 'win32'" },
]
[[package]]
name = "graphene"
version = "3.4.3"
@@ -2515,18 +2547,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/27/76/563fb20dedd0e12794d9a12cfe0198458cc0501fdc7b034eee2166d035d5/gspread-6.2.1-py3-none-any.whl", hash = "sha256:6d4ec9f1c23ae3c704a9219026dac01f2b328ac70b96f1495055d453c4c184db", size = 59977, upload-time = "2025-05-14T15:56:24.014Z" },
]
[[package]]
name = "gunicorn"
version = "20.1.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "setuptools" },
]
sdist = { url = "https://files.pythonhosted.org/packages/28/5b/0d1f0296485a6af03366604142ea8f19f0833894db3512a40ed07b2a56dd/gunicorn-20.1.0.tar.gz", hash = "sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8", size = 370601, upload-time = "2021-03-27T01:54:37.202Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e4/dd/5b190393e6066286773a67dfcc2f9492058e9b57c4867a95f1ba5caf0a83/gunicorn-20.1.0-py3-none-any.whl", hash = "sha256:9dcc4547dbb1cb284accfb15ab5667a0e5d1881cc443e0677b4882a4067a807e", size = 79531, upload-time = "2021-04-27T12:16:23.375Z" },
]
[[package]]
name = "h11"
version = "0.16.0"
@@ -4264,8 +4284,8 @@ dependencies = [
{ name = "google-ads" },
{ name = "google-cloud-bigquery" },
{ name = "google-genai" },
{ name = "granian", extra = ["pname", "reload", "uvloop"] },
{ name = "gspread" },
{ name = "gunicorn" },
{ name = "hogql-parser" },
{ name = "infi-clickhouse-orm" },
{ name = "jsonref" },
@@ -4510,8 +4530,8 @@ requires-dist = [
{ name = "google-ads", specifier = "==26.1.0" },
{ name = "google-cloud-bigquery", specifier = "==3.26" },
{ name = "google-genai", specifier = "==1.46.0" },
{ name = "granian", extras = ["uvloop", "reload", "pname"], specifier = "==2.5.5" },
{ name = "gspread", specifier = "==6.2.1" },
{ name = "gunicorn", specifier = "==20.1.0" },
{ name = "hogql-parser", specifier = "==1.2.11" },
{ name = "infi-clickhouse-orm", git = "https://github.com/PostHog/infi.clickhouse_orm?rev=9578c79f29635ee2c1d01b7979e89adab8383de2" },
{ name = "jsonref", specifier = "==1.1.0" },
@@ -5885,6 +5905,24 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a6/24/4d91e05817e92e3a61c8a21e08fd0f390f5301f1c448b137c57c4bc6e543/semver-3.0.4-py3-none-any.whl", hash = "sha256:9c824d87ba7f7ab4a1890799cec8596f15c1241cb473404ea1cb0c55e4b04746", size = 17912, upload-time = "2025-01-24T13:19:24.949Z" },
]
[[package]]
name = "setproctitle"
version = "1.3.7"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8d/48/49393a96a2eef1ab418b17475fb92b8fcfad83d099e678751b05472e69de/setproctitle-1.3.7.tar.gz", hash = "sha256:bc2bc917691c1537d5b9bca1468437176809c7e11e5694ca79a9ca12345dcb9e", size = 27002, upload-time = "2025-09-05T12:51:25.278Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fb/f0/2dc88e842077719d7384d86cc47403e5102810492b33680e7dadcee64cd8/setproctitle-1.3.7-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2dc99aec591ab6126e636b11035a70991bc1ab7a261da428491a40b84376654e", size = 18049, upload-time = "2025-09-05T12:49:36.241Z" },
{ url = "https://files.pythonhosted.org/packages/f0/b4/50940504466689cda65680c9e9a1e518e5750c10490639fa687489ac7013/setproctitle-1.3.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdd8aa571b7aa39840fdbea620e308a19691ff595c3a10231e9ee830339dd798", size = 13079, upload-time = "2025-09-05T12:49:38.088Z" },
{ url = "https://files.pythonhosted.org/packages/d0/99/71630546b9395b095f4082be41165d1078204d1696c2d9baade3de3202d0/setproctitle-1.3.7-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2906b6c7959cdb75f46159bf0acd8cc9906cf1361c9e1ded0d065fe8f9039629", size = 32932, upload-time = "2025-09-05T12:49:39.271Z" },
{ url = "https://files.pythonhosted.org/packages/50/22/cee06af4ffcfb0e8aba047bd44f5262e644199ae7527ae2c1f672b86495c/setproctitle-1.3.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6915964a6dda07920a1159321dcd6d94fc7fc526f815ca08a8063aeca3c204f1", size = 33736, upload-time = "2025-09-05T12:49:40.565Z" },
{ url = "https://files.pythonhosted.org/packages/5c/00/a5949a8bb06ef5e7df214fc393bb2fb6aedf0479b17214e57750dfdd0f24/setproctitle-1.3.7-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cff72899861c765bd4021d1ff1c68d60edc129711a2fdba77f9cb69ef726a8b6", size = 35605, upload-time = "2025-09-05T12:49:42.362Z" },
{ url = "https://files.pythonhosted.org/packages/b0/3a/50caca532a9343828e3bf5778c7a84d6c737a249b1796d50dd680290594d/setproctitle-1.3.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b7cb05bd446687ff816a3aaaf831047fc4c364feff7ada94a66024f1367b448c", size = 33143, upload-time = "2025-09-05T12:49:43.515Z" },
{ url = "https://files.pythonhosted.org/packages/ca/14/b843a251296ce55e2e17c017d6b9f11ce0d3d070e9265de4ecad948b913d/setproctitle-1.3.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3a57b9a00de8cae7e2a1f7b9f0c2ac7b69372159e16a7708aa2f38f9e5cc987a", size = 34434, upload-time = "2025-09-05T12:49:45.31Z" },
{ url = "https://files.pythonhosted.org/packages/c8/b7/06145c238c0a6d2c4bc881f8be230bb9f36d2bf51aff7bddcb796d5eed67/setproctitle-1.3.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d8828b356114f6b308b04afe398ed93803d7fca4a955dd3abe84430e28d33739", size = 32795, upload-time = "2025-09-05T12:49:46.419Z" },
{ url = "https://files.pythonhosted.org/packages/ef/dc/ef76a81fac9bf27b84ed23df19c1f67391a753eed6e3c2254ebcb5133f56/setproctitle-1.3.7-cp312-cp312-win32.whl", hash = "sha256:b0304f905efc845829ac2bc791ddebb976db2885f6171f4a3de678d7ee3f7c9f", size = 12552, upload-time = "2025-09-05T12:49:47.635Z" },
{ url = "https://files.pythonhosted.org/packages/e2/5b/a9fe517912cd6e28cf43a212b80cb679ff179a91b623138a99796d7d18a0/setproctitle-1.3.7-cp312-cp312-win_amd64.whl", hash = "sha256:9888ceb4faea3116cf02a920ff00bfbc8cc899743e4b4ac914b03625bdc3c300", size = 13247, upload-time = "2025-09-05T12:49:49.16Z" },
]
[[package]]
name = "setuptools"
version = "80.9.0"