chore: print session messages as we go, save memory (#14671)

* chore: print session messages as we go, save memory

Wanted to make sure we're not getting close to any memory limits when
generating, so stream messages to Kafka as soon as possible instead of
buffering them all in memory first.

* wip

* wip
Author: Harry Waye
Date: 2023-03-10 10:52:48 +00:00
Committed by: GitHub
Parent: 42401f6664
Commit: 2e4c4d4bb3
3 changed files with 10 additions and 15 deletions
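
In short: the generator used to build the full list of snapshot messages in memory and only print them at the end, so memory use grew with SESSIONS_COUNT; now it writes each message to stdout the moment it is built, and the load-test script pipes that output onwards to Kafka. A minimal sketch of the pattern, with an illustrative placeholder payload (the real generate_snapshot_messages builds full and incremental snapshot events with Faker and numpy):

# Minimal sketch of the streaming pattern this commit switches to (illustrative
# payload only; not the real generator).
import json
from sys import stdout


def generate_messages(count: int) -> None:
    # Before: every message was appended to a list and returned, so memory grew
    # with `count`. Now each message is written out as soon as it is built and
    # nothing is retained.
    for i in range(count):
        message = {"session_id": f"session-{i}", "data": "..."}  # placeholder
        stdout.write(json.dumps(message))
        stdout.write("\n")


if __name__ == "__main__":
    generate_messages(2500)

The load-test script consumes this newline-delimited JSON on stdout and pipes it into Kafka via docker compose, so neither side has to hold the whole batch in memory.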

File: GitHub Actions CI workflow (session recordings load test job)

@@ -332,7 +332,7 @@ jobs:
       - name: Run load test
         run: |
           cd plugin-server
-          SESSONS_COUNT=2000 ./bin/ci_session_recordings_load_test.sh
+          SESSIONS_COUNT=2500 ./bin/ci_session_recordings_load_test.sh
       - name: Upload profile as artifact
         uses: actions/upload-artifact@v2

File: plugin-server/bin/ci_session_recordings_load_test.sh

@@ -23,7 +23,7 @@ DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 # The number of sessions to generate and ingest, defaulting to 10 if not already
 # set.
-SESSONS_COUNT=${SESSONS_COUNT:-10}
+SESSIONS_COUNT=${SESSIONS_COUNT:-10}
 TOKEN=e2e_token_1239 # Created by the setup_dev management command.
 LOG_FILE=$(mktemp)
@@ -87,9 +87,9 @@ docker compose \
     --topic $SESSION_RECORDING_EVENTS_TOPIC >/dev/null 2>&1
 # Generate the session recording events and send them to Kafka.
-echo "Generating $SESSONS_COUNT session recording events"
+echo "Generating $SESSIONS_COUNT session recording events"
 "$DIR"/generate_session_recordings_messages.py \
-    --count "$SESSONS_COUNT" \
+    --count "$SESSIONS_COUNT" \
     --token $TOKEN |
     docker compose \
         -f "$DIR"/../../docker-compose.dev.yml exec \
@@ -175,7 +175,7 @@ fi
 # second that were ingested.
 echo "Ingestion lag dropped to zero after $SECONDS seconds"
-SESSIONS_PER_SECOND=$(echo "$SESSONS_COUNT $SECONDS" | awk '{printf "%.2f", $1 / $2}')
+SESSIONS_PER_SECOND=$(echo "$SESSIONS_COUNT $SECONDS" | awk '{printf "%.2f", $1 / $2}')
 echo "Sessions per second: $SESSIONS_PER_SECOND" | tee -a "$GITHUB_STEP_SUMMARY"
 # Kill the plugin server process and poll for up to 60 seconds for it to exit.
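
For reference, the throughput figure at the end of the script is just sessions divided by elapsed seconds, and ${SESSIONS_COUNT:-10} means "use the environment variable if set, otherwise 10". Roughly, in Python terms (the 100-second elapsed time below is a made-up figure for illustration):

# Rough Python equivalent of the script's env-var default and throughput math
# (illustrative only; the real logic is the bash/awk above).
import os

sessions_count = int(os.environ.get("SESSIONS_COUNT", "10"))  # ${SESSIONS_COUNT:-10}
elapsed_seconds = 100  # hypothetical value of bash's $SECONDS when lag hits zero
print(f"Sessions per second: {sessions_count / elapsed_seconds:.2f}")

With the workflow's SESSIONS_COUNT=2500 and that hypothetical 100-second run, this would report 25.00 sessions per second.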

File: plugin-server/bin/generate_session_recordings_messages.py

@@ -260,7 +260,6 @@ def generate_snapshot_messages(
     ip = faker.ipv4()
     site_url = faker.url()
-    snapshot_messages = []
     for full_snapshot_count, incremental_snapshot_count in zip(
         full_snapshot_count_samples, incremental_snapshot_count_samples
     ):
@@ -334,7 +333,8 @@ def generate_snapshot_messages(
             "token": token,
         }
-        snapshot_messages.append(message)
+        stdout.write(json.dumps(message))
+        stdout.write("\n")
         for incremental_snapshot_size in incremental_snapshot_size_samples[:incremental_snapshot_count]:
             incremental_snapshot_data = faker.pystr(
@@ -381,9 +381,8 @@ def generate_snapshot_messages(
                 "token": token,
             }
-            snapshot_messages.append(message)
-    return snapshot_messages
+            stdout.write(json.dumps(message))
+            stdout.write("\n")

 def main():
@@ -402,7 +401,7 @@ def main():
     numpy.random.seed(args.seed_value)
-    snapshot_messages = generate_snapshot_messages(
+    generate_snapshot_messages(
         faker=faker,
         count=args.count,
         full_snapshot_size_mean=args.full_snapshot_size_mean,
@@ -418,10 +417,6 @@ def main():
         verbose=args.verbose,
     )
-    for message in snapshot_messages:
-        stdout.write(json.dumps(message))
-        stdout.write("\n")

 if __name__ == "__main__":
     main()
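
With main() no longer collecting a return value, the script is purely a producer of newline-delimited JSON on stdout; the load test pipes that output into a Kafka producer running under docker compose. As a hedged sketch of the same streaming idea in pure Python (not what the repo does; the broker address and topic name below are assumptions, and kafka-python is an assumed dependency), a consumer of that stream could forward each line to Kafka without ever buffering the whole set:

# Illustrative only: forward newline-delimited JSON from stdin to Kafka one
# message at a time. The repo's load test instead pipes into a Kafka producer
# via docker compose; the broker and topic here are assumptions.
import sys

from kafka import KafkaProducer  # kafka-python, assumed for this sketch

producer = KafkaProducer(bootstrap_servers="localhost:9092")
for line in sys.stdin:
    line = line.strip()
    if line:
        producer.send("session_recording_events", value=line.encode("utf-8"))
producer.flush()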