feat(flags): Add evaluation tags for environment-specific feature flags (#37719)

Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
Dylan Martin
2025-09-25 19:42:26 -04:00
committed by GitHub
parent f837ae75bd
commit db43b47e6f
30 changed files with 2648 additions and 2429 deletions

View File

@@ -5,7 +5,7 @@
- Environment:
- Auto-detect flox environment before running terminal commands
- If flox is available and you run into trouble executing commands, try the `flox activate -- bash -c "<command>"` pattern
- Never use `flox activate` in interactive sessions (it hangs)
- Never use `flox activate` in interactive sessions (it hangs if you try)
- Tests:
- All tests: `pytest`
- Single test: `pytest path/to/test.py::TestClass::test_method`

View File

@@ -807,7 +807,7 @@ class TestAccessControlQueryCounts(BaseAccessControlTest):
for i in range(10):
FeatureFlag.objects.create(team=self.team, created_by=self.other_user, key=f"flag-{i}")
baseline = 15 # This is a lot! There is currently an n+1 issue with the legacy access control system
baseline = 16 # This is a lot! There is currently an n+1 issue with the legacy access control system
with self.assertNumQueries(baseline + 7): # org, roles, preloaded permissions acs, preloaded acs for the list
self.client.get("/api/projects/@current/feature_flags/")

View File

@@ -55,7 +55,7 @@ class TestExperimentCRUD(APILicensedTest):
format="json",
).json()
with self.assertNumQueries(FuzzyInt(16, 17)):
with self.assertNumQueries(FuzzyInt(17, 18)):
response = self.client.get(f"/api/projects/{self.team.id}/experiments")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@@ -72,7 +72,7 @@ class TestExperimentCRUD(APILicensedTest):
format="json",
).json()
with self.assertNumQueries(FuzzyInt(16, 17)):
with self.assertNumQueries(FuzzyInt(21, 22)):
response = self.client.get(f"/api/projects/{self.team.id}/experiments")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@@ -1701,7 +1701,7 @@ class TestExperimentCRUD(APILicensedTest):
).json()
# TODO: Make sure permission bool doesn't cause n + 1
with self.assertNumQueries(21):
with self.assertNumQueries(22):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
result = response.json()

View File

@@ -25,7 +25,7 @@ from posthog.api.documentation import extend_schema
from posthog.api.forbid_destroy_model import ForbidDestroyModel
from posthog.api.routing import TeamAndOrgViewSetMixin
from posthog.api.shared import UserBasicSerializer
from posthog.api.tagged_item import TaggedItemSerializerMixin, TaggedItemViewSetMixin
from posthog.api.tagged_item import TaggedItemSerializerMixin, TaggedItemViewSetMixin, tagify
from posthog.api.utils import ClassicBehaviorBooleanFieldSerializer, action
from posthog.auth import PersonalAPIKeyAuthentication, ProjectSecretAPIKeyAuthentication, TemporaryTokenAuthentication
from posthog.constants import SURVEY_TARGETING_FLAG_PREFIX, FlagRequestType
@@ -39,14 +39,20 @@ from posthog.helpers.encrypted_flag_payloads import (
encrypt_flag_payloads,
get_decrypted_flag_payloads,
)
from posthog.models import FeatureFlag
from posthog.models import FeatureFlag, Tag
from posthog.models.activity_logging.activity_log import Detail, changes_between, load_activity, log_activity
from posthog.models.activity_logging.activity_page import activity_page_response
from posthog.models.activity_logging.model_activity import ImpersonatedContext
from posthog.models.cohort import Cohort
from posthog.models.cohort.util import get_all_cohort_dependencies
from posthog.models.experiment import Experiment
from posthog.models.feature_flag import FeatureFlagDashboards, get_all_feature_flags, get_user_blast_radius
from posthog.models.feature_flag import (
FeatureFlagDashboards,
FeatureFlagEvaluationTag,
get_all_feature_flags,
get_user_blast_radius,
set_feature_flags_for_team_in_cache,
)
from posthog.models.feature_flag.flag_analytics import increment_request_count
from posthog.models.feature_flag.flag_matching import check_flag_evaluation_query_is_ok
from posthog.models.feature_flag.flag_status import FeatureFlagStatus, FeatureFlagStatusChecker
@@ -113,8 +119,135 @@ class RemoteConfigThrottle(BurstRateThrottle):
return super().allow_request(request, view)
class EvaluationTagSerializerMixin(serializers.Serializer):
"""
Serializer mixin that handles evaluation tags for feature flags.
Evaluation tags mark which organizational tags also serve as runtime evaluation constraints.
Note: SDK clients must send 'evaluation_environments' in their flag evaluation requests
for these constraints to take effect. Without this parameter, all flags are evaluated
regardless of their evaluation tags.
"""
evaluation_tags = serializers.ListField(required=False, write_only=True)
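For context on the SDK side, a minimal sketch of a flag evaluation request that opts into environment filtering (the payload shape around `evaluation_environments` is illustrative, not taken from this commit):

```python
# Hypothetical request body sent to the flags service.
payload = {
    "api_key": "<project_api_key>",  # placeholder
    "distinct_id": "user-123",
    "evaluation_environments": ["app", "docs"],  # constrains which flags are evaluated
}
# Omitting evaluation_environments (or sending []) evaluates all flags,
# regardless of their evaluation tags.
```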
def validate(self, attrs):
"""Validate that evaluation_tags are a subset of tags.
This ensures that evaluation tags (which control runtime evaluation)
are always a subset of organizational tags. This maintains the conceptual
model where evaluation tags are tags that ALSO serve as constraints.
"""
attrs = super().validate(attrs)
# Only validate if we have initial_data (not during partial updates without these fields)
if not hasattr(self, "initial_data"):
return attrs
# Get evaluation_tags from the request
evaluation_tags = self.initial_data.get("evaluation_tags")
# Only validate if evaluation_tags are provided and non-empty
# Note: evaluation_tags=[] is valid (clears all evaluation tags)
if evaluation_tags is not None and evaluation_tags:
# Get tags from initial_data, defaulting to empty list if not provided
# Important: We validate against the raw request data, not processed attrs,
# because TaggedItemSerializerMixin handles tags separately
tags = self.initial_data.get("tags", [])
# Normalize both lists using tagify for consistent comparison
# tagify handles case normalization and special characters
# NB: this _does_ make flag updates more expensive whenever we update flags with tags.
# It's a small use case, but we wanted to call it out as a potential (but unlikely) bottleneck.
normalized_tags = {tagify(t) for t in tags or []}
normalized_eval_tags = {tagify(t) for t in evaluation_tags}
# Evaluation tags must be a subset of organizational tags
invalid_tags = normalized_eval_tags - normalized_tags
if invalid_tags:
raise serializers.ValidationError(
f"Evaluation tags must be a subset of tags. Invalid evaluation tags: {', '.join(sorted(invalid_tags))}"
)
return attrs
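To make the subset rule concrete, a worked example of the comparison above (values illustrative; `tagify` provides the normalization):

```python
# Request: tags=["App", "docs"], evaluation_tags=["app", "marketing"]
normalized_tags = {"app", "docs"}            # {tagify(t) for t in tags}
normalized_eval_tags = {"app", "marketing"}  # {tagify(t) for t in evaluation_tags}
invalid_tags = normalized_eval_tags - normalized_tags  # {"marketing"}
# -> ValidationError: "Evaluation tags must be a subset of tags. ... marketing"
```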
def _attempt_set_evaluation_tags(self, evaluation_tags, obj):
"""Update evaluation tags for a feature flag using efficient diff logic.
Instead of deleting all tags and recreating them (which causes unnecessary
DB operations and activity logs), we calculate the diff and only modify
what has actually changed.
"""
if not obj or evaluation_tags is None:
return
# Normalize and dedupe tags (same as TaggedItemSerializerMixin does)
# evaluation_tags=[] is valid and means "clear all evaluation tags"
deduped_tags = list({tagify(t) for t in evaluation_tags or []})
# Get current evaluation tags from the database
# We fetch the tag names directly to avoid loading full objects
current_eval_tags = set(
FeatureFlagEvaluationTag.objects.filter(feature_flag=obj)
.select_related("tag")
.values_list("tag__name", flat=True)
)
# Calculate the diff: what needs to be added vs removed
# This minimizes database operations and activity log noise
deduped_tags_set = set(deduped_tags)
tags_to_add = deduped_tags_set - current_eval_tags
tags_to_remove = current_eval_tags - deduped_tags_set
# Remove evaluation tags that are no longer needed
if tags_to_remove:
FeatureFlagEvaluationTag.objects.filter(feature_flag=obj, tag__name__in=tags_to_remove).delete()
# Add new evaluation tags
if tags_to_add:
# Create tags if they don't exist (matching TaggedItemSerializerMixin behavior)
# Note: our validation ensures these are a subset of organizational tags,
# but we still create them here for consistency with TaggedItemSerializerMixin
for tag_name in tags_to_add:
tag, _ = Tag.objects.get_or_create(name=tag_name, team_id=obj.team_id)
FeatureFlagEvaluationTag.objects.create(feature_flag=obj, tag=tag)
# Only invalidate cache if there were actual changes
# This avoids unnecessary cache churn on no-op updates
if tags_to_add or tags_to_remove:
try:
set_feature_flags_for_team_in_cache(obj.team.project_id)
except Exception as e:
capture_exception(e)  # don't fail the update if cache invalidation fails
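A worked example of the diff logic: updating a flag's evaluation tags from ["app", "docs"] to ["docs", "marketing"] touches only two rows instead of deleting and recreating all three:

```python
current = {"app", "docs"}        # existing FeatureFlagEvaluationTag rows
desired = {"docs", "marketing"}  # deduped request payload
tags_to_add = desired - current      # {"marketing"}: one INSERT (after Tag.get_or_create)
tags_to_remove = current - desired   # {"app"}: one DELETE
# "docs" is untouched, so it generates no DB writes or activity-log noise.
```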
def to_representation(self, obj):
ret = super().to_representation(obj)
# Include evaluation tags in the serialized output
if hasattr(obj, "evaluation_tags"):
# Django's prefetch_related creates a cache in _prefetched_objects_cache.
# If the viewset used prefetch_related (which it should for performance),
# we can access the tags without hitting the database again.
if hasattr(obj, "_prefetched_objects_cache") and "evaluation_tags" in obj._prefetched_objects_cache:
# Use prefetched data (already in memory) - no DB query
ret["evaluation_tags"] = [et.tag.name for et in obj.evaluation_tags.all()]
else:
# Fallback to database query with select_related to minimize queries
# This should rarely happen as the viewset prefetches evaluation_tags
ret["evaluation_tags"] = [et.tag.name for et in obj.evaluation_tags.select_related("tag").all()]
else:
ret["evaluation_tags"] = []
return ret
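The resulting representation carries evaluation tags alongside regular tags; a hypothetical serialized flag, abridged to the relevant fields:

```python
{
    "key": "flag-with-eval-tags",
    "tags": ["app", "marketing", "docs"],  # organizational tags
    "evaluation_tags": ["app", "docs"],    # the subset that constrains evaluation
    # ...remaining FeatureFlagSerializer fields...
}
```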
class FeatureFlagSerializer(
TaggedItemSerializerMixin, UserAccessControlSerializerMixin, serializers.HyperlinkedModelSerializer
TaggedItemSerializerMixin,
EvaluationTagSerializerMixin,
UserAccessControlSerializerMixin,
serializers.HyperlinkedModelSerializer,
):
created_by = UserBasicSerializer(read_only=True)
version = serializers.IntegerField(required=False, default=0)
@@ -179,6 +312,7 @@ class FeatureFlagSerializer(
"performed_rollback",
"can_edit",
"tags",
"evaluation_tags",
"usage_dashboard",
"analytics_dashboards",
"has_enriched_analytics",
@@ -495,6 +629,7 @@ class FeatureFlagSerializer(
validated_data["team_id"] = self.context["team_id"]
validated_data["version"] = 1 # This is the first version of the feature flag
tags = validated_data.pop("tags", None) # tags are created separately below as global tag relationships
evaluation_tags = validated_data.pop("evaluation_tags", None) # evaluation tags are created separately
creation_context = validated_data.pop(
"creation_context", "feature_flags"
) # default to "feature_flags" if an alternative value is not provided
@@ -520,6 +655,7 @@ class FeatureFlagSerializer(
instance: FeatureFlag = super().create(validated_data)
self._attempt_set_tags(tags, instance)
self._attempt_set_evaluation_tags(evaluation_tags, instance)
if should_create_usage_dashboard:
_create_usage_dashboard(instance, request.user)
@@ -541,6 +677,9 @@ class FeatureFlagSerializer(
request.data = {}
validated_data["last_modified_by"] = request.user
# Prevent DRF from attempting to set reverse FK relation directly
# We manage evaluation tags via _attempt_set_evaluation_tags below
validated_data.pop("evaluation_tags", None)
if "deleted" in validated_data and validated_data["deleted"] is True:
# Check for linked early access features
@@ -618,6 +757,10 @@ class FeatureFlagSerializer(
# Continue with the update outside of the transaction. This is an intentional choice
# to avoid deadlocks. Not to mention, before making the concurrency changes, these
# updates were already occurring outside of a transaction.
# Handle evaluation tags (uses initial_data like TaggedItemSerializerMixin does)
self._attempt_set_evaluation_tags(self.initial_data.get("evaluation_tags"), instance)
analytics_dashboards = validated_data.pop("analytics_dashboards", None)
if analytics_dashboards is not None:
@@ -811,6 +954,7 @@ def _update_feature_flag_dashboard(feature_flag: FeatureFlag, old_key: str) -> N
class MinimalFeatureFlagSerializer(serializers.ModelSerializer):
filters = serializers.DictField(source="get_filters", required=False)
evaluation_tags = serializers.SerializerMethodField()
class Meta:
model = FeatureFlag
@@ -826,8 +970,19 @@ class MinimalFeatureFlagSerializer(serializers.ModelSerializer):
"has_encrypted_payloads",
"version",
"evaluation_runtime",
"evaluation_tags",
]
def get_evaluation_tags(self, feature_flag: FeatureFlag) -> list[str]:
# Prefer cached/provided names; fall back to the relation.
try:
names = getattr(feature_flag, "evaluation_tag_names", None)
if names is None:
names = [et.tag.name for et in feature_flag.evaluation_tags.select_related("tag").all()]
return names or []
except Exception:
return []
class FeatureFlagViewSet(
TeamAndOrgViewSetMixin,
@@ -924,11 +1079,24 @@ class FeatureFlagViewSet(
return queryset
def safely_get_queryset(self, queryset) -> QuerySet:
# Always prefetch experiment_set since it's used in both list and retrieve
queryset = queryset.prefetch_related(
Prefetch("experiment_set", queryset=Experiment.objects.filter(deleted=False), to_attr="_active_experiments")
)
# Prefetch evaluation tags to avoid N+1 queries when serializing.
# Without this, each flag would trigger a separate query to fetch its
# evaluation tags. With prefetch_related, Django loads all evaluation
# tags in a single query and caches them on the model instances.
queryset = queryset.prefetch_related(
Prefetch(
"evaluation_tags",
queryset=FeatureFlagEvaluationTag.objects.select_related("tag"),
)
)
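For intuition on the N+1 pattern this prefetch avoids, a sketch with hypothetical query counts:

```python
# Without the prefetch: 1 query for the flags, plus 1 per flag for its tags.
for flag in FeatureFlag.objects.filter(team=team):          # 1 query
    _ = [et.tag.name for et in flag.evaluation_tags.all()]  # +1 query per flag

# With the Prefetch above: 2 queries total, and to_representation reads the
# tags from obj._prefetched_objects_cache instead of hitting the database.
```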
if self.action == "list":
queryset = (
queryset.filter(deleted=False)

View File

@@ -860,12 +860,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecide.test_decide_doesnt_error_out_when_database_is_down.32
@@ -1624,12 +1629,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecide.test_flag_with_behavioural_cohorts.9
@@ -2144,12 +2154,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecide.test_flag_with_regular_cohorts.9
@@ -2282,12 +2297,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecide.test_web_app_queries.2
@@ -5065,12 +5085,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecideRemoteConfig.test_decide_doesnt_error_out_when_database_is_down.63
@@ -7026,12 +7051,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecideRemoteConfig.test_flag_with_behavioural_cohorts.28
@@ -8967,12 +8997,17 @@
"posthog_featureflag"."has_enriched_analytics",
"posthog_featureflag"."is_remote_configuration",
"posthog_featureflag"."has_encrypted_payloads",
"posthog_featureflag"."evaluation_runtime"
"posthog_featureflag"."evaluation_runtime",
ARRAY_AGG(DISTINCT "posthog_tag"."name") FILTER (
WHERE "posthog_featureflagevaluationtag"."id" IS NOT NULL) AS "evaluation_tag_names_agg"
FROM "posthog_featureflag"
INNER JOIN "posthog_team" ON ("posthog_featureflag"."team_id" = "posthog_team"."id")
LEFT OUTER JOIN "posthog_featureflagevaluationtag" ON ("posthog_featureflag"."id" = "posthog_featureflagevaluationtag"."feature_flag_id")
LEFT OUTER JOIN "posthog_tag" ON ("posthog_featureflagevaluationtag"."tag_id" = "posthog_tag"."id")
WHERE ("posthog_featureflag"."active"
AND NOT "posthog_featureflag"."deleted"
AND "posthog_team"."project_id" = 99999)
GROUP BY "posthog_featureflag"."id"
'''
# ---
# name: TestDecideRemoteConfig.test_flag_with_regular_cohorts.28

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@ import json
from datetime import UTC, datetime, timedelta
from typing import Optional
import pytest
from freezegun.api import freeze_time
from posthog.test.base import (
APIBaseTest,
@@ -28,7 +29,7 @@ from rest_framework import status
from posthog import redis
from posthog.api.cohort import get_cohort_actors_for_feature_flag
from posthog.api.feature_flag import FeatureFlagSerializer
from posthog.models import Experiment, FeatureFlag, GroupTypeMapping, User
from posthog.models import Experiment, FeatureFlag, GroupTypeMapping, Tag, TaggedItem, User
from posthog.models.cohort import Cohort
from posthog.models.dashboard import Dashboard
from posthog.models.feature_flag import (
@@ -2394,7 +2395,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
format="json",
).json()
with self.assertNumQueries(FuzzyInt(9, 10)):
with self.assertNumQueries(FuzzyInt(10, 11)):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags/my_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@@ -2409,7 +2410,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
format="json",
).json()
with self.assertNumQueries(FuzzyInt(9, 10)):
with self.assertNumQueries(FuzzyInt(12, 13)):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags/my_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@@ -2424,7 +2425,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
format="json",
).json()
with self.assertNumQueries(FuzzyInt(16, 17)):
with self.assertNumQueries(FuzzyInt(17, 18)):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@@ -2439,7 +2440,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
format="json",
).json()
with self.assertNumQueries(FuzzyInt(16, 17)):
with self.assertNumQueries(FuzzyInt(17, 18)):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@@ -2463,7 +2464,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
name="Flag role access",
)
with self.assertNumQueries(FuzzyInt(16, 17)):
with self.assertNumQueries(FuzzyInt(17, 18)):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()["results"]), 2)
@@ -3140,7 +3141,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
self.client.logout()
with self.assertNumQueries(FuzzyInt(18, 19)):
with self.assertNumQueries(FuzzyInt(23, 24)):
# 1. SAVEPOINT
# 2. SELECT "posthog_personalapikey"."id",
# 3. RELEASE SAVEPOINT
@@ -3160,6 +3161,7 @@ class TestFeatureFlag(APIBaseTest, ClickhouseTestMixin):
# 17. SELECT "posthog_team"."id", "posthog_team"."uuid",
# 18. SELECT "posthog_cohort". id = cohort from other team
# 19. SELECT "posthog_grouptypemapping"."id", -- group type mapping
# TODO add the evaluation tag queries
response = self.client.get(
f"/api/feature_flag/local_evaluation?token={self.team.api_token}&send_cohorts",
@@ -6493,7 +6495,7 @@ class TestCohortGenerationForFeatureFlag(APIBaseTest, ClickhouseTestMixin):
)
# TODO: Ensure server-side cursors are disabled, since in production we use this with pgbouncer
with snapshot_postgres_queries_context(self), self.assertNumQueries(24):
with snapshot_postgres_queries_context(self), self.assertNumQueries(26):
get_cohort_actors_for_feature_flag(cohort.pk, "some-feature2", self.team.pk)
cohort.refresh_from_db()
@@ -6547,7 +6549,7 @@ class TestCohortGenerationForFeatureFlag(APIBaseTest, ClickhouseTestMixin):
)
# Extra queries because each batch adds its own queries
with snapshot_postgres_queries_context(self), self.assertNumQueries(37):
with snapshot_postgres_queries_context(self), self.assertNumQueries(41):
get_cohort_actors_for_feature_flag(cohort.pk, "some-feature2", self.team.pk, batchsize=2)
cohort.refresh_from_db()
@@ -6558,7 +6560,7 @@ class TestCohortGenerationForFeatureFlag(APIBaseTest, ClickhouseTestMixin):
self.assertEqual(len(response.json()["results"]), 3, response)
# if the batch is big enough, it's fewer queries
with self.assertNumQueries(21):
with self.assertNumQueries(23):
get_cohort_actors_for_feature_flag(cohort.pk, "some-feature2", self.team.pk, batchsize=10)
cohort.refresh_from_db()
@@ -6622,7 +6624,7 @@ class TestCohortGenerationForFeatureFlag(APIBaseTest, ClickhouseTestMixin):
name="some cohort",
)
with snapshot_postgres_queries_context(self), self.assertNumQueries(21):
with snapshot_postgres_queries_context(self), self.assertNumQueries(25):
# no queries to evaluate flags, because all evaluated using override properties
get_cohort_actors_for_feature_flag(cohort.pk, "some-feature2", self.team.pk)
@@ -6639,7 +6641,7 @@ class TestCohortGenerationForFeatureFlag(APIBaseTest, ClickhouseTestMixin):
name="some cohort2",
)
with snapshot_postgres_queries_context(self), self.assertNumQueries(21):
with snapshot_postgres_queries_context(self), self.assertNumQueries(25):
# person3 doesn't match filter conditions so is pre-filtered out
get_cohort_actors_for_feature_flag(cohort2.pk, "some-feature-new", self.team.pk)
@@ -6733,7 +6735,7 @@ class TestCohortGenerationForFeatureFlag(APIBaseTest, ClickhouseTestMixin):
name="some cohort",
)
with snapshot_postgres_queries_context(self), self.assertNumQueries(38):
with snapshot_postgres_queries_context(self), self.assertNumQueries(40):
# forced to evaluate flags by going to db, because cohorts need db query to evaluate
get_cohort_actors_for_feature_flag(cohort.pk, "some-feature-new", self.team.pk)
@@ -8193,6 +8195,291 @@ class TestResiliency(TransactionTestCase, QueryMatchingTest):
self.assertFalse(errors)
class TestFeatureFlagEvaluationTags(APIBaseTest):
def setUp(self):
super().setUp()
cache.clear()
# Create a license to enable tagging feature
from datetime import datetime as dt
from typing import cast
from ee.models.license import License, LicenseManager
# Use a date that's always 50 years in the future to avoid expiration
future_year = dt.now().year + 50
super(LicenseManager, cast(LicenseManager, License.objects)).create(
key="test_license_key",
plan="enterprise",
valid_until=dt(future_year, 1, 19, 3, 14, 7),
)
@pytest.mark.ee
def test_create_feature_flag_with_evaluation_tags(self):
response = self.client.post(
f"/api/projects/{self.team.id}/feature_flags/",
{
"name": "Flag with evaluation tags",
"key": "flag-with-eval-tags",
"filters": {"groups": [{"properties": [], "rollout_percentage": 100}]},
"tags": ["app", "marketing", "docs"],
"evaluation_tags": ["app", "docs"],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
flag = FeatureFlag.objects.get(key="flag-with-eval-tags")
# Check that tags are created
tagged_items = TaggedItem.objects.filter(feature_flag=flag)
self.assertEqual(tagged_items.count(), 3)
tag_names = sorted([item.tag.name for item in tagged_items])
self.assertEqual(tag_names, ["app", "docs", "marketing"])
# Check that evaluation tags are created
from posthog.models.feature_flag.feature_flag import FeatureFlagEvaluationTag
eval_tags = FeatureFlagEvaluationTag.objects.filter(feature_flag=flag)
self.assertEqual(eval_tags.count(), 2)
eval_tag_names = sorted([tag.tag.name for tag in eval_tags])
self.assertEqual(eval_tag_names, ["app", "docs"])
@pytest.mark.ee
def test_update_feature_flag_evaluation_tags(self):
flag = FeatureFlag.objects.create(
team=self.team,
key="test-flag",
name="Test Flag",
filters={"groups": [{"properties": [], "rollout_percentage": 100}]},
created_by=self.user,
)
# Add initial tags and evaluation tags
response = self.client.patch(
f"/api/projects/{self.team.id}/feature_flags/{flag.id}/",
{
"tags": ["app", "marketing"],
"evaluation_tags": ["app"],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
from posthog.models.feature_flag.feature_flag import FeatureFlagEvaluationTag
eval_tags = FeatureFlagEvaluationTag.objects.filter(feature_flag=flag)
self.assertEqual(eval_tags.count(), 1)
first_tag = eval_tags.first()
assert first_tag is not None
self.assertEqual(first_tag.tag.name, "app")
# Update evaluation tags
response = self.client.patch(
f"/api/projects/{self.team.id}/feature_flags/{flag.id}/",
{
"tags": ["app", "marketing", "docs"],
"evaluation_tags": ["marketing", "docs"],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
eval_tags = FeatureFlagEvaluationTag.objects.filter(feature_flag=flag)
self.assertEqual(eval_tags.count(), 2)
# eval_tags is a QuerySet[FeatureFlagEvaluationTag]; materialize to list for mypy
eval_tag_names = sorted([tag.tag.name for tag in list(eval_tags)])
self.assertEqual(eval_tag_names, ["docs", "marketing"])
@pytest.mark.ee
def test_remove_all_evaluation_tags(self):
flag = FeatureFlag.objects.create(
team=self.team,
key="test-flag",
name="Test Flag",
filters={"groups": [{"properties": [], "rollout_percentage": 100}]},
created_by=self.user,
)
# Add initial tags and evaluation tags
self.client.patch(
f"/api/projects/{self.team.id}/feature_flags/{flag.id}/",
{
"tags": ["app", "marketing"],
"evaluation_tags": ["app", "marketing"],
},
format="json",
)
from posthog.models.feature_flag.feature_flag import FeatureFlagEvaluationTag
self.assertEqual(FeatureFlagEvaluationTag.objects.filter(feature_flag=flag).count(), 2)
# Remove all evaluation tags but keep regular tags
response = self.client.patch(
f"/api/projects/{self.team.id}/feature_flags/{flag.id}/",
{
"tags": ["app", "marketing"],
"evaluation_tags": [],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Evaluation tags should be removed
self.assertEqual(FeatureFlagEvaluationTag.objects.filter(feature_flag=flag).count(), 0)
# Regular tags should still exist
tagged_items = TaggedItem.objects.filter(feature_flag=flag)
self.assertEqual(tagged_items.count(), 2)
@pytest.mark.ee
def test_evaluation_tags_in_minimal_serializer(self):
from posthog.api.feature_flag import MinimalFeatureFlagSerializer
from posthog.models.feature_flag.feature_flag import FeatureFlagEvaluationTag
flag = FeatureFlag.objects.create(
team=self.team,
key="test-flag",
name="Test Flag",
filters={"groups": [{"properties": [], "rollout_percentage": 100}]},
created_by=self.user,
)
# Create tags and evaluation tags
app_tag = Tag.objects.create(name="app", team_id=self.team.id)
docs_tag = Tag.objects.create(name="docs", team_id=self.team.id)
FeatureFlagEvaluationTag.objects.create(feature_flag=flag, tag=app_tag)
FeatureFlagEvaluationTag.objects.create(feature_flag=flag, tag=docs_tag)
serializer = MinimalFeatureFlagSerializer(flag)
data = serializer.data
self.assertIn("evaluation_tags", data)
self.assertEqual(sorted(data["evaluation_tags"]), ["app", "docs"])
@pytest.mark.ee
def test_evaluation_tags_validation(self):
"""Test that evaluation_tags must be a subset of tags"""
# Valid case: evaluation_tags is subset of tags
response = self.client.post(
f"/api/projects/{self.team.id}/feature_flags/",
{
"name": "Valid evaluation tags",
"key": "valid-eval-tags",
"filters": {"groups": [{"properties": [], "rollout_percentage": 100}]},
"tags": ["app", "marketing", "docs"],
"evaluation_tags": ["app", "docs"],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Invalid case: evaluation_tags contains tag not in tags
response = self.client.post(
f"/api/projects/{self.team.id}/feature_flags/",
{
"name": "Invalid evaluation tags",
"key": "invalid-eval-tags",
"filters": {"groups": [{"properties": [], "rollout_percentage": 100}]},
"tags": ["app", "docs"],
"evaluation_tags": ["app", "marketing"], # 'marketing' not in tags
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("Evaluation tags must be a subset of tags", str(response.data))
self.assertIn("marketing", str(response.data))
# Edge case: empty tags but non-empty evaluation_tags
response = self.client.post(
f"/api/projects/{self.team.id}/feature_flags/",
{
"name": "No tags but has evaluation tags",
"key": "no-tags-eval-tags",
"filters": {"groups": [{"properties": [], "rollout_percentage": 100}]},
"tags": [],
"evaluation_tags": ["app"],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("Evaluation tags must be a subset of tags", str(response.data))
@pytest.mark.ee
def test_evaluation_tags_in_cache(self):
from posthog.models.feature_flag import get_feature_flags_for_team_in_cache, set_feature_flags_for_team_in_cache
from posthog.models.feature_flag.feature_flag import FeatureFlagEvaluationTag
flag = FeatureFlag.objects.create(
team=self.team,
key="cached-flag",
name="Cached Flag",
filters={"groups": [{"properties": [], "rollout_percentage": 100}]},
created_by=self.user,
)
# Create evaluation tags
app_tag = Tag.objects.create(name="app", team_id=self.team.id)
FeatureFlagEvaluationTag.objects.create(feature_flag=flag, tag=app_tag)
# Set flags in cache
set_feature_flags_for_team_in_cache(self.team.project_id)
# Get flags from cache
cached_flags = get_feature_flags_for_team_in_cache(self.team.project_id)
self.assertIsNotNone(cached_flags)
assert cached_flags is not None
self.assertEqual(len(cached_flags), 1)
cached_flag = cached_flags[0]
self.assertEqual(cached_flag.key, "cached-flag")
# Evaluation tag names should be exposed via the property when populated from cache
self.assertIsNotNone(cached_flag.evaluation_tag_names)
self.assertEqual(cached_flag.evaluation_tag_names, ["app"])
@pytest.mark.ee
def test_evaluation_tags_cache_invalidation(self):
"""Test that cache is invalidated when evaluation tags are updated"""
from posthog.models.feature_flag import get_feature_flags_for_team_in_cache, set_feature_flags_for_team_in_cache
flag = FeatureFlag.objects.create(
team=self.team,
key="cache-invalidation-test",
name="Cache Invalidation Test",
filters={"groups": [{"properties": [], "rollout_percentage": 100}]},
created_by=self.user,
)
# Set initial cache
set_feature_flags_for_team_in_cache(self.team.project_id)
# Verify flag is in cache without evaluation tags
cached_flags = get_feature_flags_for_team_in_cache(self.team.project_id)
assert cached_flags is not None
cached_flag = next((f for f in cached_flags if f.key == "cache-invalidation-test"), None)
assert cached_flag is not None
self.assertEqual(cached_flag.evaluation_tag_names, [])
# Update flag with evaluation tags via API
response = self.client.patch(
f"/api/projects/{self.team.id}/feature_flags/{flag.id}/",
{
"tags": ["app", "docs"],
"evaluation_tags": ["app"],
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Cache should be automatically invalidated and refreshed
cached_flags = get_feature_flags_for_team_in_cache(self.team.project_id)
assert cached_flags is not None
cached_flag = next((f for f in cached_flags if f.key == "cache-invalidation-test"), None)
assert cached_flag is not None
self.assertEqual(cached_flag.evaluation_tag_names, ["app"])
class TestFeatureFlagStatus(APIBaseTest, ClickhouseTestMixin):
def setUp(self):
cache.clear()

View File

@@ -142,6 +142,7 @@ class TestOrganizationFeatureFlagCopy(APIBaseTest, QueryMatchingTest):
"analytics_dashboards": [],
"has_enriched_analytics": False,
"tags": [],
"evaluation_tags": [],
"user_access_level": "manager",
"is_remote_configuration": False,
"has_encrypted_payloads": False,
@@ -218,6 +219,7 @@ class TestOrganizationFeatureFlagCopy(APIBaseTest, QueryMatchingTest):
"can_edit": True,
"has_enriched_analytics": False,
"tags": [],
"evaluation_tags": [],
"id": ANY,
"created_at": ANY,
"usage_dashboard": ANY,
@@ -348,6 +350,7 @@ class TestOrganizationFeatureFlagCopy(APIBaseTest, QueryMatchingTest):
"can_edit": True,
"has_enriched_analytics": False,
"tags": [],
"evaluation_tags": [],
"id": ANY,
"created_at": ANY,
"usage_dashboard": ANY,

View File

@@ -405,7 +405,7 @@ class TestSurvey(APIBaseTest):
format="json",
).json()
with self.assertNumQueries(22):
with self.assertNumQueries(23):
response = self.client.get(f"/api/projects/{self.team.id}/feature_flags")
self.assertEqual(response.status_code, status.HTTP_200_OK)
result = response.json()
@@ -1137,6 +1137,7 @@ class TestSurvey(APIBaseTest):
"has_encrypted_payloads": False,
"version": ANY, # Add version field with ANY matcher
"evaluation_runtime": "all",
"evaluation_tags": [],
},
"linked_flag": None,
"linked_flag_id": None,

View File

@@ -0,0 +1,37 @@
# Generated by Django 4.2.22 on 2025-09-23 21:53
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0863_experimentmetricresult"),
]
operations = [
migrations.CreateModel(
name="FeatureFlagEvaluationTag",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"feature_flag",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="evaluation_tags",
to="posthog.featureflag",
),
),
(
"tag",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="evaluation_flags", to="posthog.tag"
),
),
],
options={
"unique_together": {("feature_flag", "tag")},
},
),
]

View File

@@ -1 +1 @@
0863_experimentmetricresult
0864_create_feature_flag_evaluation_tag_table

View File

@@ -3,6 +3,7 @@
from .feature_flag import (
FeatureFlag,
FeatureFlagDashboards,
FeatureFlagEvaluationTag,
get_feature_flags_for_team_in_cache,
set_feature_flags_for_team_in_cache,
)

View File

@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Optional, cast
from django.contrib.auth.base_user import AbstractBaseUser
from django.core.cache import cache
from django.db import models, transaction
from django.db import DatabaseError, models, transaction
from django.db.models import QuerySet
from django.db.models.signals import post_delete, post_save
from django.http import HttpRequest
@@ -86,6 +86,13 @@ class FeatureFlag(FileSystemSyncMixin, ModelActivityMixin, RootTeamMixin, models
help_text="Specifies where this feature flag should be evaluated",
)
# Cache projection: evaluation_tag_names is stored in Redis but isn't a DB field.
# This allows us to include evaluation tags in the cached flag data without
# modifying the FeatureFlag model schema. The Redis cache stores the serialized
# JSON including evaluation_tags, and when we deserialize, we store them here
# temporarily to avoid N+1 queries when accessing evaluation tags.
_evaluation_tag_names: Optional[list[str]] = None
class Meta:
constraints = [models.UniqueConstraint(fields=["team", "key"], name="unique key for team")]
@@ -176,6 +183,24 @@ class FeatureFlag(FileSystemSyncMixin, ModelActivityMixin, RootTeamMixin, models
if tile.insight
)
@property
def evaluation_tag_names(self) -> list[str] | None:
"""
Returns evaluation tag names for this flag.
Preferred source is the cache-populated list from Redis (set on instances
as `_evaluation_tag_names`). If not present, falls back to the DB relation
via `evaluation_tags` → `Tag.name`.
"""
cached = getattr(self, "_evaluation_tag_names", None)
if cached is not None:
return cached
try:
return [et.tag.name for et in self.evaluation_tags.select_related("tag").all()]
except (AttributeError, DatabaseError):
return None
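A small illustration of the lookup order this property implements (values hypothetical):

```python
flag = FeatureFlag(key="my-flag")
flag._evaluation_tag_names = ["app"]         # set when hydrating from the Redis cache
assert flag.evaluation_tag_names == ["app"]  # cache projection wins; no DB query

fresh = FeatureFlag.objects.get(key="my-flag")  # no projection set on this instance
names = fresh.evaluation_tag_names              # falls back to evaluation_tags -> Tag.name
```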
def get_filters(self) -> dict:
if isinstance(self.filters, dict) and "groups" in self.filters:
return self.filters
@@ -463,16 +488,41 @@ def set_feature_flags_for_team_in_cache(
feature_flags: Optional[list[FeatureFlag]] = None,
using_database: str = "default",
) -> list[FeatureFlag]:
from django.contrib.postgres.aggregates import ArrayAgg
from django.db.models import Q
from posthog.api.feature_flag import MinimalFeatureFlagSerializer
if feature_flags is not None:
all_feature_flags = feature_flags
else:
all_feature_flags = list(
FeatureFlag.objects.db_manager(using_database).filter(
team__project_id=project_id, active=True, deleted=False
# Single-shot query: flags plus evaluation tag names aggregated to a string array.
# We use ArrayAgg to fetch all evaluation tag names in one query instead of
# doing a separate query per flag. This is crucial for performance when we have
# many flags. The evaluation tags are stored as a many-to-many relationship
# through FeatureFlagEvaluationTag, but we aggregate them here for efficiency.
qs = (
FeatureFlag.objects.db_manager(using_database)
.filter(team__project_id=project_id, active=True, deleted=False)
.annotate(
evaluation_tag_names_agg=ArrayAgg(
"evaluation_tags__tag__name",
filter=Q(evaluation_tags__isnull=False),
distinct=True,
)
)
)
all_feature_flags = list(qs)
# Transfer the aggregated tag names to the _evaluation_tag_names attribute
# so the serializer can access them without additional queries. This is a
# cache projection pattern - we're storing derived data on the model instance
# that will be serialized to Redis.
for _flag in all_feature_flags:
# getattr's default covers flags passed in explicitly, which lack the aggregation attribute
_flag._evaluation_tag_names = getattr(_flag, "evaluation_tag_names_agg", None)
serialized_flags = MinimalFeatureFlagSerializer(all_feature_flags, many=True).data
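One Postgres subtlety here: over the LEFT JOIN, a plain ARRAY_AGG would produce [None] for flags with no evaluation tags; the FILTER clause instead makes the aggregate NULL (None in Python) for those flags, which downstream serialization normalizes to an empty list. The possible annotation values:

```python
# flag with evaluation tags:  evaluation_tag_names_agg == ["app", "docs"]
# flag with none:             evaluation_tag_names_agg is None  (FILTER matched no rows)
# without the Q filter:       it would have been [None] from the LEFT JOIN
```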
@@ -497,7 +547,21 @@ def get_feature_flags_for_team_in_cache(project_id: int) -> Optional[list[Featur
if flag_data is not None:
try:
parsed_data = json.loads(flag_data)
return [FeatureFlag(**flag) for flag in parsed_data]
flags = []
# The loop variable is named flag_dict so it doesn't shadow the raw cache
# payload (flag_data) fetched above.
for flag_dict in parsed_data:
# Cache projection pattern: evaluation_tags is included in the Redis cache
# even though it's not a field on the FeatureFlag model. We extract it
# before creating the model instance and store it as a temporary attribute.
# This avoids N+1 queries when the Rust service needs to access evaluation
# tags for many flags at once.
evaluation_tags_list = flag_dict.pop("evaluation_tags", None)
flag = FeatureFlag(**flag_dict)
# Store the evaluation tags as a private attribute. The evaluation_tag_names
# property will check this first before falling back to a database query.
# This makes cache retrieval extremely fast - no DB queries needed.
flag._evaluation_tag_names = evaluation_tags_list
flags.append(flag)
return flags
except Exception as e:
logger.exception("Error parsing flags from cache")
capture_exception(e)
@@ -519,3 +583,25 @@ class FeatureFlagDashboards(models.Model):
name="unique feature flag for a dashboard",
)
]
class FeatureFlagEvaluationTag(models.Model):
"""
Marks an existing tag as also being an evaluation constraint for a feature flag.
When a tag is marked as an evaluation tag, it serves a dual purpose:
1. It remains an organizational tag (via the TaggedItem relationship)
2. It acts as an evaluation constraint - the flag only evaluates for requests
whose evaluation_environments include a matching tag (requests that omit
the parameter evaluate every flag)
This allows for user-specified evaluation environments like "docs-page",
"marketing-site", "app", etc.
"""
feature_flag = models.ForeignKey("FeatureFlag", on_delete=models.CASCADE, related_name="evaluation_tags")
tag = models.ForeignKey("Tag", on_delete=models.CASCADE, related_name="evaluation_flags")
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = [["feature_flag", "tag"]]
def __str__(self) -> str:
return f"{self.feature_flag.key} - {self.tag.name}"
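A minimal usage sketch (assumes an existing flag instance; this mirrors what the serializer mixin does):

```python
tag, _ = Tag.objects.get_or_create(name="app", team_id=flag.team_id)
FeatureFlagEvaluationTag.objects.create(feature_flag=flag, tag=tag)
# unique_together means creating the same (feature_flag, tag) pair again
# raises IntegrityError.
```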

View File

@@ -297,10 +297,15 @@ class TestEnvironmentsRollbackTask(TransactionTestCase):
mock_posthog_client = MagicMock()
mock_get_client.return_value = mock_posthog_client
project = Project.objects.create(id=1001, organization=self.organization, name="Main Project")
# Get a unique ID from the sequence to avoid conflicts
unique_id = Team.objects.increment_id_sequence()
# Create project and team with the same ID to simulate main environment
project = Project.objects.create(id=unique_id, organization=self.organization, name="Main Project")
production_env = Team.objects.create(
id=project.id, organization=self.organization, name="Production", project_id=project.id
id=unique_id, organization=self.organization, name="Production", project_id=project.id
)
staging_env = Team.objects.create(organization=self.organization, name="Staging", project_id=project.id)
production_insight = Insight.objects.create(team=production_env, name="Production Insight")

View File

@@ -72,12 +72,25 @@ impl FeatureFlagList {
f.active,
f.ensure_experience_continuity,
f.version,
f.evaluation_runtime
f.evaluation_runtime,
COALESCE(
ARRAY_AGG(tag.name) FILTER (WHERE tag.name IS NOT NULL),
'{}'::text[]
) AS evaluation_tags
FROM posthog_featureflag AS f
JOIN posthog_team AS t ON (f.team_id = t.id)
-- Evaluation tags are distinct from organizational tags. This bridge table links
-- flags to tags that constrain runtime evaluation. We use LEFT JOIN to retain flags
-- with zero evaluation tags, so ARRAY_AGG(...) returns an empty array rather than
-- dropping the flag row entirely.
LEFT JOIN posthog_featureflagevaluationtag AS et ON (f.id = et.feature_flag_id)
-- Only fetch names for tags that are evaluation constraints (not all org tags)
LEFT JOIN posthog_tag AS tag ON (et.tag_id = tag.id)
WHERE t.project_id = $1
AND f.deleted = false
AND f.active = true
GROUP BY f.id, f.team_id, f.name, f.key, f.filters, f.deleted, f.active,
f.ensure_experience_continuity, f.version, f.evaluation_runtime
"#;
let flags_row = sqlx::query_as::<_, FeatureFlagRow>(query)
.bind(project_id)
@@ -108,6 +121,7 @@ impl FeatureFlagList {
ensure_experience_continuity: row.ensure_experience_continuity,
version: row.version,
evaluation_runtime: row.evaluation_runtime,
evaluation_tags: row.evaluation_tags,
}),
Err(e) => {
tracing::warn!(
@@ -311,6 +325,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let flag2 = FeatureFlagRow {
@@ -324,6 +339,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
// Insert multiple flags for the team
@@ -347,4 +363,48 @@ mod tests {
assert_eq!(flag.team_id, team.id);
}
}
#[tokio::test]
async fn test_fetch_flags_with_evaluation_tags_from_pg() {
let context = TestContext::new(None).await;
let team = context
.insert_new_team(None)
.await
.expect("Failed to insert team");
let flag_row = context
.insert_flag(team.id, None)
.await
.expect("Failed to insert flag");
// Insert evaluation tags for the flag
context
.insert_evaluation_tags_for_flag(
flag_row.id,
team.id,
vec!["docs-page", "marketing-site", "app"],
)
.await
.expect("Failed to insert evaluation tags");
let (flags_from_pg, _) =
FeatureFlagList::from_pg(context.non_persons_reader.clone(), team.project_id)
.await
.expect("Failed to fetch flags from pg");
assert_eq!(flags_from_pg.flags.len(), 1);
let flag = flags_from_pg.flags.first().expect("Should have one flag");
assert_eq!(flag.key, "flag1");
assert_eq!(flag.team_id, team.id);
// Check that evaluation tags were properly fetched
let tags = flag
.evaluation_tags
.as_ref()
.expect("Should have evaluation tags");
assert_eq!(tags.len(), 3);
assert!(tags.contains(&"docs-page".to_string()));
assert!(tags.contains(&"marketing-site".to_string()));
assert!(tags.contains(&"app".to_string()));
}
}

View File

@@ -1133,6 +1133,7 @@ mod tests {
ensure_experience_continuity: flag.ensure_experience_continuity,
version: flag.version,
evaluation_runtime: flag.evaluation_runtime,
evaluation_tags: flag.evaluation_tags,
};
// Insert the feature flag into the database
@@ -1235,6 +1236,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context
.insert_flag(team.id, Some(flag_row))
@@ -1350,6 +1352,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context
.insert_flag(team.id, Some(flag_row))
@@ -1470,6 +1473,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
let inactive_flag = FeatureFlagRow {
@@ -1483,6 +1487,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
let deleted_flag = FeatureFlagRow {
@@ -1496,6 +1501,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
let no_continuity_flag = FeatureFlagRow {
@@ -1509,6 +1515,7 @@ mod tests {
ensure_experience_continuity: Some(false), // No experience continuity
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context
@@ -1600,6 +1607,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context
.insert_flag(team.id, Some(flag_row))
@@ -1684,6 +1692,7 @@ mod tests {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context
.insert_flag(team.id, Some(flag_row))

View File

@@ -98,6 +98,8 @@ pub struct FeatureFlag {
pub version: Option<i32>,
#[serde(default)]
pub evaluation_runtime: Option<String>,
#[serde(default)]
pub evaluation_tags: Option<Vec<String>>,
}
#[derive(Debug, Serialize, sqlx::FromRow)]
@@ -113,6 +115,8 @@ pub struct FeatureFlagRow {
pub version: Option<i32>,
#[serde(default)]
pub evaluation_runtime: Option<String>,
#[serde(default)]
pub evaluation_tags: Option<Vec<String>>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize)]

View File

@@ -226,6 +226,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: None,
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let deps = flag_no_deps.extract_dependencies().unwrap();
@@ -261,6 +262,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: None,
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let deps = flag_with_dep.extract_dependencies().unwrap();
@@ -310,6 +312,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: None,
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let deps = flag_with_multiple_deps.extract_dependencies().unwrap();
@@ -355,6 +358,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: None,
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let deps = flag_with_mixed_props.extract_dependencies().unwrap();
@@ -466,6 +470,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -567,6 +572,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -699,6 +705,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -798,6 +805,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -886,6 +894,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -905,6 +914,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -1003,6 +1013,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -1084,6 +1095,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -1176,6 +1188,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -1263,6 +1276,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await
@@ -1382,6 +1396,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}),
)
.await

View File

@@ -65,6 +65,8 @@ pub struct FlagRequest {
pub timezone: Option<String>,
#[serde(default)]
pub cookieless_hash_extra: Option<String>,
#[serde(default)]
pub evaluation_environments: Option<Vec<String>>,
}
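The field's effect on filtering (implemented by filter_flags_by_evaluation_tags further down) can be summarized as:

```python
# evaluation_environments omitted/null -> no tag filtering; all flags considered
# evaluation_environments == []        -> treated the same as omitted
# evaluation_environments == ["app"]   -> keep flags with no evaluation tags,
#                                         plus flags tagged with "app"
```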
impl FlagRequest {

View File

@@ -316,6 +316,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
id: 2,
@@ -335,6 +336,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
id: 3,
@@ -365,6 +367,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
],
};

View File

@@ -1242,6 +1242,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}
}
@@ -4159,6 +4160,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
// Test user "11" - should get first-variant
@@ -5427,6 +5429,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let router = context.create_postgres_router();

View File

@@ -59,5 +59,6 @@ pub fn create_simple_flag(properties: Vec<PropertyFilter>, rollout_percentage: f
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}
}

View File

@@ -119,6 +119,7 @@ mod tests {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}
}

View File

@@ -152,6 +152,7 @@ pub async fn fetch_and_filter(
query_params: &FlagsQueryParams,
headers: &axum::http::HeaderMap,
explicit_runtime: Option<EvaluationRuntime>,
environment_tags: Option<&Vec<String>>,
) -> Result<(FeatureFlagList, bool), FlagError> {
let flag_result = flag_service.get_flags_from_cache_or_pg(project_id).await?;
@@ -168,14 +169,19 @@ pub async fn fetch_and_filter(
let flags_after_runtime_filter =
filter_flags_by_runtime(flags_after_survey_filter, current_runtime);
// Finally filter by evaluation tags
let flags_after_tag_filter =
filter_flags_by_evaluation_tags(flags_after_runtime_filter, environment_tags);
tracing::debug!(
"Runtime filtering: detected_runtime={:?}, flags_count={}",
"Flag filtering: detected_runtime={:?}, environment_tags={:?}, final_count={}",
current_runtime,
flags_after_runtime_filter.len()
environment_tags,
flags_after_tag_filter.len()
);
Ok((
FeatureFlagList::new(flags_after_runtime_filter),
FeatureFlagList::new(flags_after_tag_filter),
flag_result.had_deserialization_errors,
))
}
@@ -193,6 +199,29 @@ fn filter_survey_flags(flags: Vec<FeatureFlag>, only_survey_flags: bool) -> Vec<
}
}
/// Filters flags based on evaluation tags.
/// Only includes flags that either:
/// 1. Have no evaluation tags (backward compatibility)
/// 2. Have at least one evaluation tag that matches the provided environment tags
fn filter_flags_by_evaluation_tags(
flags: Vec<FeatureFlag>,
environment_tags: Option<&Vec<String>>,
) -> Vec<FeatureFlag> {
let env_tags = match environment_tags {
Some(t) if !t.is_empty() => t,
_ => return flags,
};
flags
.into_iter()
.filter(|flag| match &flag.evaluation_tags {
None => true,
Some(flag_tags) if flag_tags.is_empty() => true,
Some(flag_tags) => flag_tags.iter().any(|tag| env_tags.contains(tag)),
})
.collect()
}
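For readers working mainly in the Python codebase, a rough Python equivalent of this filter (a sketch, not part of the commit):

```python
def filter_flags_by_evaluation_tags(flags, environment_tags):
    # No environment tags supplied: keep everything (backward compatible).
    if not environment_tags:
        return flags
    return [
        flag
        for flag in flags
        # Flags without evaluation tags always pass; otherwise any overlap matches.
        if not flag.evaluation_tags
        or any(tag in environment_tags for tag in flag.evaluation_tags)
    ]
```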
#[allow(clippy::too_many_arguments)]
pub async fn evaluate_for_request(
state: &State<router::State>,
@@ -254,6 +283,28 @@ mod tests {
ensure_experience_continuity: None,
version: None,
evaluation_runtime,
evaluation_tags: None,
}
}
fn create_test_flag_with_tags(
id: i32,
key: &str,
evaluation_runtime: Option<String>,
evaluation_tags: Option<Vec<String>>,
) -> FeatureFlag {
FeatureFlag {
id,
team_id: 1,
name: Some(format!("Test Flag {id}")),
key: key.to_string(),
filters: FlagFilters::default(),
deleted: false,
active: true,
ensure_experience_continuity: None,
version: None,
evaluation_runtime,
evaluation_tags,
}
}
@@ -574,4 +625,110 @@ mod tests {
// Note: Even if flag_keys explicitly requests a server-only flag from a client,
// the runtime filter will prevent it from being evaluated
}
#[test]
fn test_filter_flags_by_evaluation_tags_no_environment_tags() {
// When no environment tags are provided, all flags should be returned
let flags = vec![
create_test_flag_with_tags(1, "flag1", None, None),
create_test_flag_with_tags(2, "flag2", None, Some(vec!["app".to_string()])),
create_test_flag_with_tags(
3,
"flag3",
None,
Some(vec!["docs".to_string(), "marketing".to_string()]),
),
];
let filtered = filter_flags_by_evaluation_tags(flags.clone(), None);
assert_eq!(filtered.len(), 3);
let filtered = filter_flags_by_evaluation_tags(flags.clone(), Some(&vec![]));
assert_eq!(filtered.len(), 3);
}
#[test]
fn test_filter_flags_by_evaluation_tags_with_matching_tags() {
let flags = vec![
create_test_flag_with_tags(1, "no-tags", None, None),
create_test_flag_with_tags(2, "app-only", None, Some(vec!["app".to_string()])),
create_test_flag_with_tags(3, "docs-only", None, Some(vec!["docs".to_string()])),
create_test_flag_with_tags(
4,
"multi-env",
None,
Some(vec!["app".to_string(), "docs".to_string()]),
),
];
// Test with "app" environment
let app_env = vec!["app".to_string()];
let filtered = filter_flags_by_evaluation_tags(flags.clone(), Some(&app_env));
assert_eq!(filtered.len(), 3); // no-tags, app-only, multi-env
assert!(filtered.iter().any(|f| f.key == "no-tags"));
assert!(filtered.iter().any(|f| f.key == "app-only"));
assert!(filtered.iter().any(|f| f.key == "multi-env"));
assert!(!filtered.iter().any(|f| f.key == "docs-only"));
// Test with "docs" environment
let docs_env = vec!["docs".to_string()];
let filtered = filter_flags_by_evaluation_tags(flags.clone(), Some(&docs_env));
assert_eq!(filtered.len(), 3); // no-tags, docs-only, multi-env
assert!(filtered.iter().any(|f| f.key == "no-tags"));
assert!(filtered.iter().any(|f| f.key == "docs-only"));
assert!(filtered.iter().any(|f| f.key == "multi-env"));
assert!(!filtered.iter().any(|f| f.key == "app-only"));
}
#[test]
fn test_filter_flags_by_evaluation_tags_no_matching_tags() {
let flags = vec![
create_test_flag_with_tags(1, "app-only", None, Some(vec!["app".to_string()])),
create_test_flag_with_tags(2, "docs-only", None, Some(vec!["docs".to_string()])),
];
// Test with unmatched environment
let marketing_env = vec!["marketing".to_string()];
let filtered = filter_flags_by_evaluation_tags(flags.clone(), Some(&marketing_env));
assert_eq!(filtered.len(), 0);
}
#[test]
fn test_filter_flags_by_evaluation_tags_empty_flag_tags() {
// Flags with empty evaluation_tags should be included (backward compatibility)
let flags = vec![
create_test_flag_with_tags(1, "empty-tags", None, Some(vec![])),
create_test_flag_with_tags(2, "app-only", None, Some(vec!["app".to_string()])),
];
let app_env = vec!["app".to_string()];
let filtered = filter_flags_by_evaluation_tags(flags.clone(), Some(&app_env));
assert_eq!(filtered.len(), 2);
assert!(filtered.iter().any(|f| f.key == "empty-tags"));
assert!(filtered.iter().any(|f| f.key == "app-only"));
}
#[test]
fn test_filter_flags_by_evaluation_tags_multiple_environment_tags() {
let flags = vec![
create_test_flag_with_tags(1, "app-only", None, Some(vec!["app".to_string()])),
create_test_flag_with_tags(2, "docs-only", None, Some(vec!["docs".to_string()])),
create_test_flag_with_tags(
3,
"marketing-only",
None,
Some(vec!["marketing".to_string()]),
),
create_test_flag_with_tags(4, "no-tags", None, None),
];
// Test with multiple environment tags - should match ANY of them
let multi_env = vec!["app".to_string(), "docs".to_string()];
let filtered = filter_flags_by_evaluation_tags(flags.clone(), Some(&multi_env));
assert_eq!(filtered.len(), 3); // app-only, docs-only, no-tags
assert!(filtered.iter().any(|f| f.key == "app-only"));
assert!(filtered.iter().any(|f| f.key == "docs-only"));
assert!(filtered.iter().any(|f| f.key == "no-tags"));
assert!(!filtered.iter().any(|f| f.key == "marketing-only"));
}
}

View File

@@ -86,6 +86,7 @@ pub async fn process_request(context: RequestContext) -> Result<FlagsResponse, F
&context.meta,
&context.headers,
None,
request.evaluation_environments.as_ref(),
)
.await?;
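For context, a hedged sketch of the request body this implies — the field name mirrors request.evaluation_environments above, assuming the usual serde field naming; the actual request schema is defined elsewhere in this service:
// Hypothetical /flags request: the client declares which environment
// tags it evaluates under, and the service filters flags accordingly.
let body = serde_json::json!({
    "distinct_id": "user-123",
    "evaluation_environments": ["docs", "marketing"]
});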

View File

@@ -166,6 +166,7 @@ async fn test_evaluate_feature_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let feature_flag_list = FeatureFlagList { flags: vec![flag] };
@@ -256,6 +257,7 @@ async fn test_evaluate_feature_flags_with_errors() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}];
let feature_flag_list = FeatureFlagList { flags };
@@ -632,6 +634,7 @@ async fn test_evaluate_feature_flags_multiple_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
name: Some("Flag 2".to_string()),
@@ -655,6 +658,7 @@ async fn test_evaluate_feature_flags_multiple_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
];
@@ -731,6 +735,7 @@ async fn test_evaluate_feature_flags_details() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
name: Some("Flag 2".to_string()),
@@ -754,6 +759,7 @@ async fn test_evaluate_feature_flags_details() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
];
@@ -898,6 +904,7 @@ async fn test_evaluate_feature_flags_with_overrides() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let feature_flag_list = FeatureFlagList { flags: vec![flag] };
@@ -994,6 +1001,7 @@ async fn test_long_distinct_id() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
};
let feature_flag_list = FeatureFlagList { flags: vec![flag] };
@@ -1161,6 +1169,7 @@ async fn test_fetch_and_filter_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
name: Some("Survey Flag 2".to_string()),
@@ -1173,6 +1182,7 @@ async fn test_fetch_and_filter_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
name: Some("Regular Flag 1".to_string()),
@@ -1185,6 +1195,7 @@ async fn test_fetch_and_filter_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
FeatureFlag {
name: Some("Regular Flag 2".to_string()),
@@ -1197,6 +1208,7 @@ async fn test_fetch_and_filter_flags() {
ensure_experience_continuity: Some(false),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
];
@@ -1222,6 +1234,7 @@ async fn test_fetch_and_filter_flags() {
&query_params,
&axum::http::HeaderMap::new(),
None,
None,
)
.await
.unwrap();
@@ -1243,6 +1256,7 @@ async fn test_fetch_and_filter_flags() {
&query_params,
&axum::http::HeaderMap::new(),
None,
None,
)
.await
.unwrap();
@@ -1257,6 +1271,7 @@ async fn test_fetch_and_filter_flags() {
&query_params,
&axum::http::HeaderMap::new(),
None,
None,
)
.await
.unwrap();
@@ -1279,6 +1294,7 @@ async fn test_fetch_and_filter_flags() {
&query_params,
&axum::http::HeaderMap::new(),
None,
None,
)
.await
.unwrap();

View File

@@ -374,6 +374,7 @@ pub async fn insert_flag_for_team_in_pg(
}),
version: None,
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
},
};
@@ -389,6 +390,49 @@ pub async fn insert_flag_for_team_in_pg(
Ok(payload_flag)
}
pub async fn insert_evaluation_tags_for_flag_in_pg(
client: Arc<dyn Client + Send + Sync>,
flag_id: i32,
team_id: i32,
tag_names: Vec<&str>,
) -> Result<(), Error> {
let mut conn = client.get_connection().await?;
for tag_name in tag_names {
// Upsert the tag; the no-op DO UPDATE makes RETURNING id yield the existing row's id on conflict
let tag_uuid = Uuid::now_v7();
let tag_id: Uuid = sqlx::query_scalar(
r#"
INSERT INTO posthog_tag (id, name, team_id)
VALUES ($1, $2, $3)
ON CONFLICT (name, team_id) DO UPDATE
SET name = EXCLUDED.name
RETURNING id
"#,
)
.bind(tag_uuid)
.bind(tag_name)
.bind(team_id)
.fetch_one(&mut *conn)
.await?;
// Then, create the association
sqlx::query(
r#"
INSERT INTO posthog_featureflagevaluationtag (feature_flag_id, tag_id, created_at)
VALUES ($1, $2, NOW())
ON CONFLICT (feature_flag_id, tag_id) DO NOTHING
"#,
)
.bind(flag_id)
.bind(tag_id)
.execute(&mut *conn)
.await?;
}
Ok(())
}
pub async fn insert_person_for_team_in_pg(
client: Arc<dyn Client + Send + Sync>,
team_id: i32,
@@ -618,6 +662,7 @@ pub fn create_test_flag(
ensure_experience_continuity: Some(ensure_experience_continuity.unwrap_or(false)),
version: Some(1),
evaluation_runtime: Some("all".to_string()),
evaluation_tags: None,
}
}
@@ -803,6 +848,21 @@ impl TestContext {
.await
}
pub async fn insert_evaluation_tags_for_flag(
&self,
flag_id: i32,
team_id: i32,
tag_names: Vec<&str>,
) -> Result<(), Error> {
insert_evaluation_tags_for_flag_in_pg(
self.non_persons_writer.clone(),
flag_id,
team_id,
tag_names,
)
.await
}
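A sketch of intended test usage via this wrapper (a hand-written illustration; `context`, `flag.id`, and `team.id` are assumed to come from the usual test setup):
// Hypothetical: tag an already-inserted flag for two environments.
context
    .insert_evaluation_tags_for_flag(flag.id, team.id, vec!["docs", "marketing"])
    .await?;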
pub async fn add_person_to_cohort(
&self,
cohort_id: CohortId,

View File

@@ -36,6 +36,7 @@ async fn test_experience_continuity_matches_python() -> Result<()> {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context.insert_flag(team.id, Some(flag_row)).await?;
@@ -160,6 +161,7 @@ async fn test_experience_continuity_with_merge() -> Result<()> {
ensure_experience_continuity: Some(true),
version: Some(1),
evaluation_runtime: None,
evaluation_tags: None,
};
context.insert_flag(team.id, Some(flag_row)).await?;