fix(experiments): select feature flag modal filters (#39442)
@@ -4,7 +4,7 @@ from enum import Enum
 from typing import Any, Literal
 from zoneinfo import ZoneInfo

-from django.db.models import Case, F, Q, QuerySet, Value, When
+from django.db.models import Case, F, Prefetch, Q, QuerySet, Value, When
 from django.db.models.functions import Now
 from django.dispatch import receiver

@@ -22,9 +22,11 @@ from posthog.api.routing import TeamAndOrgViewSetMixin
 from posthog.api.shared import UserBasicSerializer
 from posthog.api.utils import action
 from posthog.hogql_queries.experiments.experiment_metric_fingerprint import compute_metric_fingerprint
+from posthog.models import Survey
 from posthog.models.activity_logging.activity_log import Detail, changes_between, log_activity
+from posthog.models.cohort import Cohort
 from posthog.models.experiment import Experiment, ExperimentHoldout, ExperimentMetricResult, ExperimentSavedMetric
-from posthog.models.feature_flag.feature_flag import FeatureFlag
+from posthog.models.feature_flag.feature_flag import FeatureFlag, FeatureFlagEvaluationTag
 from posthog.models.filters.filter import Filter
 from posthog.models.signals import model_activity_signal
 from posthog.models.team.team import Team

@@ -741,6 +743,120 @@ class EnterpriseExperimentsViewSet(
         experiment.save(update_fields=["exposure_cohort"])
         return Response({"cohort": cohort_serializer.data}, status=201)

+    @action(methods=["GET"], detail=False, required_scopes=["feature_flag:read"])
+    def eligible_feature_flags(self, request: Request, **kwargs: Any) -> Response:
+        """
+        Returns a paginated list of feature flags eligible for use in experiments.
+
+        Eligible flags must:
+        - Be multivariate with at least 2 variants
+        - Have "control" as the first variant key
+
+        Query parameters:
+        - search: Filter by flag key or name (case insensitive)
+        - limit: Number of results per page (default: 20)
+        - offset: Pagination offset (default: 0)
+        - active: Filter by active status ("true" or "false")
+        - created_by_id: Filter by creator user ID
+        - order: Sort order field
+        - evaluation_runtime: Filter by evaluation runtime
+        """
+        # validate limit and offset
+        try:
+            limit = min(int(request.query_params.get("limit", 20)), 100)
+            offset = max(int(request.query_params.get("offset", 0)), 0)
+        except ValueError:
+            return Response({"error": "Invalid limit or offset"}, status=400)
+
+        queryset = FeatureFlag.objects.filter(team__project_id=self.project_id, deleted=False)
+
+        # Filter for multivariate flags with at least 2 variants and first variant is "control"
+        queryset = queryset.extra(
+            where=[
+                """
+                jsonb_array_length(filters->'multivariate'->'variants') >= 2
+                AND filters->'multivariate'->'variants'->0->>'key' = 'control'
+                """
+            ]
+        )
+
+        # Exclude survey targeting flags (same as regular feature flag list endpoint)
+        survey_targeting_flags = Survey.objects.filter(
+            team__project_id=self.project_id, targeting_flag__isnull=False
+        ).values_list("targeting_flag_id", flat=True)
+        survey_internal_targeting_flags = Survey.objects.filter(
+            team__project_id=self.project_id, internal_targeting_flag__isnull=False
+        ).values_list("internal_targeting_flag_id", flat=True)
+        excluded_flag_ids = set(survey_targeting_flags) | set(survey_internal_targeting_flags)
+        queryset = queryset.exclude(id__in=excluded_flag_ids)
+
+        # Apply search filter
+        search = request.query_params.get("search")
+        if search:
+            queryset = queryset.filter(Q(key__icontains=search) | Q(name__icontains=search))
+
+        # Apply active filter
+        active = request.query_params.get("active")
+        if active is not None:
+            queryset = queryset.filter(active=active.lower() == "true")
+
+        # Apply created_by filter
+        created_by_id = request.query_params.get("created_by_id")
+        if created_by_id:
+            queryset = queryset.filter(created_by_id=created_by_id)
+
+        # Apply evaluation_runtime filter
+        evaluation_runtime = request.query_params.get("evaluation_runtime")
+        if evaluation_runtime:
+            queryset = queryset.filter(evaluation_runtime=evaluation_runtime)
+
+        # Ordering
+        order = request.query_params.get("order")
+        if order:
+            queryset = queryset.order_by(order)
+        else:
+            queryset = queryset.order_by("-created_at")
+
+        # Prefetch related data to avoid N+1 queries (same as regular feature flag list)
+        queryset = queryset.prefetch_related(
+            Prefetch(
+                "experiment_set", queryset=Experiment.objects.filter(deleted=False), to_attr="_active_experiments"
+            ),
+            "features",
+            "analytics_dashboards",
+            "surveys_linked_flag",
+            Prefetch(
+                "evaluation_tags",
+                queryset=FeatureFlagEvaluationTag.objects.select_related("tag"),
+            ),
+            Prefetch(
+                "team__cohort_set",
+                queryset=Cohort.objects.filter(deleted=False).only("id", "name"),
+                to_attr="available_cohorts",
+            ),
+        ).select_related("created_by", "last_modified_by")
+
+        total_count = queryset.count()
+        results = queryset[offset : offset + limit]
+
+        # Serialize using the standard FeatureFlagSerializer
+        serializer = FeatureFlagSerializer(
+            results,
+            many=True,
+            context={
+                "request": request,
+                "team_id": self.team_id,
+                "project_id": self.project_id,
+            },
+        )
+
+        return Response(
+            {
+                "results": serializer.data,
+                "count": total_count,
+            }
+        )
+
     @action(methods=["GET"], detail=True, required_scopes=["experiment:read"])
     def timeseries_results(self, request: Request, *args: Any, **kwargs: Any) -> Response:
         """
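Not part of the diff: a minimal sketch of how a client could call the new endpoint. The path, query parameters, and the { results, count } response shape come from the endpoint above; the host, project id, and the personal-API-key variable are placeholders (the key would need the feature_flag:read scope).

// Minimal sketch, assuming a PostHog Cloud host and a personal API key.
async function fetchEligibleFeatureFlags(projectId: number, personalApiKey: string): Promise<void> {
    const params = new URLSearchParams({ search: 'checkout', limit: '20', offset: '0', active: 'true' })
    const response = await fetch(
        `https://app.posthog.com/api/projects/${projectId}/experiments/eligible_feature_flags/?${params}`,
        { headers: { Authorization: `Bearer ${personalApiKey}` } }
    )
    // The endpoint returns a paginated payload: { results: serialized flags, count: total matches }.
    const { results, count } = await response.json()
    console.log(`eligible flags: ${count}`, results.map((flag: { key: string }) => flag.key))
}
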
@@ -38,7 +38,6 @@ import { SceneTitleSection } from '~/layout/scenes/components/SceneTitleSection'
 import { AccessControlLevel, AccessControlResourceType, FeatureFlagType } from '~/types'

 import { experimentLogic } from './experimentLogic'
-import { featureFlagEligibleForExperiment } from './utils'

 const ExperimentFormFields = (): JSX.Element => {
     const { formMode, experiment, groupTypes, aggregationLabel, hasPrimaryMetricSet, validExistingFeatureFlag } =

@@ -500,13 +499,7 @@ const SelectExistingFeatureFlagModal = ({
             {filtersSection}
             <LemonTable
                 id="ff"
-                dataSource={featureFlagModalFeatureFlags.results.filter((featureFlag) => {
-                    try {
-                        return featureFlagEligibleForExperiment(featureFlag)
-                    } catch {
-                        return false
-                    }
-                })}
+                dataSource={featureFlagModalFeatureFlags.results}
                 loading={featureFlagModalFeatureFlagsLoading}
                 useURLForSorting={false}
                 columns={[

@@ -1,5 +1,5 @@
 import '@testing-library/jest-dom'
-import { cleanup, render, screen } from '@testing-library/react'
+import { cleanup, render, screen, waitFor } from '@testing-library/react'
 import userEvent from '@testing-library/user-event'

 import { useMocks } from '~/mocks/jest'

@@ -73,10 +73,10 @@ describe('SelectExistingFeatureFlagModal', () => {
         },
     ]

-    beforeEach(() => {
+    beforeEach(async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: mockFeatureFlags,

@@ -89,6 +89,11 @@ describe('SelectExistingFeatureFlagModal', () => {
         logic = selectExistingFeatureFlagModalLogic()
         logic.mount()
         logic.actions.openSelectExistingFeatureFlagModal()
+
+        await waitFor(() => {
+            expect(logic.values.featureFlagsLoading).toBe(false)
+        })
+
         jest.clearAllMocks()
     })

@@ -8,7 +8,6 @@ import { urls } from 'scenes/urls'

 import { FeatureFlagType } from '~/types'

-import { featureFlagEligibleForExperiment } from '../utils'
 import { selectExistingFeatureFlagModalLogic } from './selectExistingFeatureFlagModalLogic'

 export const SelectExistingFeatureFlagModal = ({

@@ -50,13 +49,7 @@ export const SelectExistingFeatureFlagModal = ({
             {filtersSection}
             <LemonTable
                 id="ff"
-                dataSource={featureFlags.results.filter((featureFlag) => {
-                    try {
-                        return featureFlagEligibleForExperiment(featureFlag)
-                    } catch {
-                        return false
-                    }
-                })}
+                dataSource={featureFlags.results}
                 loading={featureFlagsLoading}
                 useURLForSorting={false}
                 columns={[

@@ -79,7 +79,7 @@ describe('VariantsPanel', () => {
     beforeEach(() => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': (req) => {
+                '/api/projects/@current/experiments/eligible_feature_flags/': (req) => {
                     const url = new URL(req.url)
                     const search = url.searchParams.get('search')

@@ -382,7 +382,10 @@ describe('VariantsPanel', () => {
     it('handles empty feature flags list', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [200, { results: [], count: 0 }],
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
+                    200,
+                    { results: [], count: 0 },
+                ],
                 '/api/projects/@current/experiments': () => [200, { results: [], count: 0 }],
             },
         })

@@ -92,7 +92,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     beforeEach(() => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': (req) => {
+                '/api/projects/@current/experiments/eligible_feature_flags/': (req) => {
                     const url = new URL(req.url, 'http://localhost')
                     const search = url.searchParams.get('search')

@@ -274,7 +274,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     it('calculates pagination correctly when no results', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: [],

@@ -320,7 +320,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     it('enables forward button when there are more pages', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: mockFeatureFlags,

@@ -349,7 +349,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     it('enables backward button when on page 2+', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: mockFeatureFlags,

@@ -378,7 +378,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     it('updates page when onForward is called', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: mockFeatureFlags,

@@ -408,7 +408,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     it('updates page when onBackward is called', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: mockFeatureFlags,

@@ -440,7 +440,7 @@ describe('selectExistingFeatureFlagModalLogic', () => {
     it('never goes below page 1 when onBackward is called', async () => {
         useMocks({
             get: {
-                '/api/projects/@current/feature_flags/': () => [
+                '/api/projects/@current/experiments/eligible_feature_flags/': () => [
                     200,
                     {
                         results: mockFeatureFlags,

@@ -79,9 +79,10 @@ export const selectExistingFeatureFlagModalLogic = kea<selectExistingFeatureFlag
             { results: [], count: 0 } as { results: FeatureFlagType[]; count: number },
             {
                 loadFeatureFlags: async () => {
-                    const response = await api.get(
-                        `api/projects/@current/feature_flags/?${toParams(values.paramsFromFilters)}`
-                    )
+                    const url = `api/projects/@current/experiments/eligible_feature_flags/?${toParams({
+                        ...values.paramsFromFilters,
+                    })}`
+                    const response = await api.get(url)
                     return response
                 },
             },

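Not part of the diff: assuming values.paramsFromFilters resolves to something like { search: 'checkout', limit: 20, offset: 0 } (hypothetical values), the loader above ends up requesting a URL of the shape sketched below; URLSearchParams is used here only to stand in for toParams.

// Hypothetical illustration of the URL built by loadFeatureFlags above.
const paramsFromFilters = { search: 'checkout', limit: 20, offset: 0 } // assumed selector output
const query = new URLSearchParams(
    Object.entries(paramsFromFilters).map(([key, value]) => [key, String(value)])
).toString()
const url = `api/projects/@current/experiments/eligible_feature_flags/?${query}`
// => "api/projects/@current/experiments/eligible_feature_flags/?search=checkout&limit=20&offset=0"
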
@@ -121,7 +121,9 @@ describe('experimentsLogic', () => {
         })
             .toFinishAllListeners()

-        expect(api.get).toHaveBeenCalledWith(expect.stringMatching(/api\/projects\/\d+\/feature_flags\/\?/))
+        expect(api.get).toHaveBeenCalledWith(
+            expect.stringMatching(/api\/projects\/\d+\/experiments\/eligible_feature_flags\/\?/)
+        )
     })

     it('hides pagination when insufficient results', () => {

@@ -211,7 +211,9 @@ export const experimentsLogic = kea<experimentsLogicType>([
             {
                 loadFeatureFlagModalFeatureFlags: async () => {
                     const response = await api.get(
-                        `api/projects/${values.currentProjectId}/feature_flags/?${toParams(values.featureFlagModalParamsFromFilters)}`
+                        `api/projects/${values.currentProjectId}/experiments/eligible_feature_flags/?${toParams({
+                            ...values.featureFlagModalParamsFromFilters,
+                        })}`
                     )
                     return response
                 },