feat(mcp): move mcp server into monorepo (#39217)

Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
This commit is contained in:
Joshua Snyder
2025-10-08 10:26:55 +01:00
committed by GitHub
parent 2473c0c1d1
commit 96a67434f6
156 changed files with 61759 additions and 151 deletions

133
.github/workflows/ci-mcp.yml vendored Normal file
View File

@@ -0,0 +1,133 @@
# CI for the MCP server living under products/mcp: lint/format/typecheck,
# unit tests, and integration tests. All jobs are gated on a path filter so
# the pipeline only runs when MCP-related files actually changed.
name: MCP CI

on:
  push:
    branches: [master]
  pull_request:

jobs:
  # Decide whether any MCP-related paths changed; downstream jobs gate on this.
  changes:
    permissions:
      contents: read
    runs-on: ubuntu-24.04
    timeout-minutes: 5
    name: Determine need to run MCP checks
    outputs:
      mcp: ${{ steps.filter.outputs.mcp }}
    steps:
      - uses: actions/checkout@v4
      - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2
        id: filter
        with:
          # FIX: this workflow file is ci-mcp.yml (not mcp-ci.yml), so the
          # previous self-referencing entry never matched edits to this file.
          filters: |
            mcp:
              - 'products/mcp/**'
              - '.github/workflows/ci-mcp.yml'
              - '.github/workflows/mcp-publish.yml'

  lint-and-format:
    name: Lint, Format, and Type Check
    runs-on: ubuntu-latest
    needs: changes
    if: needs.changes.outputs.mcp == 'true'
    permissions:
      contents: read
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install pnpm
        uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: 'pnpm'
      - name: Install dependencies
        run: pnpm install
      - name: Run linter
        run: cd products/mcp && pnpm run lint
      - name: Run formatter
        run: cd products/mcp && pnpm run format
      - name: Run type check
        run: cd products/mcp && pnpm run typecheck
      # Fail the job if lint/format rewrote any files, so formatting drift
      # cannot land on master.
      - name: Check for changes
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            echo "Code formatting or linting changes detected!"
            git diff
            exit 1
          fi

  unit-tests:
    name: Unit Tests
    runs-on: ubuntu-latest
    needs: changes
    permissions:
      contents: read
    if: needs.changes.outputs.mcp == 'true'
    # Cancel superseded runs for the same branch/PR.
    concurrency:
      group: ${{ github.workflow }}-unit-${{ github.head_ref || github.ref }}
      cancel-in-progress: true
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup pnpm
        uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: 'pnpm'
      - name: Install dependencies
        run: pnpm install
      - name: Run unit tests
        run: cd products/mcp && pnpm run test

  integration-tests:
    name: Integration Tests
    runs-on: ubuntu-latest
    # Integration tests run only after unit tests pass.
    needs: [changes, unit-tests]
    if: needs.changes.outputs.mcp == 'true'
    permissions:
      contents: read
    concurrency:
      group: ${{ github.workflow }}-integration-${{ github.head_ref || github.ref }}
      cancel-in-progress: true
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup pnpm
        uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: 'pnpm'
      - name: Install dependencies
        run: pnpm install
      - name: Run integration tests
        run: cd products/mcp && pnpm run test:integration
        env:
          TEST_POSTHOG_API_BASE_URL: ${{ secrets.MCP_TEST_API_BASE_URL }}
          TEST_POSTHOG_PERSONAL_API_KEY: ${{ secrets.MCP_TEST_PERSONAL_API_KEY }}
          TEST_ORG_ID: ${{ secrets.MCP_TEST_ORG_ID }}
          TEST_PROJECT_ID: ${{ secrets.MCP_TEST_PROJECT_ID }}

86
.github/workflows/mcp-publish.yml vendored Normal file
View File

@@ -0,0 +1,86 @@
# Publishes the MCP TypeScript package to npm when a version bump lands on
# master (or when triggered manually via workflow_dispatch).
name: 'MCP Publish'

on:
  push:
    branches:
      - master
  workflow_dispatch:

permissions:
  contents: read

jobs:
  # Decide whether any MCP-related paths changed; publish jobs gate on this.
  changes:
    runs-on: ubuntu-24.04
    timeout-minutes: 5
    name: Determine need to run MCP publish
    outputs:
      mcp: ${{ steps.filter.outputs.mcp }}
    steps:
      - uses: actions/checkout@v4
      - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2
        id: filter
        with:
          # FIX: the MCP server moved to products/mcp in the monorepo and the
          # CI workflow file is ci-mcp.yml — the old 'mcp/**' and 'mcp-ci.yml'
          # entries matched nothing, so pushes would never trigger a publish.
          filters: |
            mcp:
              - 'products/mcp/**'
              - '.github/workflows/ci-mcp.yml'
              - '.github/workflows/mcp-publish.yml'

  check-package-version:
    name: Check package version and detect an update
    runs-on: ubuntu-24.04
    needs: changes
    permissions:
      contents: read
    if: needs.changes.outputs.mcp == 'true' || github.event_name == 'workflow_dispatch'
    outputs:
      committed-version: ${{ steps.check-package-version.outputs.committed-version }}
      published-version: ${{ steps.check-package-version.outputs.published-version }}
      is-new-version: ${{ steps.check-package-version.outputs.is-new-version }}
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4
      - name: Check package version and detect an update
        id: check-package-version
        uses: PostHog/check-package-version@v2
        with:
          # FIX: package.json lives under products/mcp/typescript/ now, which
          # matches the build/publish steps below.
          path: products/mcp/typescript/

  release:
    name: Publish release if new version
    runs-on: ubuntu-24.04
    needs: check-package-version
    if: needs.check-package-version.outputs.is-new-version == 'true'
    permissions:
      contents: read
    env:
      COMMITTED_VERSION: ${{ needs.check-package-version.outputs.committed-version }}
      PUBLISHED_VERSION: ${{ needs.check-package-version.outputs.published-version }}
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.POSTHOG_BOT_GITHUB_TOKEN }}
      - name: Set up Node 22
        uses: actions/setup-node@v4
        with:
          node-version: 22
          registry-url: https://registry.npmjs.org
      - name: Install pnpm
        run: npm install -g pnpm
      - name: Install package.json dependencies with pnpm
        run: pnpm install
      - name: Build the package
        run: cd products/mcp/typescript && pnpm build
      - name: Publish the package in the npm registry
        run: cd products/mcp/typescript && pnpm publish --access public
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

View File

@@ -186,9 +186,7 @@ const config: Config = {
// ],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
// testPathIgnorePatterns: [
// "/node_modules/"
// ],
testPathIgnorePatterns: ['/node_modules/', '/products/mcp/'],
// The regexp pattern or array of patterns that Jest uses to detect test files
// testRegex: [],

3186
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -10,4 +10,5 @@ packages:
- playwright
- plugin-server
- products/*
- products/mcp/typescript
- rust/cyclotron-node

27
products/mcp/.gitignore vendored Normal file
View File

@@ -0,0 +1,27 @@
node_modules
# wrangler files
.wrangler
.dev.vars*
.mcp.json
.env.test
# Editor-specific files
.cursor/
.claude/
.vscode/
# Python
python/venv/
__pycache__/
.mypy_cache/
.pytest_cache/
.ruff_cache/
.env
dist/
.venv/
.husky/

View File

@@ -0,0 +1,2 @@
pnpm run lint
pnpm run format

View File

@@ -0,0 +1,12 @@
{
"$schema": "./node_modules/oxlint/configuration_schema.json",
"files": {
"ignore": ["**/generated.ts", "schema/tool-inputs.json", "python/**", ".mypy_cache/**"]
},
"rules": {
"no-debugger": "off",
"no-console": "off",
"@typescript-eslint/explicit-function-return-type": "off",
"jest/no-restricted-matchers": "off"
}
}

View File

@@ -0,0 +1,5 @@
schema/tool-inputs.json
python/**
**/generated.ts
node_modules/
.mypy_cache/

7
products/mcp/.prettierrc Normal file
View File

@@ -0,0 +1,7 @@
{
"trailingComma": "es5",
"tabWidth": 4,
"semi": false,
"singleQuote": true,
"printWidth": 100
}

15
products/mcp/Dockerfile Normal file
View File

@@ -0,0 +1,15 @@
# Use a lightweight Node.js image
FROM node:20-slim
# Set the working directory inside the container
WORKDIR /usr/src/app
# Install mcp-remote globally to avoid npx overhead on each run
RUN npm install -g mcp-remote@latest
# Set default environment variable for POSTHOG_REMOTE_MCP_URL
ENV POSTHOG_REMOTE_MCP_URL=https://mcp.posthog.com/mcp
# The entrypoint will run mcp-remote with proper stdio handling
# POSTHOG_AUTH_HEADER should be just the token (e.g., phx_...), we'll add "Bearer " prefix
ENTRYPOINT ["sh", "-c", "mcp-remote \"${POSTHOG_REMOTE_MCP_URL}\" --header \"Authorization:Bearer ${POSTHOG_AUTH_HEADER}\""]

21
products/mcp/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 PostHog
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

191
products/mcp/README.md Normal file
View File

@@ -0,0 +1,191 @@
# PostHog MCP
Documentation: https://posthog.com/docs/model-context-protocol
## Use the MCP Server
### Quick install
You can install the MCP server automatically into Cursor, Claude, Claude Code, VS Code and Zed by running the following command:
```bash
npx @posthog/wizard@latest mcp add
```
### Manual install
1. Obtain a personal API key using the [MCP Server preset](https://app.posthog.com/settings/user-api-keys?preset=mcp_server).
2. Add the MCP configuration to your desktop client (e.g. Cursor, Windsurf, Claude Desktop) and add your personal API key
```json
{
"mcpServers": {
"posthog": {
"command": "npx",
"args": [
"-y",
"mcp-remote@latest",
"https://mcp.posthog.com/mcp", // You can replace this with https://mcp.posthog.com/sse if your client does not support Streamable HTTP
"--header",
"Authorization:${POSTHOG_AUTH_HEADER}"
],
"env": {
"POSTHOG_AUTH_HEADER": "Bearer {INSERT_YOUR_PERSONAL_API_KEY_HERE}"
}
}
}
}
```
### Docker install
If you prefer to use Docker instead of running npx directly:
1. Build the Docker image:
```bash
pnpm docker:build
# or
docker build -t posthog-mcp .
```
2. Configure your MCP client with Docker:
```json
{
"mcpServers": {
"posthog": {
"type": "stdio",
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"--env",
"POSTHOG_AUTH_HEADER=${POSTHOG_AUTH_HEADER}",
"--env",
"POSTHOG_REMOTE_MCP_URL=${POSTHOG_REMOTE_MCP_URL:-https://mcp.posthog.com/mcp}",
"posthog-mcp"
],
"env": {
"POSTHOG_AUTH_HEADER": "Bearer {INSERT_YOUR_PERSONAL_API_KEY_HERE}",
"POSTHOG_REMOTE_MCP_URL": "https://mcp.posthog.com/mcp"
}
}
}
}
```
3. Test Docker with MCP Inspector:
```bash
pnpm docker:inspector
# or
npx @modelcontextprotocol/inspector docker run -i --rm --env POSTHOG_AUTH_HEADER=${POSTHOG_AUTH_HEADER} posthog-mcp
```
**Environment Variables:**
- `POSTHOG_AUTH_HEADER`: Your PostHog API token (required)
- `POSTHOG_REMOTE_MCP_URL`: The MCP server URL (optional, defaults to `https://mcp.posthog.com/mcp`)
This approach allows you to use the PostHog MCP server without needing Node.js or npm installed locally.
### Example Prompts
- What feature flags do I have active?
- Add a new feature flag for our homepage redesign
- What are my most common errors?
- Show me my LLM costs this week
### Feature Filtering
You can limit which tools are available by adding query parameters to the MCP URL:
```text
https://mcp.posthog.com/mcp?features=flags,workspace
```
Available features:
- `workspace` - Organization and project management
- `error-tracking` - [Error monitoring and debugging](https://posthog.com/docs/errors)
- `dashboards` - [Dashboard creation and management](https://posthog.com/docs/product-analytics/dashboards)
- `insights` - [Analytics insights and SQL queries](https://posthog.com/docs/product-analytics/insights)
- `experiments` - [A/B testing experiments](https://posthog.com/docs/experiments)
- `flags` - [Feature flag management](https://posthog.com/docs/feature-flags)
- `llm-analytics` - [LLM usage and cost tracking](https://posthog.com/docs/llm-analytics)
- `docs` - PostHog documentation search
To view which tools are available per feature, see our [documentation](https://posthog.com/docs/model-context-protocol) or alternatively check out `schema/tool-definitions.json`.
### Data processing
The MCP server is hosted on a Cloudflare worker which can be located outside of the EU / US, for this reason the MCP server does not store any sensitive data outside of your cloud region.
### Using self-hosted instances
If you're using a self-hosted instance of PostHog, you can specify a custom base URL by adding the `POSTHOG_BASE_URL` [environment variable](https://developers.cloudflare.com/workers/configuration/environment-variables) when running the MCP server locally or on your own infrastructure, e.g. `POSTHOG_BASE_URL=https://posthog.example.com`
# Development
To run the MCP server locally, run the following command:
```bash
pnpm run dev
```
And replace `https://mcp.posthog.com/mcp` with `http://localhost:8787/mcp` in the MCP configuration.
## Project Structure
This repository is organized to support multiple language implementations:
- `typescript/` - TypeScript implementation of the MCP server & tools
- `schema/` - Shared schema files generated from TypeScript
### Development Commands
- `pnpm run dev` - Start development server
- `pnpm run schema:build:json` - Generate JSON schema for other language implementations
- `pnpm run lint && pnpm run format` - Format and lint code
### Adding New Tools
See the [tools documentation](typescript/src/tools/README.md) for a guide on adding new tools to the MCP server.
### Environment variables
- Create `.dev.vars` in the root
- Add Inkeep API key to enable `docs-search` tool (see `Inkeep API key - mcp`)
```bash
INKEEP_API_KEY="..."
```
### Configuring the Model Context Protocol Inspector
During development you can directly inspect the MCP tool call results using the [MCP Inspector](https://modelcontextprotocol.io/docs/tools/inspector).
You can run it using the following command:
```bash
npx @modelcontextprotocol/inspector npx -y mcp-remote@latest http://localhost:8787/mcp --header "\"Authorization: Bearer {INSERT_YOUR_PERSONAL_API_KEY_HERE}\""
```
Alternatively, you can use the following configuration in the MCP Inspector:
Use transport type `STDIO`.
**Command:**
```bash
npx
```
**Arguments:**
```bash
-y mcp-remote@latest http://localhost:8787/mcp --header "Authorization: Bearer {INSERT_YOUR_PERSONAL_API_KEY_HERE}"
```

View File

@@ -0,0 +1,29 @@
# PostHog AI SDK Integration Example
This example demonstrates how to use PostHog tools with the AI SDK using the `@posthog/agent-toolkit` package.
## Features
- Uses the `tool()` helper function from AI SDK for type-safe tool integration
- Automatically infers tool input types from Zod schemas
- Provides access to all PostHog MCP tools (feature flags, insights, dashboards, etc.)
## Setup
1. Install dependencies:
```bash
npm install
```
2. Copy the environment file and fill in your credentials:
```bash
cp .env.example .env
```
3. Run the example:
```bash
npm run dev
```

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,21 @@
{
"name": "posthog-ai-sdk-example",
"version": "1.0.0",
"description": "Example using PostHog tools with the AI SDK",
"type": "module",
"scripts": {
"dev": "tsx src/index.ts"
},
"dependencies": {
"@ai-sdk/openai": "^2.0.28",
"@posthog/agent-toolkit": "^0.2.1",
"ai": "^5.0.40",
"dotenv": "^16.4.7",
"zod": "^3.24.4"
},
"devDependencies": {
"@types/node": "^22.15.34",
"tsx": "^4.20.3",
"typescript": "^5.8.3"
}
}

View File

@@ -0,0 +1,51 @@
import { openai } from '@ai-sdk/openai'
import { PostHogAgentToolkit } from '@posthog/agent-toolkit/integrations/ai-sdk'
import { generateText, stepCountIs } from 'ai'
import 'dotenv/config'
// Run a product-usage analysis: an LLM agent equipped with the PostHog MCP
// tools pulls insights, queries their data, and produces a short report.
async function analyzeProductUsage() {
    const toolkit = new PostHogAgentToolkit({
        posthogPersonalApiKey: process.env.POSTHOG_PERSONAL_API_KEY!,
        posthogApiBaseUrl: process.env.POSTHOG_API_BASE_URL || 'https://us.posthog.com',
    })

    const result = await generateText({
        model: openai('gpt-5-mini'),
        tools: await toolkit.getTools(),
        stopWhen: stepCountIs(30),
        system: `You are a data analyst. Your task is to do a deep dive into what's happening in our product.`,
        prompt: `Please analyze our product usage:
1. Get all available insights (limit 100) and pick the 5 most relevant ones
2. For each insight, query its data
3. Summarize the key findings in a brief report
Keep your response focused and data-driven.`,
    })

    // Tally how often each tool was invoked across all agent steps.
    const allCalls = result.steps.flatMap((step) => step.toolCalls ?? [])
    if (allCalls.length > 0) {
        const usageByTool: Record<string, number> = {}
        for (const call of allCalls) {
            usageByTool[call.toolName] = (usageByTool[call.toolName] ?? 0) + 1
        }
        // NOTE(review): loop body is empty in the original as well — the tally
        // is computed but never reported; confirm whether logging was intended.
        for (const [toolName, count] of Object.entries(usageByTool)) {
        }
    }
}
/** Entry point: run the analysis once; log and exit non-zero on failure. */
async function main() {
    try {
        await analyzeProductUsage()
    } catch (err) {
        console.error('Error:', err)
        process.exit(1)
    }
}

main().catch(console.error)

View File

@@ -0,0 +1,17 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "ESNext",
"moduleResolution": "bundler",
"allowSyntheticDefaultImports": true,
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true
},
"include": ["src/**/*"]
}

View File

@@ -0,0 +1,5 @@
node_modules/
dist/
.env
.env.local
*.log

View File

@@ -0,0 +1,30 @@
# PostHog Langchain JS Integration Example
This example demonstrates how to use PostHog tools with Langchain JS using the `@posthog/agent-toolkit` package.
## Features
- Uses the `DynamicStructuredTool` class from Langchain for type-safe tool integration
- Automatically infers tool input types from Zod schemas
- Provides access to all PostHog MCP tools (feature flags, insights, dashboards, etc.)
- Works with any Langchain-compatible LLM
## Setup
1. Install dependencies:
```bash
npm install
```
2. Copy the environment file and fill in your credentials:
```bash
cp .env.example .env
```
3. Run the example:
```bash
npm run dev
```

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
{
"name": "posthog-langchain-js-example",
"version": "1.0.0",
"description": "Example using PostHog tools with Langchain JS",
"type": "module",
"scripts": {
"dev": "tsx src/index.ts"
},
"dependencies": {
"@langchain/core": "^0.3.72",
"@langchain/openai": "^0.6.9",
"@posthog/agent-toolkit": "^0.2.1",
"dotenv": "^16.4.7",
"langchain": "^0.3.31",
"zod": "^3.24.4"
},
"devDependencies": {
"@types/node": "^22.15.34",
"tsx": "^4.20.3",
"typescript": "^5.8.3"
}
}

View File

@@ -0,0 +1,61 @@
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { ChatOpenAI } from '@langchain/openai'
import { PostHogAgentToolkit } from '@posthog/agent-toolkit/integrations/langchain'
import { AgentExecutor, createToolCallingAgent } from 'langchain/agents'
import 'dotenv/config'
async function analyzeProductUsage() {
const agentToolkit = new PostHogAgentToolkit({
posthogPersonalApiKey: process.env.POSTHOG_PERSONAL_API_KEY!,
posthogApiBaseUrl: process.env.POSTHOG_API_BASE_URL || 'https://us.posthog.com',
})
const tools = await agentToolkit.getTools()
const llm = new ChatOpenAI({
model: 'gpt-5-mini',
})
const prompt = ChatPromptTemplate.fromMessages([
[
'system',
"You are a data analyst. Your task is to do a deep dive into what's happening in our product. Be concise and data-driven in your responses.",
],
['human', '{input}'],
new MessagesPlaceholder('agent_scratchpad'),
])
const agent = createToolCallingAgent({
llm,
tools,
prompt,
})
const agentExecutor = new AgentExecutor({
agent,
tools,
verbose: false,
maxIterations: 5,
})
const result = await agentExecutor.invoke({
input: `Please analyze our product usage:
1. Get all available insights (limit 100) and pick the 5 most relevant ones
2. For each insight, query its data
3. Summarize the key findings in a brief report
Keep your response focused and data-driven.`,
})
}
/** Entry point: run the analysis once; log and exit non-zero on failure. */
async function main() {
    try {
        await analyzeProductUsage()
    } catch (err) {
        console.error('Error:', err)
        process.exit(1)
    }
}

main().catch(console.error)

View File

@@ -0,0 +1,19 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"lib": ["ES2022"],
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"allowJs": true,
"types": ["node"]
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -0,0 +1,47 @@
# PostHog LangChain Python Integration Example
This example demonstrates how to use PostHog tools with LangChain using the `posthog_agent_toolkit` package, which provides a wrapper around the PostHog MCP (Model Context Protocol) server.
## Setup
1. Install dependencies:
```bash
pip install posthog-agent-toolkit
# Or if using uv:
uv sync
```
2. Copy the environment file and fill in your credentials:
```bash
cp .env.example .env
```
3. Update your `.env` file with:
- `POSTHOG_PERSONAL_API_KEY`: Your PostHog personal API key
- `OPENAI_API_KEY`: Your OpenAI API key
## Usage
Run the example:
```bash
python posthog_agent_example.py
# Or if using uv:
uv run python posthog_agent_example.py
```
The example will:
1. Connect to the PostHog MCP server using your personal API key
2. Load all available PostHog tools from the MCP server
3. Create a LangChain agent with access to PostHog data
4. Analyze product usage by:
- Getting available insights
- Querying data for the most relevant ones
- Summarizing key findings
## Available Tools
For a complete list of all available tools and their capabilities, see the [PostHog MCP documentation](https://posthog.com/docs/model-context-protocol).

View File

@@ -0,0 +1,103 @@
"""
PostHog LangChain Integration Example
This example demonstrates how to use PostHog tools with LangChain using
the local posthog_agent_toolkit package. It shows how to analyze product
usage data similar to the TypeScript example.
"""
import asyncio
import os
import sys
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from posthog_agent_toolkit.integrations.langchain.toolkit import PostHogAgentToolkit
async def analyze_product_usage():
    """Analyze product usage using PostHog data.

    Connects to the PostHog MCP server, loads its tools into a LangChain
    tool-calling agent, and prints the agent's analysis report to stdout.
    """
    print("🚀 PostHog LangChain Agent - Product Usage Analysis\n")
    # Initialize the PostHog toolkit with credentials.
    # POSTHOG_MCP_URL defaults to the hosted MCP endpoint when unset.
    toolkit = PostHogAgentToolkit(
        personal_api_key=os.getenv("POSTHOG_PERSONAL_API_KEY"),
        url=os.getenv("POSTHOG_MCP_URL", "https://mcp.posthog.com/mcp"),
    )
    # Get the tools (fetched from the MCP server, so this awaits I/O).
    tools = await toolkit.get_tools()
    # Initialize the LLM. temperature=0 keeps the analysis deterministic-ish.
    llm = ChatOpenAI(model="gpt-5-mini", temperature=0, api_key=os.getenv("OPENAI_API_KEY"))
    # Create a system prompt for the agent; agent_scratchpad holds
    # intermediate tool-call messages.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a data analyst. Your task is to do a deep dive into what's happening in our product. "
                "Be concise and data-driven in your responses.",
            ),
            ("human", "{input}"),
            MessagesPlaceholder("agent_scratchpad"),
        ]
    )
    agent = create_tool_calling_agent(
        llm=llm,
        tools=tools,
        prompt=prompt,
    )
    # max_iterations bounds the tool-call loop so a confused agent can't spin forever.
    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=False,
        max_iterations=30,
    )
    # Invoke the agent with an analysis request
    result = await agent_executor.ainvoke(
        {
            "input": """Please analyze our product usage:
1. Get all available insights (limit 100)
2. Pick the 5 MOST INTERESTING and VALUABLE insights - prioritize:
- User behavior and engagement metrics
- Conversion funnels
- Retention and growth metrics
- Product adoption insights
- Revenue or business KPIs
AVOID picking feature flag insights unless they show significant business impact
3. For each selected insight, query its data and explain why it's important
4. Summarize the key findings in a brief report with actionable recommendations
Focus on insights that tell a story about user behavior and business performance."""
        }
    )
    print("\n📊 Analysis Complete!\n")
    print("=" * 50)
    print(result["output"])
    print("=" * 50)
async def main():
    """Main function to run the product usage analysis.

    Loads environment variables (API keys) before running; any exception is
    reported and converted into a non-zero exit code.
    """
    try:
        # Load environment variables from a local .env file, if present.
        load_dotenv()
        # Run the analysis
        await analyze_product_usage()
    except Exception as error:
        print(f"Error: {error}")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,21 @@
[project]
name = "posthog-langchain-example"
version = "0.1.0"
description = "PostHog LangChain integration example"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"langchain>=0.3.0",
"langchain-openai>=0.2.0",
"langchain-core>=0.3.0",
"python-dotenv>=1.0.0",
"posthog-agent-toolkit>=0.1.0",
]
[tool.ruff]
line-length = 120
target-version = "py311"
[tool.black]
line-length = 120
target-version = ['py311']

1218
products/mcp/examples/langchain/uv.lock generated Normal file

File diff suppressed because it is too large Load Diff

33
products/mcp/package.json Normal file
View File

@@ -0,0 +1,33 @@
{
"name": "posthog-mcp-monorepo",
"version": "0.0.0",
"private": true,
"scripts": {
"prepare": "husky",
"dev": "cd typescript && pnpm dev",
"build": "cd typescript && pnpm build",
"inspector": "cd typescript && pnpm inspector",
"schema:build:json": "tsx typescript/scripts/generate-tool-schema.ts",
"test": "cd typescript && pnpm test",
"test:integration": "cd typescript && pnpm test:integration",
"schema:build:python": "bash python/scripts/generate-pydantic-models.sh",
"schema:build": "pnpm run schema:build:json && pnpm run schema:build:python",
"format": "prettier --write . && oxlint --fix --fix-suggestions --quiet .",
"lint": "prettier --check . && oxlint .",
"format:python": "cd python && uv run ruff format .",
"lint:python": "cd python && uv run ruff check --fix .",
"test:python": "cd python && uv run pytest tests/ -v",
"typecheck": "cd typescript && pnpm typecheck",
"typecheck:python": "cd python && uvx ty check",
"docker:build": "docker build -t posthog-mcp .",
"docker:run": "docker run -i --rm --env POSTHOG_AUTH_HEADER=${POSTHOG_AUTH_HEADER} --env POSTHOG_REMOTE_MCP_URL=${POSTHOG_REMOTE_MCP_URL:-https://mcp.posthog.com/mcp} posthog-mcp",
"docker:inspector": "npx @modelcontextprotocol/inspector docker run -i --rm --env POSTHOG_AUTH_HEADER=${POSTHOG_AUTH_HEADER} --env POSTHOG_REMOTE_MCP_URL=${POSTHOG_REMOTE_MCP_URL:-https://mcp.posthog.com/mcp} posthog-mcp"
},
"devDependencies": {
"husky": "^9.1.7",
"oxlint": "^1.8.0",
"prettier": "^3.4.2",
"tsx": "^4.20.3"
},
"packageManager": "pnpm@9.15.5+sha256.8472168c3e1fd0bff287e694b053fccbbf20579a3ff9526b6333beab8df65a8d"
}

525
products/mcp/pnpm-lock.yaml generated Normal file
View File

@@ -0,0 +1,525 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
devDependencies:
'@biomejs/biome':
specifier: 1.9.4
version: 1.9.4
husky:
specifier: ^9.1.7
version: 9.1.7
tsx:
specifier: ^4.20.3
version: 4.20.3
packages:
'@biomejs/biome@1.9.4':
resolution:
{
integrity: sha512-1rkd7G70+o9KkTn5KLmDYXihGoTaIGO9PIIN2ZB7UJxFrWw04CZHPYiMRjYsaDvVV7hP1dYNRLxSANLaBFGpog==,
}
engines: { node: '>=14.21.3' }
hasBin: true
'@biomejs/cli-darwin-arm64@1.9.4':
resolution:
{
integrity: sha512-bFBsPWrNvkdKrNCYeAp+xo2HecOGPAy9WyNyB/jKnnedgzl4W4Hb9ZMzYNbf8dMCGmUdSavlYHiR01QaYR58cw==,
}
engines: { node: '>=14.21.3' }
cpu: [arm64]
os: [darwin]
'@biomejs/cli-darwin-x64@1.9.4':
resolution:
{
integrity: sha512-ngYBh/+bEedqkSevPVhLP4QfVPCpb+4BBe2p7Xs32dBgs7rh9nY2AIYUL6BgLw1JVXV8GlpKmb/hNiuIxfPfZg==,
}
engines: { node: '>=14.21.3' }
cpu: [x64]
os: [darwin]
'@biomejs/cli-linux-arm64-musl@1.9.4':
resolution:
{
integrity: sha512-v665Ct9WCRjGa8+kTr0CzApU0+XXtRgwmzIf1SeKSGAv+2scAlW6JR5PMFo6FzqqZ64Po79cKODKf3/AAmECqA==,
}
engines: { node: '>=14.21.3' }
cpu: [arm64]
os: [linux]
'@biomejs/cli-linux-arm64@1.9.4':
resolution:
{
integrity: sha512-fJIW0+LYujdjUgJJuwesP4EjIBl/N/TcOX3IvIHJQNsAqvV2CHIogsmA94BPG6jZATS4Hi+xv4SkBBQSt1N4/g==,
}
engines: { node: '>=14.21.3' }
cpu: [arm64]
os: [linux]
'@biomejs/cli-linux-x64-musl@1.9.4':
resolution:
{
integrity: sha512-gEhi/jSBhZ2m6wjV530Yy8+fNqG8PAinM3oV7CyO+6c3CEh16Eizm21uHVsyVBEB6RIM8JHIl6AGYCv6Q6Q9Tg==,
}
engines: { node: '>=14.21.3' }
cpu: [x64]
os: [linux]
'@biomejs/cli-linux-x64@1.9.4':
resolution:
{
integrity: sha512-lRCJv/Vi3Vlwmbd6K+oQ0KhLHMAysN8lXoCI7XeHlxaajk06u7G+UsFSO01NAs5iYuWKmVZjmiOzJ0OJmGsMwg==,
}
engines: { node: '>=14.21.3' }
cpu: [x64]
os: [linux]
'@biomejs/cli-win32-arm64@1.9.4':
resolution:
{
integrity: sha512-tlbhLk+WXZmgwoIKwHIHEBZUwxml7bRJgk0X2sPyNR3S93cdRq6XulAZRQJ17FYGGzWne0fgrXBKpl7l4M87Hg==,
}
engines: { node: '>=14.21.3' }
cpu: [arm64]
os: [win32]
'@biomejs/cli-win32-x64@1.9.4':
resolution:
{
integrity: sha512-8Y5wMhVIPaWe6jw2H+KlEm4wP/f7EW3810ZLmDlrEEy5KvBsb9ECEfu/kMWD484ijfQ8+nIi0giMgu9g1UAuuA==,
}
engines: { node: '>=14.21.3' }
cpu: [x64]
os: [win32]
'@esbuild/aix-ppc64@0.25.4':
resolution:
{
integrity: sha512-1VCICWypeQKhVbE9oW/sJaAmjLxhVqacdkvPLEjwlttjfwENRSClS8EjBz0KzRyFSCPDIkuXW34Je/vk7zdB7Q==,
}
engines: { node: '>=18' }
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.25.4':
resolution:
{
integrity: sha512-bBy69pgfhMGtCnwpC/x5QhfxAz/cBgQ9enbtwjf6V9lnPI/hMyT9iWpR1arm0l3kttTr4L0KSLpKmLp/ilKS9A==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.25.4':
resolution:
{
integrity: sha512-QNdQEps7DfFwE3hXiU4BZeOV68HHzYwGd0Nthhd3uCkkEKK7/R6MTgM0P7H7FAs5pU/DIWsviMmEGxEoxIZ+ZQ==,
}
engines: { node: '>=18' }
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.25.4':
resolution:
{
integrity: sha512-TVhdVtQIFuVpIIR282btcGC2oGQoSfZfmBdTip2anCaVYcqWlZXGcdcKIUklfX2wj0JklNYgz39OBqh2cqXvcQ==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.25.4':
resolution:
{
integrity: sha512-Y1giCfM4nlHDWEfSckMzeWNdQS31BQGs9/rouw6Ub91tkK79aIMTH3q9xHvzH8d0wDru5Ci0kWB8b3up/nl16g==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.25.4':
resolution:
{
integrity: sha512-CJsry8ZGM5VFVeyUYB3cdKpd/H69PYez4eJh1W/t38vzutdjEjtP7hB6eLKBoOdxcAlCtEYHzQ/PJ/oU9I4u0A==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.25.4':
resolution:
{
integrity: sha512-yYq+39NlTRzU2XmoPW4l5Ifpl9fqSk0nAJYM/V/WUGPEFfek1epLHJIkTQM6bBs1swApjO5nWgvr843g6TjxuQ==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.25.4':
resolution:
{
integrity: sha512-0FgvOJ6UUMflsHSPLzdfDnnBBVoCDtBTVyn/MrWloUNvq/5SFmh13l3dvgRPkDihRxb77Y17MbqbCAa2strMQQ==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.25.4':
resolution:
{
integrity: sha512-+89UsQTfXdmjIvZS6nUnOOLoXnkUTB9hR5QAeLrQdzOSWZvNSAXAtcRDHWtqAUtAmv7ZM1WPOOeSxDzzzMogiQ==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.25.4':
resolution:
{
integrity: sha512-kro4c0P85GMfFYqW4TWOpvmF8rFShbWGnrLqlzp4X1TNWjRY3JMYUfDCtOxPKOIY8B0WC8HN51hGP4I4hz4AaQ==,
}
engines: { node: '>=18' }
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.25.4':
resolution:
{
integrity: sha512-yTEjoapy8UP3rv8dB0ip3AfMpRbyhSN3+hY8mo/i4QXFeDxmiYbEKp3ZRjBKcOP862Ua4b1PDfwlvbuwY7hIGQ==,
}
engines: { node: '>=18' }
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.25.4':
resolution:
{
integrity: sha512-NeqqYkrcGzFwi6CGRGNMOjWGGSYOpqwCjS9fvaUlX5s3zwOtn1qwg1s2iE2svBe4Q/YOG1q6875lcAoQK/F4VA==,
}
engines: { node: '>=18' }
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.25.4':
resolution:
{
integrity: sha512-IcvTlF9dtLrfL/M8WgNI/qJYBENP3ekgsHbYUIzEzq5XJzzVEV/fXY9WFPfEEXmu3ck2qJP8LG/p3Q8f7Zc2Xg==,
}
engines: { node: '>=18' }
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.25.4':
resolution:
{
integrity: sha512-HOy0aLTJTVtoTeGZh4HSXaO6M95qu4k5lJcH4gxv56iaycfz1S8GO/5Jh6X4Y1YiI0h7cRyLi+HixMR+88swag==,
}
engines: { node: '>=18' }
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.25.4':
resolution:
{
integrity: sha512-i8JUDAufpz9jOzo4yIShCTcXzS07vEgWzyX3NH2G7LEFVgrLEhjwL3ajFE4fZI3I4ZgiM7JH3GQ7ReObROvSUA==,
}
engines: { node: '>=18' }
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.25.4':
resolution:
{
integrity: sha512-jFnu+6UbLlzIjPQpWCNh5QtrcNfMLjgIavnwPQAfoGx4q17ocOU9MsQ2QVvFxwQoWpZT8DvTLooTvmOQXkO51g==,
}
engines: { node: '>=18' }
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.25.4':
resolution:
{
integrity: sha512-6e0cvXwzOnVWJHq+mskP8DNSrKBr1bULBvnFLpc1KY+d+irZSgZ02TGse5FsafKS5jg2e4pbvK6TPXaF/A6+CA==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.25.4':
resolution:
{
integrity: sha512-vUnkBYxZW4hL/ie91hSqaSNjulOnYXE1VSLusnvHg2u3jewJBz3YzB9+oCw8DABeVqZGg94t9tyZFoHma8gWZQ==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.25.4':
resolution:
{
integrity: sha512-XAg8pIQn5CzhOB8odIcAm42QsOfa98SBeKUdo4xa8OvX8LbMZqEtgeWE9P/Wxt7MlG2QqvjGths+nq48TrUiKw==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.25.4':
resolution:
{
integrity: sha512-Ct2WcFEANlFDtp1nVAXSNBPDxyU+j7+tId//iHXU2f/lN5AmO4zLyhDcpR5Cz1r08mVxzt3Jpyt4PmXQ1O6+7A==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.25.4':
resolution:
{
integrity: sha512-xAGGhyOQ9Otm1Xu8NT1ifGLnA6M3sJxZ6ixylb+vIUVzvvd6GOALpwQrYrtlPouMqd/vSbgehz6HaVk4+7Afhw==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [openbsd]
'@esbuild/sunos-x64@0.25.4':
resolution:
{
integrity: sha512-Mw+tzy4pp6wZEK0+Lwr76pWLjrtjmJyUB23tHKqEDP74R3q95luY/bXqXZeYl4NYlvwOqoRKlInQialgCKy67Q==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.25.4':
resolution:
{
integrity: sha512-AVUP428VQTSddguz9dO9ngb+E5aScyg7nOeJDrF1HPYu555gmza3bDGMPhmVXL8svDSoqPCsCPjb265yG/kLKQ==,
}
engines: { node: '>=18' }
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.25.4':
resolution:
{
integrity: sha512-i1sW+1i+oWvQzSgfRcxxG2k4I9n3O9NRqy8U+uugaT2Dy7kLO9Y7wI72haOahxceMX8hZAzgGou1FhndRldxRg==,
}
engines: { node: '>=18' }
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.25.4':
resolution:
{
integrity: sha512-nOT2vZNw6hJ+z43oP1SPea/G/6AbN6X+bGNhNuq8NtRHy4wsMhw765IKLNmnjek7GvjWBYQ8Q5VBoYTFg9y1UQ==,
}
engines: { node: '>=18' }
cpu: [x64]
os: [win32]
esbuild@0.25.4:
resolution:
{
integrity: sha512-8pgjLUcUjcgDg+2Q4NYXnPbo/vncAY4UmyaCm0jZevERqCHZIaWwdJHkf8XQtu4AxSKCdvrUbT0XUr1IdZzI8Q==,
}
engines: { node: '>=18' }
hasBin: true
fsevents@2.3.3:
resolution:
{
integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==,
}
engines: { node: ^8.16.0 || ^10.6.0 || >=11.0.0 }
os: [darwin]
get-tsconfig@4.10.1:
resolution:
{
integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==,
}
husky@9.1.7:
resolution:
{
integrity: sha512-5gs5ytaNjBrh5Ow3zrvdUUY+0VxIuWVL4i9irt6friV+BqdCfmV11CQTWMiBYWHbXhco+J1kHfTOUkePhCDvMA==,
}
engines: { node: '>=18' }
hasBin: true
resolve-pkg-maps@1.0.0:
resolution:
{
integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==,
}
tsx@4.20.3:
resolution:
{
integrity: sha512-qjbnuR9Tr+FJOMBqJCW5ehvIo/buZq7vH7qD7JziU98h6l3qGy0a/yPFjwO+y0/T7GFpNgNAvEcPPVfyT8rrPQ==,
}
engines: { node: '>=18.0.0' }
hasBin: true
snapshots:
'@biomejs/biome@1.9.4':
optionalDependencies:
'@biomejs/cli-darwin-arm64': 1.9.4
'@biomejs/cli-darwin-x64': 1.9.4
'@biomejs/cli-linux-arm64': 1.9.4
'@biomejs/cli-linux-arm64-musl': 1.9.4
'@biomejs/cli-linux-x64': 1.9.4
'@biomejs/cli-linux-x64-musl': 1.9.4
'@biomejs/cli-win32-arm64': 1.9.4
'@biomejs/cli-win32-x64': 1.9.4
'@biomejs/cli-darwin-arm64@1.9.4':
optional: true
'@biomejs/cli-darwin-x64@1.9.4':
optional: true
'@biomejs/cli-linux-arm64-musl@1.9.4':
optional: true
'@biomejs/cli-linux-arm64@1.9.4':
optional: true
'@biomejs/cli-linux-x64-musl@1.9.4':
optional: true
'@biomejs/cli-linux-x64@1.9.4':
optional: true
'@biomejs/cli-win32-arm64@1.9.4':
optional: true
'@biomejs/cli-win32-x64@1.9.4':
optional: true
'@esbuild/aix-ppc64@0.25.4':
optional: true
'@esbuild/android-arm64@0.25.4':
optional: true
'@esbuild/android-arm@0.25.4':
optional: true
'@esbuild/android-x64@0.25.4':
optional: true
'@esbuild/darwin-arm64@0.25.4':
optional: true
'@esbuild/darwin-x64@0.25.4':
optional: true
'@esbuild/freebsd-arm64@0.25.4':
optional: true
'@esbuild/freebsd-x64@0.25.4':
optional: true
'@esbuild/linux-arm64@0.25.4':
optional: true
'@esbuild/linux-arm@0.25.4':
optional: true
'@esbuild/linux-ia32@0.25.4':
optional: true
'@esbuild/linux-loong64@0.25.4':
optional: true
'@esbuild/linux-mips64el@0.25.4':
optional: true
'@esbuild/linux-ppc64@0.25.4':
optional: true
'@esbuild/linux-riscv64@0.25.4':
optional: true
'@esbuild/linux-s390x@0.25.4':
optional: true
'@esbuild/linux-x64@0.25.4':
optional: true
'@esbuild/netbsd-arm64@0.25.4':
optional: true
'@esbuild/netbsd-x64@0.25.4':
optional: true
'@esbuild/openbsd-arm64@0.25.4':
optional: true
'@esbuild/openbsd-x64@0.25.4':
optional: true
'@esbuild/sunos-x64@0.25.4':
optional: true
'@esbuild/win32-arm64@0.25.4':
optional: true
'@esbuild/win32-ia32@0.25.4':
optional: true
'@esbuild/win32-x64@0.25.4':
optional: true
esbuild@0.25.4:
optionalDependencies:
'@esbuild/aix-ppc64': 0.25.4
'@esbuild/android-arm': 0.25.4
'@esbuild/android-arm64': 0.25.4
'@esbuild/android-x64': 0.25.4
'@esbuild/darwin-arm64': 0.25.4
'@esbuild/darwin-x64': 0.25.4
'@esbuild/freebsd-arm64': 0.25.4
'@esbuild/freebsd-x64': 0.25.4
'@esbuild/linux-arm': 0.25.4
'@esbuild/linux-arm64': 0.25.4
'@esbuild/linux-ia32': 0.25.4
'@esbuild/linux-loong64': 0.25.4
'@esbuild/linux-mips64el': 0.25.4
'@esbuild/linux-ppc64': 0.25.4
'@esbuild/linux-riscv64': 0.25.4
'@esbuild/linux-s390x': 0.25.4
'@esbuild/linux-x64': 0.25.4
'@esbuild/netbsd-arm64': 0.25.4
'@esbuild/netbsd-x64': 0.25.4
'@esbuild/openbsd-arm64': 0.25.4
'@esbuild/openbsd-x64': 0.25.4
'@esbuild/sunos-x64': 0.25.4
'@esbuild/win32-arm64': 0.25.4
'@esbuild/win32-ia32': 0.25.4
'@esbuild/win32-x64': 0.25.4
fsevents@2.3.3:
optional: true
get-tsconfig@4.10.1:
dependencies:
resolve-pkg-maps: 1.0.0
husky@9.1.7: {}
resolve-pkg-maps@1.0.0: {}
tsx@4.20.3:
dependencies:
esbuild: 0.25.4
get-tsconfig: 4.10.1
optionalDependencies:
fsevents: 2.3.3

View File

@@ -0,0 +1,57 @@
# posthog-agent-toolkit
Tools to give agents access to your PostHog data, manage feature flags, create insights, and more.
This is a Python wrapper around the PostHog MCP (Model Context Protocol) server, providing easy integration with AI frameworks like LangChain.
## Installation
```bash
pip install posthog-agent-toolkit
```
## Quick Start
The toolkit provides integrations for popular AI frameworks:
### Using with LangChain
```python
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from posthog_agent_toolkit.integrations.langchain.toolkit import PostHogAgentToolkit
# Initialize the PostHog toolkit
toolkit = PostHogAgentToolkit(
personal_api_key="your_posthog_personal_api_key",
url="https://mcp.posthog.com/mcp" # or your own, if you are self hosting the MCP server
)
# Get the tools
tools = await toolkit.get_tools()
# Initialize the LLM
llm = ChatOpenAI(model="gpt-5-mini")
# Create a prompt
prompt = ChatPromptTemplate.from_messages([
("system", "You are a data analyst with access to PostHog analytics"),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
])
# Create and run the agent
agent = create_tool_calling_agent(llm=llm, tools=tools, prompt=prompt)
executor = AgentExecutor(agent=agent, tools=tools)
result = await executor.ainvoke({
"input": "Analyze our product usage by getting the top 5 most interesting insights and summarising the data from them."
})
```
**[→ See full LangChain example](https://github.com/posthog/mcp/tree/main/examples/langchain)**
## Available Tools
For a list of all available tools, please see the [docs](https://posthog.com/docs/model-context-protocol).

View File

@@ -0,0 +1,3 @@
"""PostHog Agent Toolkit for Python."""
__version__ = "0.1.0"

View File

@@ -0,0 +1 @@
"""PostHog Agent Toolkit Integrations."""

View File

@@ -0,0 +1,5 @@
"""PostHog LangChain Integration."""
from .toolkit import PostHogAgentToolkit
__all__ = ["PostHogAgentToolkit"]

View File

@@ -0,0 +1,61 @@
"""PostHog Agent Toolkit for LangChain using MCP."""
from typing import Any
from langchain_core.tools import BaseTool
from langchain_mcp_adapters.client import MultiServerMCPClient
class PostHogAgentToolkit:
    """
    A toolkit for interacting with PostHog tools via the MCP server.

    Tools are fetched lazily from the MCP server on the first call to
    ``get_tools()`` and cached for the lifetime of the toolkit instance.
    """

    # Cached LangChain tools; populated lazily by `get_tools()`.
    _tools: list[BaseTool] | None
    # MCP client configured to talk to the PostHog MCP server.
    client: MultiServerMCPClient

    def __init__(
        self,
        url: str = "https://mcp.posthog.com/mcp",
        personal_api_key: str | None = None,
    ):
        """
        Initialize the PostHog Agent Toolkit.

        Args:
            url: The URL of the PostHog MCP server
                (default: https://mcp.posthog.com/mcp)
            personal_api_key: PostHog personal API key used for authentication

        Raises:
            ValueError: If no personal API key is provided.
        """
        if not personal_api_key:
            raise ValueError("A personal API key is required.")
        config = self._get_config(url, personal_api_key)
        self.client = MultiServerMCPClient(config)
        # Lazily populated cache; type is declared on the class above.
        self._tools = None

    @staticmethod
    def _get_config(url: str, personal_api_key: str) -> dict[str, dict[str, Any]]:
        """Build the MultiServerMCPClient connection config for PostHog."""
        return {
            "posthog": {
                "url": url,
                "transport": "streamable_http",
                "headers": {
                    "Authorization": f"Bearer {personal_api_key}",
                    # Identifies this wrapper package to the MCP server.
                    "X-Client-Package": "posthog-agent-toolkit",
                },
            }
        }

    async def get_tools(self) -> list[BaseTool]:
        """
        Get all available PostHog tools as LangChain compatible tools.

        Returns:
            List of BaseTool instances that can be used with LangChain agents
        """
        if self._tools is None:
            self._tools = await self.client.get_tools()
        return self._tools

View File

@@ -0,0 +1,73 @@
# Packaging and tooling configuration for the posthog-agent-toolkit
# Python package (built with hatchling, linted/formatted with ruff).

[project]
name = "posthog-agent-toolkit"
# NOTE(review): keep in sync with `__version__` in the package __init__.py.
version = "0.1.2"
description = "PostHog Agent Toolkit for LangChain and other AI frameworks"
readme = "README.md"
requires-python = ">=3.11"
authors = [
    { name = "PostHog", email = "hey@posthog.com" },
]
dependencies = [
    "pydantic>=2.5.0",
    "httpx>=0.25.0",
    "typing-extensions>=4.8.0",
    "python-dateutil>=2.8.2",
    "python-dotenv>=1.0.0",
    "langchain-mcp-adapters>=0.1.0",
    "langchain-core>=0.1.0",
]

# Development-only tooling, installed with `uv sync --dev`.
[dependency-groups]
dev = [
    "datamodel-code-generator[http]>=0.25.0",
    "ruff>=0.1.0",
    "pytest>=7.0.0",
    "pytest-asyncio>=0.21.0",
    "build>=1.3.0",
    "twine>=6.2.0",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["posthog_agent_toolkit"]

[tool.hatch.build.targets.sdist]
exclude = [
    ".venv/",
    "dist/",
    "*.egg-info/",
]

[tool.ruff]
target-version = "py311"
line-length = 160
indent-width = 4
exclude = [
    "schema/tool_inputs.py", # Auto-generated file
]

[tool.ruff.lint]
select = [
    "E", # pycodestyle errors
    "W", # pycodestyle warnings
    "F", # pyflakes
    "I", # isort
    "B", # flake8-bugbear
    "C4", # flake8-comprehensions
    "UP", # pyupgrade
]
ignore = [
    "E501", # line too long
]

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
skip-magic-trailing-comma = false
line-ending = "auto"

# NOTE(review): legacy uv field deliberately left empty; dev tools are
# declared via [dependency-groups] above — confirm the empty list is intended.
[tool.uv]
dev-dependencies = []

View File

@@ -0,0 +1,10 @@
# Pytest configuration.
# NOTE(review): in a standalone pytest ini file the section header must be
# [pytest]; the [tool:pytest] header is only recognized inside setup.cfg /
# tox.ini, so the previous form caused this entire config to be silently
# ignored. Confirm against the actual filename if this is not pytest.ini.
[pytest]
asyncio_mode = auto
testpaths = tests
python_files = test_*.py *_test.py
python_classes = Test*
python_functions = test_*
addopts = -v --tb=short
# NOTE(review): the `env` option requires the pytest-env plugin, which is
# not listed in the dev dependency group — verify it is installed.
env =
    PYTHONPATH = .
filterwarnings = ignore::DeprecationWarning

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Generates Pydantic models (schema/tool_inputs.py) from the shared JSON
# schema of MCP tool inputs, then formats and post-processes the output.
# Requires `uv` and a previously generated schema/tool-inputs.json
# (produced by `pnpm run schema:build:json`).
set -e

# Get the directory of this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
PYTHON_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Input and output paths
INPUT_PATH="$PROJECT_ROOT/schema/tool-inputs.json"
OUTPUT_PATH="$PYTHON_ROOT/schema/tool_inputs.py"

# Ensure output directory exists
mkdir -p "$(dirname "$OUTPUT_PATH")"

# Check if input file exists
if [ ! -f "$INPUT_PATH" ]; then
    echo "❌ Error: JSON schema not found at $INPUT_PATH"
    echo "Please run 'pnpm run schema:build:json' first to generate the JSON schema"
    exit 1
fi

echo "🔧 Generating Pydantic models from $INPUT_PATH"

# Ensure uv environment is synced with dev dependencies
echo "🐍 Setting up uv environment..."
cd "$PYTHON_ROOT"
uv sync --dev

# Generate schema.py from schema.json
uv run datamodel-codegen \
    --class-name='ToolInputs' \
    --collapse-root-models \
    --target-python-version 3.11 \
    --disable-timestamp \
    --use-one-literal-as-default \
    --use-default \
    --use-default-kwarg \
    --use-subclass-enum \
    --input "$INPUT_PATH" \
    --input-file-type jsonschema \
    --output "$OUTPUT_PATH" \
    --output-model-type pydantic_v2.BaseModel \
    --custom-file-header "# mypy: disable-error-code=\"assignment\"" \
    --set-default-enum-member \
    --capitalise-enum-members \
    --wrap-string-literal \
    --use-field-description \
    --use-schema-description \
    --field-constraints \
    --use-annotated

echo "✅ Generated Pydantic models at $OUTPUT_PATH"

# Format with ruff
echo "📝 Formatting with ruff..."
uv run ruff format "$OUTPUT_PATH"

# Check and autofix with ruff
echo "🔍 Checking with ruff..."
uv run ruff check --fix "$OUTPUT_PATH"

# Replace class Foo(str, Enum) with class Foo(StrEnum) for proper handling in format strings in python 3.11
# Remove this when https://github.com/koxudaxi/datamodel-code-generator/issues/1313 is resolved
echo "🔄 Updating enum imports for Python 3.11+..."
# GNU and BSD sed disagree on how `-i` (in-place) takes its backup-suffix
# argument, so detect the flavor before rewriting the generated file.
if sed --version 2>&1 | grep -q GNU; then
    # GNU sed
    sed -i -e 's/str, Enum/StrEnum/g' "$OUTPUT_PATH"
    sed -i 's/from enum import Enum/from enum import Enum, StrEnum/g' "$OUTPUT_PATH"
else
    # BSD/macOS sed
    sed -i '' -e 's/str, Enum/StrEnum/g' "$OUTPUT_PATH"
    sed -i '' 's/from enum import Enum/from enum import Enum, StrEnum/g' "$OUTPUT_PATH"
fi

echo "🎉 Successfully generated Pydantic models!"
echo "📋 Output file: $OUTPUT_PATH"

1584
products/mcp/python/uv.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,618 @@
{
"add-insight-to-dashboard": {
"description": "Add an existing insight to a dashboard. Requires insight ID and dashboard ID. Optionally supports layout and color customization.",
"category": "Dashboards",
"feature": "dashboards",
"summary": "Add an existing insight to a dashboard.",
"title": "Add insight to dashboard",
"required_scopes": ["dashboard:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"dashboard-create": {
"description": "Create a new dashboard in the project. Requires name and optional description, tags, and other properties.",
"category": "Dashboards",
"feature": "dashboards",
"summary": "Create a new dashboard in the project.",
"title": "Create dashboard",
"required_scopes": ["dashboard:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true,
"readOnlyHint": false
}
},
"dashboard-delete": {
"description": "Delete a dashboard by ID (soft delete - marks as deleted).",
"category": "Dashboards",
"feature": "dashboards",
"summary": "Delete a dashboard by ID.",
"title": "Delete dashboard",
"required_scopes": ["dashboard:write"],
"annotations": {
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"dashboard-get": {
"description": "Get a specific dashboard by ID. The response will include insights / tiles that are on the dashboard.",
"category": "Dashboards",
"feature": "dashboards",
"summary": "Get a specific dashboard by ID, including insights that are on the dashboard.",
"title": "Get dashboard",
"required_scopes": ["dashboard:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"dashboards-get-all": {
"description": "Get all dashboards in the project with optional filtering. Can filter by pinned status, search term, or pagination.",
"category": "Dashboards",
"feature": "dashboards",
"summary": "Get all dashboards in the project with optional filtering.",
"title": "Get all dashboards",
"required_scopes": ["dashboard:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"dashboard-update": {
"description": "Update an existing dashboard by ID. Can update name, description, pinned status or tags.",
"category": "Dashboards",
"feature": "dashboards",
"summary": "Update an existing dashboard by ID.",
"title": "Update dashboard",
"required_scopes": ["dashboard:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"docs-search": {
"description": "Use this tool to search the PostHog documentation for information that can help the user with their request. Use it as a fallback when you cannot answer the user's request using other tools in this MCP. Only use this tool for PostHog related questions.",
"category": "Documentation",
"feature": "docs",
"summary": "Search the PostHog documentation for information.",
"title": "Search docs",
"required_scopes": [],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"error-details": {
"description": "Use this tool to get the details of an error in the project.",
"category": "Error tracking",
"feature": "error-tracking",
"summary": "Get the details of an error in the project.",
"title": "Get error details",
"required_scopes": ["error_tracking:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"list-errors": {
"description": "Use this tool to list errors in the project.",
"category": "Error tracking",
"feature": "error-tracking",
"summary": "List errors in the project.",
"title": "List errors",
"required_scopes": ["error_tracking:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"create-feature-flag": {
"description": "Creates a new feature flag in the project. Once you have created a feature flag, you should: Ask the user if they want to add it to their codebase, Use the \"search-docs\" tool to find documentation on how to add feature flags to the codebase (search for the right language / framework), Clarify where it should be added and then add it.",
"category": "Feature flags",
"feature": "flags",
"summary": "Creates a new feature flag in the project.",
"title": "Create feature flag",
"required_scopes": ["feature_flag:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true,
"readOnlyHint": false
}
},
"delete-feature-flag": {
"description": "Delete a feature flag in the project.",
"category": "Feature flags",
"feature": "flags",
"summary": "Delete a feature flag in the project.",
"title": "Delete feature flag",
"required_scopes": ["feature_flag:write"],
"annotations": {
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"feature-flag-get-all": {
"description": "Get all feature flags in the project.",
"category": "Feature flags",
"feature": "flags",
"summary": "Get all feature flags in the project.",
"title": "Get all feature flags",
"required_scopes": ["feature_flag:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"feature-flag-get-definition": {
"description": "Get the definition of a feature flag. You can provide either the flagId or the flagKey. If you provide both, the flagId will be used.",
"category": "Feature flags",
"feature": "flags",
"summary": "Get the definition of a feature flag.",
"title": "Get feature flag definition",
"required_scopes": ["feature_flag:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"update-feature-flag": {
"description": "Update a new feature flag in the project. To enable a feature flag, you should make sure it is active and the rollout percentage is set to 100 for the group you want to target. To disable a feature flag, you should make sure it is inactive, you can keep the rollout percentage as it is.",
"category": "Feature flags",
"feature": "flags",
"summary": "Update a feature flag in the project.",
"title": "Update feature flag",
"required_scopes": ["feature_flag:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"experiment-get-all": {
"description": "Get all experiments in the project.",
"category": "Experiments",
"feature": "experiments",
"summary": "Get all experiments in the project.",
"title": "Get all experiments",
"required_scopes": ["experiment:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"experiment-create": {
"description": "Create a comprehensive A/B test experiment. PROCESS: 1) Understand experiment goal and hypothesis 2) Search existing feature flags with 'feature-flags-get-all' tool first and suggest reuse or new key 3) Help user define success metrics by asking what they want to optimize 4) MOST IMPORTANT: Use 'event-definitions-list' tool to find available events in their project 5) For funnel metrics, ask for specific event sequence (e.g., ['product_view', 'add_to_cart', 'purchase']) and use funnel_steps parameter 6) Configure variants (default 50/50 control/test unless they specify otherwise) 7) Set targeting criteria if needed.",
"category": "Experiments",
"feature": "experiments",
"summary": "Create A/B test experiment with guided metric and feature flag setup",
"title": "Create experiment",
"required_scopes": ["experiment:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true,
"readOnlyHint": false
}
},
"experiment-delete": {
"description": "Delete an experiment by ID.",
"category": "Experiments",
"feature": "experiments",
"summary": "Delete an experiment by ID.",
"title": "Delete experiment",
"required_scopes": ["experiment:write"],
"annotations": {
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"experiment-update": {
"description": "Update an existing experiment by ID. Can update name, description, lifecycle state, variants, metrics, and other properties. RESTART WORKFLOW: To restart a concluded experiment, set end_date=null, conclusion=null, conclusion_comment=null, and optionally set a new start_date. To make it draft again, also set start_date=null. COMMON PATTERNS: Launch draft (set start_date), stop running (set end_date + conclusion), archive (set archived=true), modify variants (update parameters.feature_flag_variants). NOTE: feature_flag_key cannot be changed after creation.",
"category": "Experiments",
"feature": "experiments",
"summary": "Update an existing experiment with lifecycle management and restart capability.",
"title": "Update experiment",
"required_scopes": ["experiment:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"experiment-get": {
"description": "Get details of a specific experiment by ID.",
"category": "Experiments",
"feature": "experiments",
"summary": "Get details of a specific experiment.",
"title": "Get experiment details",
"required_scopes": ["experiment:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"experiment-results-get": {
"description": "Get comprehensive experiment results including all metrics data (primary and secondary) and exposure data. This tool fetches the experiment details and executes the necessary queries to get complete experiment results. Only works with new experiments (not legacy experiments).",
"category": "Experiments",
"feature": "experiments",
"summary": "Get comprehensive experiment results including metrics and exposure data.",
"title": "Get experiment results",
"required_scopes": ["experiment:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"insight-create-from-query": {
"description": "Create an insight from a query that you have previously tested with 'query-run'. You should check the query runs, before creating an insight. Do not create an insight before running the query, unless you know already that it is correct (e.g. you are making a minor modification to an existing query you have seen).",
"category": "Insights & analytics",
"feature": "insights",
"summary": "Save a query as an insight.",
"title": "Create insight from query",
"required_scopes": ["insight:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true,
"readOnlyHint": false
}
},
"insight-delete": {
"description": "Delete an insight by ID (soft delete - marks as deleted).",
"category": "Insights & analytics",
"feature": "insights",
"summary": "Delete an insight by ID.",
"title": "Delete insight",
"required_scopes": ["insight:write"],
"annotations": {
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"insight-get": {
"description": "Get a specific insight by ID.",
"category": "Insights & analytics",
"feature": "insights",
"summary": "Get a specific insight by ID.",
"title": "Get insight",
"required_scopes": ["insight:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"insight-query": {
"description": "Execute a query on an existing insight to get its results/data. Provide the insight ID to retrieve the current query results.",
"category": "Insights & analytics",
"feature": "insights",
"summary": "Execute a query on an existing insight to get its results/data.",
"title": "Query insight",
"required_scopes": ["query:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"insights-get-all": {
"description": "Get all insights in the project with optional filtering. Can filter by saved status, favorited status, or search term.",
"category": "Insights & analytics",
"feature": "insights",
"summary": "Get all insights in the project with optional filtering.",
"title": "Get all insights",
"required_scopes": ["insight:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"insight-update": {
"description": "Update an existing insight by ID. Can update name, description, filters, and other properties. You should get the insight before update it to see it's current query structure, and only modify the parts needed to answer the user's request.",
"category": "Insights & analytics",
"feature": "insights",
"summary": "Update an existing insight by ID.",
"title": "Update insight",
"required_scopes": ["insight:write"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"query-run": {
"description": "You should use this to answer questions that a user has about their data and for when you want to create a new insight. You can use 'event-definitions-list' to get events to use in the query, and 'event-properties-list' to get properties for those events. It can run a trend, funnel or HogQL query. Where possible, use a trend or funnel rather than a HogQL query, unless you know the HogQL is correct (e.g. it came from a previous insight.).",
"category": "Insights & analytics",
"summary": "Run a trend, funnel or HogQL query.",
"feature": "insights",
"title": "Run query",
"required_scopes": ["query:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"query-generate-hogql-from-question": {
"description": "This is a slow tool, and you should only use it once you have tried to create a query using the 'query-run' tool, or the query is too complicated to create a trend / funnel. Queries project's PostHog data based on a provided natural language question - don't provide SQL query as input but describe the output you want. When giving the results back to the user, first show the SQL query that was used, then provide results in reasily readable format. You should also offer to save the query as an insight if the user wants to.",
"category": "Insights & analytics",
"summary": "Queries project's PostHog data based on a provided natural language question.",
"feature": "insights",
"title": "Generate SQL",
"required_scopes": ["query:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true,
"readOnlyHint": true
}
},
"get-llm-total-costs-for-project": {
"description": "Fetches the total LLM daily costs for each model for a project over a given number of days. If no number of days is provided, it defaults to 7. The results are sorted by model name. The total cost is rounded to 4 decimal places. The query is executed against the project's data warehouse. Show the results as a Markdown formatted table with the following information for each model: Model name, Total cost in USD, Each day's date, Each day's cost in USD. Write in bold the model name with the highest total cost. Properly render the markdown table in the response.",
"category": "LLM analytics",
"feature": "llm-analytics",
"summary": "Fetches the total LLM daily costs for each model for a project over a given number of days.",
"title": "Get LLM costs",
"required_scopes": ["warehouse_table:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"organization-details-get": {
"description": "Get the details of the active organization.",
"category": "Organization & project management",
"feature": "workspace",
"summary": "Get the details of the active organization.",
"title": "Get organization details",
"required_scopes": ["organization:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"organizations-get": {
"description": "Get the organizations the user has access to.",
"category": "Organization & project management",
"feature": "workspace",
"summary": "Get the organizations the user has access to.",
"title": "Get organizations",
"required_scopes": ["user:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"switch-organization": {
"description": "Change the active organization from the default organization. You should only use this tool if the user asks you to change the organization - otherwise, the default organization will be used.",
"category": "Organization & project management",
"feature": "workspace",
"summary": "Change the active organization from the default organization.",
"title": "Switch active organization",
"required_scopes": ["organization:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"projects-get": {
"description": "Fetches projects that the user has access to in the current organization.",
"category": "Organization & project management",
"feature": "workspace",
"summary": "Fetches projects that the user has access to in the current organization.",
"title": "Get projects",
"required_scopes": ["organization:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"event-definitions-list": {
"description": "List all event definitions in the project with optional filtering. Can filter by search term.",
"category": "Events & properties",
"feature": "events",
"summary": "List all event definitions in the project with optional filtering.",
"title": "List all events",
"required_scopes": ["event_definition:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"properties-list": {
"description": "List properties for events or persons. If fetching event properties, you must provide an event name.",
"category": "Events & properties",
"feature": "events",
"summary": "Get properties for events or persons.",
"title": "Get properties",
"required_scopes": ["property_definition:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"property-definitions": {
"description": "Get event and property definitions for the project.",
"category": "Organization & project management",
"feature": "workspace",
"summary": "Get event and property definitions for the project.",
"title": "Get property definitions",
"required_scopes": ["property_definition:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"switch-project": {
"description": "Change the active project from the default project. You should only use this tool if the user asks you to change the project - otherwise, the default project will be used.",
"category": "Organization & project management",
"feature": "workspace",
"summary": "Change the active project from the default project.",
"title": "Switch active project",
"required_scopes": ["project:read"],
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"survey-create": {
"description": "Creates a new survey in the project. Surveys can be popover or API-based and support various question types including open-ended, multiple choice, rating, and link questions. Once created, you should ask the user if they want to add the survey to their application code.",
"category": "Surveys",
"summary": "Creates a new survey in the project.",
"required_scopes": ["survey:write"],
"feature": "surveys",
"title": "Create survey",
"annotations": {
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true,
"readOnlyHint": false
}
},
"survey-get": {
"description": "Get a specific survey by ID. Returns the survey configuration including questions, targeting, and scheduling details.",
"category": "Surveys",
"summary": "Get a specific survey by ID.",
"required_scopes": ["survey:read"],
"feature": "surveys",
"title": "Get survey",
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"surveys-get-all": {
"description": "Get all surveys in the project with optional filtering. Can filter by search term or use pagination.",
"category": "Surveys",
"summary": "Get all surveys in the project with optional filtering.",
"required_scopes": ["survey:read"],
"feature": "surveys",
"title": "Get all surveys",
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"survey-update": {
"description": "Update an existing survey by ID. Can update name, description, questions, scheduling, and other survey properties.",
"category": "Surveys",
"summary": "Update an existing survey by ID.",
"required_scopes": ["survey:write"],
"feature": "surveys",
"title": "Update survey",
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"survey-delete": {
"description": "Delete a survey by ID (soft delete - marks as archived).",
"category": "Surveys",
"summary": "Delete a survey by ID.",
"required_scopes": ["survey:write"],
"feature": "surveys",
"title": "Delete survey",
"annotations": {
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": false
}
},
"surveys-global-stats": {
"description": "Get aggregated response statistics across all surveys in the project. Includes event counts (shown, dismissed, sent), unique respondents, conversion rates, and timing data. Supports optional date filtering.",
"category": "Surveys",
"summary": "Get aggregated response statistics across all surveys.",
"required_scopes": ["survey:read"],
"feature": "surveys",
"title": "Get all survey response stats",
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
},
"survey-stats": {
"description": "Get response statistics for a specific survey. Includes detailed event counts (shown, dismissed, sent), unique respondents, conversion rates, and timing data. Supports optional date filtering.",
"category": "Surveys",
"summary": "Get response statistics for a specific survey.",
"required_scopes": ["survey:read"],
"feature": "surveys",
"title": "Get survey response stats",
"annotations": {
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": true,
"readOnlyHint": true
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,72 @@
# @posthog/agent-toolkit
Tools that give agents access to your PostHog data and let them manage feature flags, create insights, and more.
## Installation
```bash
npm install @posthog/agent-toolkit
```
## Quick Start
The toolkit provides integrations for popular AI frameworks:
### Using with Vercel AI SDK
```typescript
import { openai } from '@ai-sdk/openai'
import { generateText } from 'ai'
import { PostHogAgentToolkit } from '@posthog/agent-toolkit/integrations/ai-sdk'
const toolkit = new PostHogAgentToolkit({
posthogPersonalApiKey: process.env.POSTHOG_PERSONAL_API_KEY!,
posthogApiBaseUrl: 'https://us.posthog.com', // or https://eu.posthog.com if you are hosting in the EU
})
const result = await generateText({
model: openai('gpt-4'),
tools: await toolkit.getTools(),
prompt: 'Analyze our product usage by getting the top 5 most interesting insights and summarising the data from them.',
})
```
**[→ See full Vercel AI SDK example](https://github.com/posthog/mcp/tree/main/examples/ai-sdk)**
### Using with LangChain.js
```typescript
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { ChatOpenAI } from '@langchain/openai'
import { AgentExecutor, createToolCallingAgent } from 'langchain/agents'
import { PostHogAgentToolkit } from '@posthog/agent-toolkit/integrations/langchain'
const toolkit = new PostHogAgentToolkit({
posthogPersonalApiKey: process.env.POSTHOG_PERSONAL_API_KEY!,
posthogApiBaseUrl: 'https://us.posthog.com', // or https://eu.posthog.com if you are hosting in the EU
})
const tools = await toolkit.getTools()
const llm = new ChatOpenAI({ model: 'gpt-4' })
const prompt = ChatPromptTemplate.fromMessages([
['system', 'You are a data analyst with access to PostHog analytics'],
['human', '{input}'],
new MessagesPlaceholder('agent_scratchpad'),
])
const agent = createToolCallingAgent({ llm, tools, prompt })
const executor = new AgentExecutor({ agent, tools })
const result = await executor.invoke({
input: 'Analyze our product usage by getting the top 5 most interesting insights and summarising the data from them.',
})
```
**[→ See full LangChain.js example](https://github.com/posthog/mcp/tree/main/examples/langchain-js)**
## Available Tools
For a list of all available tools, please see the [docs](https://posthog.com/docs/model-context-protocol).

View File

@@ -0,0 +1,87 @@
{
"name": "@posthog/agent-toolkit",
"version": "0.2.2",
"description": "PostHog tools for AI agents",
"main": "dist/index.js",
"module": "dist/index.mjs",
"types": "dist/index.d.ts",
"exports": {
".": {
"types": "./dist/index.d.ts",
"import": "./dist/index.mjs",
"require": "./dist/index.js"
},
"./tools": {
"types": "./dist/tools.d.ts",
"import": "./dist/tools.mjs",
"require": "./dist/tools.js"
},
"./integrations/ai-sdk": {
"types": "./dist/ai-sdk.d.ts",
"import": "./dist/ai-sdk.mjs",
"require": "./dist/ai-sdk.js"
},
"./integrations/langchain": {
"types": "./dist/langchain.d.ts",
"import": "./dist/langchain.mjs",
"require": "./dist/langchain.js"
}
},
"scripts": {
"build": "tsup",
"dev": "wrangler dev",
"deploy": "wrangler deploy",
"cf-typegen": "wrangler types",
"inspector": "npx @modelcontextprotocol/inspector npx -y mcp-remote@latest http://localhost:8787/mcp",
"test": "vitest",
"test:integration": "vitest run --config vitest.integration.config.mts",
"test:watch": "vitest watch",
"typecheck": "tsc --noEmit",
"generate-client": "tsx scripts/update-openapi-client.ts"
},
"keywords": [
"posthog",
"mcp",
"ai",
"agents",
"analytics",
"feature-flags"
],
"author": "PostHog Inc.",
"license": "MIT",
"peerDependencies": {
"@langchain/core": "^0.3.72",
"@langchain/openai": "^0.6.9",
"ai": "^5.0.0",
"langchain": "^0.3.31"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.17.3",
"agents": "^0.0.113",
"ai": "^5.0.18",
"posthog-node": "^4.18.0",
"uuid": "^11.1.0",
"zod": "^3.24.4"
},
"devDependencies": {
"@langchain/core": "^0.3.72",
"@langchain/openai": "^0.6.9",
"@types/dotenv": "^6.1.1",
"@types/node": "^22.15.34",
"dotenv": "^16.4.7",
"langchain": "^0.3.31",
"tsup": "^8.5.0",
"tsx": "^4.20.5",
"typed-openapi": "^2.2.2",
"typescript": "^5.8.3",
"vite": "^5.0.0",
"vite-tsconfig-paths": "^5.1.4",
"vitest": "^3.2.4",
"wrangler": "^4.14.4",
"zod-to-json-schema": "^3.24.6"
},
"files": [
"dist",
"README.md"
]
}

6132
products/mcp/typescript/pnpm-lock.yaml generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env tsx
// Generates JSON schema from Zod tool-inputs schemas for Python Pydantic schema generation
import * as fs from 'node:fs'
import * as path from 'node:path'
import { zodToJsonSchema } from 'zod-to-json-schema'
import * as schemas from '../src/schema/tool-inputs'
// Output location consumed by the Python Pydantic generator; keep the relative path in sync.
const outputPath = path.join(__dirname, '../../schema/tool-inputs.json')

try {
    // Convert all Zod schemas to JSON Schema
    const jsonSchemas = {
        $schema: 'http://json-schema.org/draft-07/schema#',
        definitions: {} as Record<string, any>,
    }

    // Add each schema to the definitions. Only exports whose names end in
    // "Schema" are treated as tool-input schemas; everything else is skipped.
    for (const [schemaName, zodSchema] of Object.entries(schemas)) {
        if (schemaName.endsWith('Schema')) {
            const jsonSchema = zodToJsonSchema(zodSchema, {
                name: schemaName,
                $refStrategy: 'none',
            })

            // Remove the top-level $schema to avoid conflicts
            // (keys set to undefined are dropped by JSON.stringify below)
            jsonSchema.$schema = undefined

            // Extract the actual schema from nested definitions if present
            let actualSchema = jsonSchema
            const schemaObj = jsonSchema as any

            // If there's nested definitions with the schema name, use that
            if (schemaObj.definitions?.[schemaName]) {
                actualSchema = schemaObj.definitions[schemaName]
            }
            // If there's a $ref pointing to itself, and definitions exist, extract the definition
            else if (schemaObj.$ref?.includes(schemaName) && schemaObj.definitions) {
                actualSchema = schemaObj.definitions[schemaName] || schemaObj
            }

            // Clean up any remaining $schema references
            if (actualSchema.$schema) {
                actualSchema.$schema = undefined
            }

            jsonSchemas.definitions[schemaName] = actualSchema
        }
    }

    // Write the combined schema
    const schemaString = JSON.stringify(jsonSchemas, null, 2)
    fs.writeFileSync(outputPath, schemaString)
} catch (err) {
    console.error('❌ Error generating schema:', err)
    process.exit(1)
}

View File

@@ -0,0 +1,68 @@
#!/usr/bin/env tsx
import { execSync } from 'node:child_process'
import * as fs from 'node:fs'
// Remote OpenAPI schema location and the local files this script touches.
const SCHEMA_URL = 'https://app.posthog.com/api/schema/'
const TEMP_SCHEMA_PATH = 'temp-openapi.yaml'
const OUTPUT_PATH = 'src/api/generated.ts'

/**
 * Downloads the PostHog OpenAPI schema and saves it to a temporary file.
 * @returns true on success; false (after logging the error) on any failure.
 */
async function fetchSchema() {
    try {
        const response = await fetch(SCHEMA_URL)
        if (!response.ok) {
            throw new Error(`Failed to fetch schema: ${response.status} ${response.statusText}`)
        }

        const schemaText = await response.text()
        fs.writeFileSync(TEMP_SCHEMA_PATH, schemaText, 'utf-8')
        return true
    } catch (error) {
        console.error('Error fetching schema:', error)
        return false
    }
}
/**
 * Runs typed-openapi over the downloaded schema to regenerate the API client.
 * @returns true on success; false (after logging the error) on failure.
 */
function generateClient() {
    const command = `pnpm typed-openapi ${TEMP_SCHEMA_PATH} --output ${OUTPUT_PATH}`
    try {
        execSync(command, { stdio: 'inherit' })
        return true
    } catch (error) {
        console.error('Error generating client:', error)
        return false
    }
}
/** Best-effort removal of the temporary schema file; never throws. */
function cleanup() {
    try {
        if (fs.existsSync(TEMP_SCHEMA_PATH)) {
            fs.unlinkSync(TEMP_SCHEMA_PATH)
        }
    } catch (error) {
        console.error('Warning: Could not clean up temporary file:', error)
    }
}
/**
 * Orchestrates the regeneration: fetch schema → generate client → cleanup.
 * Exits non-zero if either the download or the generation fails; the
 * temporary file is removed even when generation fails.
 */
async function main() {
    if (!(await fetchSchema())) {
        process.exit(1)
    }

    const generated = generateClient()
    cleanup()

    if (!generated) {
        process.exit(1)
    }
}

main().catch((error) => {
    console.error('Unexpected error:', error)
    process.exit(1)
})

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,52 @@
import type { ApiConfig } from './client'
import type { createApiClient } from './generated'
/**
 * Builds the `fetch` implementation handed to the generated OpenAPI client.
 *
 * Responsibilities:
 * - attaches the `Authorization: Bearer <token>` header from `config`
 * - copies query parameters onto the request URL
 * - JSON-encodes the body for mutating methods and sets `Content-Type`
 * - throws a descriptive `Error` (status + body) on any non-2xx response
 */
export const buildApiFetcher: (config: ApiConfig) => Parameters<typeof createApiClient>[0] = (
    config
) => {
    return {
        fetch: async (input) => {
            const headers = new Headers()
            headers.set('Authorization', `Bearer ${config.apiToken}`)

            // Handle query parameters
            if (input.urlSearchParams) {
                input.url.search = input.urlSearchParams.toString()
            }

            // Handle request body for mutation methods
            const body = ['post', 'put', 'patch', 'delete'].includes(input.method.toLowerCase())
                ? JSON.stringify(input.parameters?.body)
                : undefined

            if (body) {
                headers.set('Content-Type', 'application/json')
            }

            // Add custom headers
            if (input.parameters?.header) {
                for (const [key, value] of Object.entries(input.parameters.header)) {
                    if (value != null) {
                        headers.set(key, String(value))
                    }
                }
            }

            const response = await fetch(input.url, {
                method: input.method.toUpperCase(),
                ...(body && { body }),
                headers,
                ...input.overrides,
            })

            if (!response.ok) {
                // Read the body as text first: error responses are not guaranteed
                // to be JSON (e.g. HTML gateway errors), and calling
                // `response.json()` directly would throw a SyntaxError that
                // masks the real HTTP status.
                const raw = await response.text()
                let detail = raw
                try {
                    detail = JSON.stringify(JSON.parse(raw))
                } catch {
                    // Not JSON — report the raw body as-is.
                }
                throw new Error(`Failed request: [${response.status}] ${detail}`)
            }

            return response
        },
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,2 @@
export { getToolsFromContext, PostHogAgentToolkit, type PostHogToolsOptions } from './tools'
export type { Context, State, Tool } from './tools/types'

View File

@@ -0,0 +1,44 @@
/** Shape of a chat-completions response returned by the Inkeep API. */
export interface InkeepResponse {
    choices: Array<{
        message: {
            content: string
        }
    }>
}

/**
 * Queries the Inkeep-backed docs assistant with a free-form question and
 * returns the assistant's answer text.
 *
 * @param apiKey - Inkeep API key; an empty value is rejected immediately.
 * @param userQuery - The user's question, sent as a single chat message.
 * @throws If the key is missing, the HTTP call fails, or the response carries no content.
 */
export async function docsSearch(apiKey: string, userQuery: string): Promise<string> {
    if (!apiKey) {
        throw new Error('No API key provided')
    }

    const requestBody = {
        model: 'inkeep-context-gpt-4o',
        messages: [{ role: 'user', content: userQuery }],
    }

    const response = await fetch('https://api.inkeep.com/v1/chat/completions', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify(requestBody),
    })

    if (!response.ok) {
        const errorText = await response.text()
        console.error('Inkeep API error:', errorText)
        throw new Error(`Error querying Inkeep API: ${response.status} ${errorText}`)
    }

    const data = (await response.json()) as InkeepResponse
    const content = data.choices?.[0]?.message?.content
    if (content) {
        return content
    }

    console.error('Inkeep API response format unexpected:', data)
    throw new Error('Unexpected response format from Inkeep API.')
}

View File

@@ -0,0 +1,77 @@
import { ApiClient } from '@/api/client'
import { SessionManager } from '@/lib/utils/SessionManager'
import { StateManager } from '@/lib/utils/StateManager'
import { MemoryCache } from '@/lib/utils/cache/MemoryCache'
import { hash } from '@/lib/utils/helper-functions'
import { getToolsFromContext } from '@/tools'
import type { Context } from '@/tools/types'
import { type Tool as VercelTool, tool } from 'ai'
import type { z } from 'zod'
/**
* Options for the PostHog Agent Toolkit
*/
export type PostHogToolsOptions = {
posthogPersonalApiKey: string
posthogApiBaseUrl: string
}
export class PostHogAgentToolkit {
public options: PostHogToolsOptions
/**
* Create a new PostHog Agent Toolkit
* @param options - The options for the PostHog Agent Toolkit
*/
constructor(options: PostHogToolsOptions) {
this.options = options
}
/**
* Get the context for the PostHog Agent Toolkit
* @returns A context object
*/
getContext(): Context {
const api = new ApiClient({
apiToken: this.options.posthogPersonalApiKey,
baseUrl: this.options.posthogApiBaseUrl,
})
const scope = hash(this.options.posthogPersonalApiKey)
const cache = new MemoryCache(scope)
return {
api,
cache,
env: {
INKEEP_API_KEY: undefined,
},
stateManager: new StateManager(cache, api),
sessionManager: new SessionManager(cache),
}
}
/**
* Get all the tools for the PostHog Agent Toolkit
* @returns A record of tool names to Vercel tools
*/
async getTools(): Promise<Record<string, VercelTool>> {
const context = this.getContext()
const allTools = await getToolsFromContext(context)
return allTools.reduce(
(acc, t) => {
acc[t.name] = tool({
description: t.description,
inputSchema: t.schema,
execute: async (arg: z.output<typeof t.schema>) => {
return t.handler(context, arg)
},
})
return acc
},
{} as Record<string, VercelTool>
)
}
}

View File

@@ -0,0 +1,81 @@
import { ApiClient } from '@/api/client'
import { SessionManager } from '@/lib/utils/SessionManager'
import { StateManager } from '@/lib/utils/StateManager'
import { MemoryCache } from '@/lib/utils/cache/MemoryCache'
import { hash } from '@/lib/utils/helper-functions'
import { getToolsFromContext } from '@/tools'
import type { Context } from '@/tools/types'
import { DynamicStructuredTool } from '@langchain/core/tools'
import type { z } from 'zod'
/**
* Options for the PostHog Agent Toolkit
*/
export type PostHogToolsOptions = {
posthogPersonalApiKey: string
posthogApiBaseUrl: string
}
export class PostHogAgentToolkit {
public options: PostHogToolsOptions
/**
* Create a new PostHog Agent Toolkit
* @param options - The options for the PostHog Agent Toolkit
*/
constructor(options: PostHogToolsOptions) {
this.options = options
}
/**
* Get the context for the PostHog Agent Toolkit
* @returns A context object
*/
getContext(): Context {
const api = new ApiClient({
apiToken: this.options.posthogPersonalApiKey,
baseUrl: this.options.posthogApiBaseUrl,
})
const scope = hash(this.options.posthogPersonalApiKey)
const cache = new MemoryCache(scope)
return {
api,
cache,
env: {
INKEEP_API_KEY: undefined,
},
stateManager: new StateManager(cache, api),
sessionManager: new SessionManager(cache),
}
}
/**
* Get all the tools for the PostHog Agent Toolkit
* @returns An array of DynamicStructuredTool tools
*/
async getTools(): Promise<DynamicStructuredTool[]> {
const context = this.getContext()
const allTools = await getToolsFromContext(context)
return allTools.map((t) => {
return new DynamicStructuredTool({
name: t.name,
description: t.description,
schema: t.schema,
func: async (arg: z.output<typeof t.schema>) => {
const result = await t.handler(context, arg)
if (typeof result === 'string') {
return result
}
const text = result.content.map((c: { text: string }) => c.text).join('\n')
return text
},
})
})
}
}

View File

@@ -0,0 +1,320 @@
import { McpServer, type ToolCallback } from '@modelcontextprotocol/sdk/server/mcp.js'
import { McpAgent } from 'agents/mcp'
import type { z } from 'zod'
import { ApiClient } from '@/api/client'
import { getPostHogClient } from '@/integrations/mcp/utils/client'
import { handleToolError } from '@/integrations/mcp/utils/handleToolError'
import type { AnalyticsEvent } from '@/lib/analytics'
import { CUSTOM_BASE_URL, MCP_DOCS_URL } from '@/lib/constants'
import { SessionManager } from '@/lib/utils/SessionManager'
import { StateManager } from '@/lib/utils/StateManager'
import { DurableObjectCache } from '@/lib/utils/cache/DurableObjectCache'
import { hash } from '@/lib/utils/helper-functions'
import { getToolsFromContext } from '@/tools'
import type { CloudRegion, Context, State, Tool } from '@/tools/types'
// System instructions passed to the MCP server and surfaced to connected clients.
const INSTRUCTIONS = `
- You are a helpful assistant that can query PostHog API.
- If you get errors due to permissions being denied, check that you have the correct active project and that the user has access to the required project.
- If you cannot answer the user's PostHog related request or question using other available tools in this MCP, use the 'docs-search' tool to provide information from the documentation to guide user how they can do it themselves - when doing so provide condensed instructions with links to sources.
`

// Per-request properties attached by the worker's fetch handler (see default export below).
type RequestProperties = {
    userHash: string // stable hash of the API token; scopes the Durable Object cache
    apiToken: string // PostHog personal API key (a "phx_..." token)
    sessionId?: string // optional client session id from the ?sessionId query param
    features?: string[] // optional tool-feature filter from the ?features query param
}
// Define our MCP agent with tools
export class MyMCP extends McpAgent<Env> {
    // MCP server instance that all tools are registered onto.
    server = new McpServer({
        name: 'PostHog',
        version: '1.0.0',
        instructions: INSTRUCTIONS,
    })

    // Durable Object state defaults; every field is resolved lazily per user.
    initialState: State = {
        projectId: undefined,
        orgId: undefined,
        distinctId: undefined,
        region: undefined,
        apiKey: undefined,
    }

    // Backing fields for the lazily-constructed cache, API client, and session manager.
    _cache: DurableObjectCache<State> | undefined
    _api: ApiClient | undefined
    _sessionManager: SessionManager | undefined

    // Per-request properties set by the worker's fetch handler (token, hash, session, features).
    get requestProperties() {
        return this.props as RequestProperties
    }

    // Durable Object storage cache, scoped by the hash of the user's token.
    get cache() {
        if (!this.requestProperties.userHash) {
            throw new Error('User hash is required to use the cache')
        }

        if (!this._cache) {
            this._cache = new DurableObjectCache<State>(
                this.requestProperties.userHash,
                this.ctx.storage
            )
        }

        return this._cache
    }

    // Lazily-built session manager backed by the scoped cache above.
    get sessionManager() {
        if (!this._sessionManager) {
            this._sessionManager = new SessionManager(this.cache)
        }
        return this._sessionManager
    }

    /**
     * Probes the US and EU clouds in parallel with the user's token. The
     * first region whose /users/@me call succeeds is cached and returned;
     * undefined means the token worked in neither region.
     */
    async detectRegion(): Promise<CloudRegion | undefined> {
        const usClient = new ApiClient({
            apiToken: this.requestProperties.apiToken,
            baseUrl: 'https://us.posthog.com',
        })

        const euClient = new ApiClient({
            apiToken: this.requestProperties.apiToken,
            baseUrl: 'https://eu.posthog.com',
        })

        const [usResult, euResult] = await Promise.all([
            usClient.users().me(),
            euClient.users().me(),
        ])

        if (usResult.success) {
            await this.cache.set('region', 'us')
            return 'us'
        }

        if (euResult.success) {
            await this.cache.set('region', 'eu')
            return 'eu'
        }

        return undefined
    }

    // Resolves the API base URL: explicit override first, then cached or
    // freshly-detected region; defaults to the US cloud when inconclusive.
    async getBaseUrl() {
        if (CUSTOM_BASE_URL) {
            return CUSTOM_BASE_URL
        }

        const region = (await this.cache.get('region')) || (await this.detectRegion())

        if (region === 'eu') {
            return 'https://eu.posthog.com'
        }

        return 'https://us.posthog.com'
    }

    // Lazily builds (and memoizes) the API client for the resolved base URL.
    async api() {
        if (!this._api) {
            const baseUrl = await this.getBaseUrl()
            this._api = new ApiClient({
                apiToken: this.requestProperties.apiToken,
                baseUrl,
            })
        }
        return this._api
    }

    // The user's analytics distinct id, fetched once from /users/@me and cached.
    async getDistinctId() {
        let _distinctId = await this.cache.get('distinctId')

        if (!_distinctId) {
            const userResult = await (await this.api()).users().me()
            if (!userResult.success) {
                throw new Error(`Failed to get user: ${userResult.error.message}`)
            }
            await this.cache.set('distinctId', userResult.data.distinct_id)
            _distinctId = userResult.data.distinct_id
        }

        return _distinctId
    }

    /**
     * Captures a usage analytics event, attaching the session UUID when a
     * session id was supplied with the request.
     */
    async trackEvent(event: AnalyticsEvent, properties: Record<string, any> = {}) {
        try {
            const distinctId = await this.getDistinctId()

            const client = getPostHogClient()
            client.capture({
                distinctId,
                event,
                properties: {
                    ...(this.requestProperties.sessionId
                        ? {
                              $session_id: await this.sessionManager.getSessionUuid(
                                  this.requestProperties.sessionId
                              ),
                          }
                        : {}),
                    ...properties,
                },
            })
        } catch (error) {
            // Swallowed deliberately: analytics must never break a tool call.
        }
    }

    /**
     * Registers one tool on the MCP server, wrapping its handler with input
     * validation, analytics calls, and structured error reporting.
     */
    registerTool<TSchema extends z.ZodRawShape>(
        tool: Tool<z.ZodObject<TSchema>>,
        handler: (params: z.infer<z.ZodObject<TSchema>>) => Promise<any>
    ): void {
        const wrappedHandler = async (params: z.infer<z.ZodObject<TSchema>>) => {
            // Validate against the tool's own schema so invalid input is
            // reported back as text instead of failing opaquely.
            const validation = tool.schema.safeParse(params)

            if (!validation.success) {
                await this.trackEvent('mcp tool call', {
                    tool: tool.name,
                    valid_input: false,
                    input: params,
                })

                return [
                    {
                        type: 'text',
                        text: `Invalid input: ${validation.error.message}`,
                    },
                ]
            }

            await this.trackEvent('mcp tool call', {
                tool: tool.name,
                valid_input: true,
                input: params,
            })

            try {
                const result = await handler(params)

                await this.trackEvent('mcp tool response', {
                    tool: tool.name,
                    valid_input: true,
                    input: params,
                    output: result,
                })

                return result
            } catch (error: any) {
                // Errors become an isError result (via handleToolError), not a
                // protocol-level failure, so the model can see and self-correct.
                const distinctId = await this.getDistinctId()
                return handleToolError(
                    error,
                    tool.name,
                    distinctId,
                    this.requestProperties.sessionId
                        ? await this.sessionManager.getSessionUuid(this.requestProperties.sessionId)
                        : undefined
                )
            }
        }

        this.server.registerTool(
            tool.name,
            {
                title: tool.title,
                description: tool.description,
                inputSchema: tool.schema.shape,
                annotations: tool.annotations,
            },
            wrappedHandler as unknown as ToolCallback<TSchema>
        )
    }

    // Assembles the shared context handed to every tool handler.
    async getContext(): Promise<Context> {
        const api = await this.api()

        return {
            api,
            cache: this.cache,
            env: this.env,
            stateManager: new StateManager(this.cache, api),
            sessionManager: this.sessionManager,
        }
    }

    // Registers all tools (optionally filtered by requested features) at startup.
    async init() {
        const context = await this.getContext()

        // Get features from request properties if available
        const features = this.requestProperties.features
        const allTools = await getToolsFromContext(context, features)

        for (const tool of allTools) {
            this.registerTool(tool, async (params) => tool.handler(context, params))
        }
    }
}
/**
 * Cloudflare Worker entrypoint. Authenticates the request, attaches the
 * per-request properties consumed by MyMCP, and routes /mcp and /sse traffic.
 */
export default {
    async fetch(request: Request, env: Env, ctx: ExecutionContext) {
        const url = new URL(request.url)

        // Landing page pointing at the setup docs.
        if (url.pathname === '/') {
            return new Response(
                `<p>Welcome to the PostHog MCP Server. For setup and usage instructions, see: <a href="${MCP_DOCS_URL}">${MCP_DOCS_URL}</a></p>`,
                {
                    headers: {
                        'content-type': 'text/html',
                    },
                }
            )
        }

        const token = request.headers.get('Authorization')?.split(' ')[1]

        if (!token) {
            return new Response(
                `No token provided, please provide a valid API token. View the documentation for more information: ${MCP_DOCS_URL}`,
                {
                    status: 401,
                }
            )
        }

        if (!token.startsWith('phx_')) {
            return new Response(
                `Invalid token, please provide a valid API token. View the documentation for more information: ${MCP_DOCS_URL}`,
                {
                    status: 401,
                }
            )
        }

        // Search params are used to build up the list of available tools. If no features are
        // provided, all tools are available. If features are provided, only tools matching
        // those features will be available (comma-separated, e.g. ?features=org,insights).
        const featuresParam = url.searchParams.get('features')
        const features = featuresParam ? featuresParam.split(',').filter(Boolean) : undefined

        ctx.props = {
            apiToken: token,
            userHash: hash(token),
            sessionId: url.searchParams.get('sessionId') || undefined,
            features,
        }

        if (url.pathname.startsWith('/mcp')) {
            return MyMCP.serve('/mcp').fetch(request, env, ctx)
        }

        if (url.pathname.startsWith('/sse')) {
            return MyMCP.serveSSE('/sse').fetch(request, env, ctx)
        }

        return new Response('Not found', { status: 404 })
    },
}

View File

@@ -0,0 +1,15 @@
import { PostHog } from 'posthog-node'
// Module-level singleton so every caller shares one PostHog client.
let _client: PostHog | undefined

/**
 * Returns the shared PostHog analytics client, creating it on first use.
 * Configured to flush every event immediately (flushAt: 1, flushInterval: 0).
 */
export const getPostHogClient = () => {
    _client ??= new PostHog('sTMFPsFhdP1Ssg', {
        host: 'https://us.i.posthog.com',
        flushAt: 1,
        flushInterval: 0,
    })
    return _client
}

View File

@@ -0,0 +1,83 @@
import { getPostHogClient } from '@/integrations/mcp/utils/client'
import type { CallToolResult } from '@modelcontextprotocol/sdk/types.js'
/**
 * Error wrapper that records which MCP tool failed, when it failed, and the
 * underlying cause, in a shape convenient for analytics capture.
 */
export class MCPToolError extends Error {
    public readonly tool: string
    public readonly originalError: unknown
    public readonly timestamp: Date

    constructor(message: string, tool: string, originalError?: unknown) {
        super(message)
        this.name = 'MCPToolError'
        this.tool = tool
        this.originalError = originalError
        this.timestamp = new Date()
    }

    /** Serializable payload describing this error for event tracking. */
    getTrackingData() {
        const cause =
            this.originalError instanceof Error
                ? {
                      name: this.originalError.name,
                      message: this.originalError.message,
                      stack: this.originalError.stack,
                  }
                : String(this.originalError)

        return {
            tool: this.tool,
            message: this.message,
            timestamp: this.timestamp.toISOString(),
            originalError: cause,
        }
    }
}
/**
 * Converts a tool failure into a structured error result.
 * Any errors that originate from the tool SHOULD be reported inside the result
 * object, with `isError` set to true, _not_ as an MCP protocol-level error
 * response. Otherwise, the LLM would not be able to see that an error occurred
 * and self-correct.
 *
 * @param error - The error object.
 * @param tool - Tool that caused the error.
 * @param distinctId - User's distinct ID for tracking.
 * @param sessionUuid - Session UUID for tracking.
 * @returns A structured error message.
 */
export function handleToolError(
    error: any,
    tool?: string,
    distinctId?: string,
    sessionUuid?: string
): CallToolResult {
    // Wrap anything that isn't already an MCPToolError, preserving the cause.
    const mcpError =
        error instanceof MCPToolError
            ? error
            : new MCPToolError(
                  error instanceof Error ? error.message : String(error),
                  tool || 'unknown',
                  error
              )

    const properties: Record<string, any> = {
        team: 'growth',
        tool: mcpError.tool,
        $exception_fingerprint: `${mcpError.tool}-${mcpError.message}`,
        ...(sessionUuid ? { $session_id: sessionUuid } : {}),
    }

    getPostHogClient().captureException(mcpError, distinctId, properties)

    return {
        content: [
            {
                type: 'text',
                text: `Error: [${mcpError.tool}]: ${mcpError.message}`,
            },
        ],
        isError: true,
    }
}

View File

@@ -0,0 +1 @@
// Names of the usage-analytics events emitted by the MCP server (see MyMCP.trackEvent).
export type AnalyticsEvent = 'mcp tool call' | 'mcp tool response'

View File

@@ -0,0 +1,7 @@
import { env } from 'cloudflare:workers'

// Local-development toggle; when true (and no env override) the base URL falls back to localhost.
export const DEV = false
// When set, skips region detection entirely and targets this base URL (see getBaseUrl in the server).
export const CUSTOM_BASE_URL = env.POSTHOG_BASE_URL || (DEV ? 'http://localhost:8010' : undefined)
export const MCP_DOCS_URL = 'https://posthog.com/docs/model-context-protocol'

View File

@@ -0,0 +1,3 @@
// Machine-readable error codes shared across the MCP server.
export enum ErrorCode {
    INVALID_API_KEY = 'INVALID_API_KEY',
}

View File

@@ -0,0 +1 @@
// A string that must start with the literal prefix `T` followed by a colon,
// e.g. PrefixedString<'session'> matches 'session:<id>'.
type PrefixedString<T extends string> = `${T}:${string}`

View File

@@ -0,0 +1,48 @@
import type { ScopedCache } from '@/lib/utils/cache/ScopedCache'
import type { State } from '@/tools'
import { v7 as uuidv7 } from 'uuid'
/**
 * Maps client-provided session ids to internally generated session UUIDs
 * (uuid v7), persisted in the scoped cache under `session:<id>` keys.
 */
export class SessionManager {
    private cache: ScopedCache<State>

    constructor(cache: ScopedCache<State>) {
        this.cache = cache
    }

    /** Cache key under which a session's UUID is stored. */
    async _getKey(sessionId: string): Promise<PrefixedString<'session'>> {
        return `session:${sessionId}`
    }

    /**
     * Returns the UUID for a session, minting and persisting a fresh
     * uuid v7 the first time a session id is seen.
     */
    async getSessionUuid(sessionId: string): Promise<string> {
        const key = await this._getKey(sessionId)
        const cached = await this.cache.get(key)
        if (cached?.uuid) {
            return cached.uuid
        }

        const freshUuid = uuidv7()
        await this.cache.set(key, { uuid: freshUuid })
        return freshUuid
    }

    /** Whether a UUID has already been minted for this session id. */
    async hasSession(sessionId: string): Promise<boolean> {
        const entry = await this.cache.get(await this._getKey(sessionId))
        return Boolean(entry?.uuid)
    }

    /** Forgets a single session's UUID. */
    async removeSession(sessionId: string): Promise<void> {
        await this.cache.delete(await this._getKey(sessionId))
    }

    /** Clears the entire scoped cache (not only session keys). */
    async clearAllSessions(): Promise<void> {
        await this.cache.clear()
    }
}

View File

@@ -0,0 +1,145 @@
import type { ApiClient } from '@/api/client'
import type { ApiUser } from '@/schema/api'
import type { State } from '@/tools/types'
import type { ScopedCache } from './cache/ScopedCache'
export class StateManager {
private _cache: ScopedCache<State>
private _api: ApiClient
private _user?: ApiUser
constructor(cache: ScopedCache<State>, api: ApiClient) {
this._cache = cache
this._api = api
}
private async _fetchUser() {
const userResult = await this._api.users().me()
if (!userResult.success) {
throw new Error(`Failed to get user: ${userResult.error.message}`)
}
return userResult.data
}
async getUser() {
if (!this._user) {
this._user = await this._fetchUser()
}
return this._user
}
private async _fetchApiKey() {
const apiKeyResult = await this._api.apiKeys().current()
if (!apiKeyResult.success) {
throw new Error(`Failed to get API key: ${apiKeyResult.error.message}`)
}
return apiKeyResult.data
}
async getApiKey() {
let _apiKey = await this._cache.get('apiKey')
if (!_apiKey) {
_apiKey = await this._fetchApiKey()
await this._cache.set('apiKey', _apiKey)
}
return _apiKey
}
async getDistinctId() {
let _distinctId = await this._cache.get('distinctId')
if (!_distinctId) {
const user = await this.getUser()
await this._cache.set('distinctId', user.distinct_id)
_distinctId = user.distinct_id
}
return _distinctId
}
/**
 * Derives the default organization/project for the current credentials.
 *
 * Resolution order:
 *  1. Project-scoped key → that single project (error if scoped to several).
 *  2. Key unscoped, or scoped to the user's active org → active org + active team.
 *  3. Otherwise → first scoped org, and the first project listed within it.
 *
 * @returns `organizationId` is omitted for purely project-scoped keys.
 * @throws When the key spans multiple projects, the project listing fails,
 *         or the scoped organization exposes no projects.
 */
private async _getDefaultOrganizationAndProject(): Promise<{
    organizationId?: string
    projectId: number
}> {
    const { scoped_organizations, scoped_teams } = await this.getApiKey()
    const { organization: activeOrganization, team: activeTeam } = await this.getUser()
    if (scoped_teams.length > 0) {
        // Keys scoped to projects should only be scoped to one project
        if (scoped_teams.length > 1) {
            throw new Error(
                'API key has access to multiple projects, please specify a single project ID or change the API key to have access to an organization to include the projects within it.'
            )
        }
        const projectId = scoped_teams[0]!
        return { projectId }
    }
    // Unscoped key, or one that covers the user's active organization:
    // default to the active organization and active team.
    if (
        scoped_organizations.length === 0 ||
        scoped_organizations.includes(activeOrganization.id)
    ) {
        return { organizationId: activeOrganization.id, projectId: activeTeam.id }
    }
    // Key is scoped to other organization(s): fall back to the first scoped
    // org and the first project it exposes.
    const organizationId = scoped_organizations[0]!
    const projectsResult = await this._api
        .organizations()
        .projects({ orgId: organizationId })
        .list()
    if (!projectsResult.success) {
        throw projectsResult.error
    }
    if (projectsResult.data.length === 0) {
        throw new Error('API key does not have access to any projects')
    }
    // NOTE(review): assumes each list() element coerces to a numeric project id
    // (Number(...) of a plain project object would be NaN) — confirm the element shape.
    const projectId = projectsResult.data[0]!
    return { organizationId, projectId: Number(projectId) }
}
/** Resolves the default org/project and persists both into the scoped cache. */
async setDefaultOrganizationAndProject() {
    const { organizationId, projectId } = await this._getDefaultOrganizationAndProject()
    if (organizationId) {
        await this._cache.set('orgId', organizationId)
    }
    await this._cache.set('projectId', `${projectId}`)
    return { organizationId, projectId }
}
/** Returns the cached organization ID, computing defaults on a cache miss. */
async getOrgID(): Promise<string | undefined> {
    const cached = await this._cache.get('orgId')
    if (cached) {
        return cached
    }
    const defaults = await this.setDefaultOrganizationAndProject()
    return defaults.organizationId
}
/** Returns the cached project ID as a string, computing defaults on a cache miss. */
async getProjectId(): Promise<string> {
    const cached = await this._cache.get('projectId')
    if (cached) {
        return cached
    }
    const defaults = await this.setDefaultOrganizationAndProject()
    return defaults.projectId.toString()
}
}

View File

@@ -0,0 +1,54 @@
import { ApiListResponseSchema } from '@/schema/api'
import type { z } from 'zod'
/**
 * Fetches every page of a paginated PostHog list endpoint, following `next`
 * links until exhausted.
 *
 * @param url - URL of the first page; subsequent URLs come from `next`.
 * @param apiToken - Personal API key sent as a Bearer token.
 * @param dataSchema - Zod schema each result item is validated against.
 * @returns All validated items across every page, in API order.
 * @throws When any page request fails or a payload fails schema validation.
 */
export const withPagination = async <T>(
    url: string,
    apiToken: string,
    dataSchema: z.ZodType<T>
): Promise<T[]> => {
    // Build the envelope schema once instead of once per page.
    const responseSchema = ApiListResponseSchema<z.ZodType<T>>(dataSchema)
    const results: T[] = []
    // Iterate instead of recursing: long paginations neither grow the call
    // stack nor allocate an intermediate array per page (the original also
    // did a pointless identity .map over each page's results).
    let nextUrl: string | null | undefined = url
    while (nextUrl) {
        const response = await fetch(nextUrl, {
            headers: {
                Authorization: `Bearer ${apiToken}`,
            },
        })
        if (!response.ok) {
            throw new Error(`Failed to fetch ${nextUrl}: ${response.statusText}`)
        }
        const data = await response.json()
        const parsedData = responseSchema.parse(data)
        results.push(...parsedData.results)
        nextUrl = parsedData.next
    }
    return results
}
/**
 * True when `scopes` grants `requiredScope`: via the `*` wildcard, an exact
 * match, or the matching `:write` scope when only `:read` is required.
 */
export const hasScope = (scopes: string[], requiredScope: string) => {
    if (scopes.includes('*') || scopes.includes(requiredScope)) {
        return true
    }
    // A write grant implies the corresponding read permission.
    const needsOnlyRead = requiredScope.endsWith(':read')
    return needsOnlyRead && scopes.includes(requiredScope.replace(':read', ':write'))
}
/** True when every scope in `requiredScopes` is granted by `scopes`. */
export const hasScopes = (scopes: string[], requiredScopes: string[]) => {
    for (const requiredScope of requiredScopes) {
        if (!hasScope(scopes, requiredScope)) {
            return false
        }
    }
    return true
}

View File

@@ -0,0 +1,52 @@
import { ScopedCache } from '@/lib/utils/cache/ScopedCache'
// Local structural subset of the Durable Object storage API used by
// DurableObjectCache, so this module carries no hard type dependency on it.
interface DurableObjectStorage {
    get<T = unknown>(key: string): Promise<T | undefined>
    put<T>(key: string, value: T): Promise<void>
    // Single-key form: resolves to whether the key existed.
    delete(key: string): Promise<boolean>
    // Bulk form: resolves to the number of keys removed.
    delete(keys: string[]): Promise<number>
    list(options?: {
        prefix?: string
        start?: string
        end?: string
        limit?: number
        reverse?: boolean
    }): Promise<Map<string, unknown>>
}
/**
 * ScopedCache backed by Durable Object storage. Every key is namespaced as
 * `user:<scope>:<key>` so multiple users may share one storage instance.
 */
export class DurableObjectCache<T extends Record<string, any>> extends ScopedCache<T> {
    private storage: DurableObjectStorage
    private userHash: string

    constructor(scope: string, storage: DurableObjectStorage) {
        super(scope)
        this.userHash = scope
        this.storage = storage
    }

    /** Prefixes a cache key with this user's namespace. */
    private getScopedKey(key: string): string {
        return `user:${this.userHash}:${key}`
    }

    async get<K extends keyof T>(key: K): Promise<T[K] | undefined> {
        return await this.storage.get(this.getScopedKey(key as string))
    }

    async set<K extends keyof T>(key: K, value: T[K]): Promise<void> {
        await this.storage.put(this.getScopedKey(key as string), value)
    }

    async delete<K extends keyof T>(key: K): Promise<void> {
        await this.storage.delete(this.getScopedKey(key as string))
    }

    /** Removes every stored entry belonging to this user's namespace. */
    async clear(): Promise<void> {
        const prefix = `user:${this.userHash}:`
        const keys = await this.storage.list({ prefix })
        const keysArray = Array.from(keys.keys())
        // Skip the bulk delete entirely when the namespace is already empty,
        // avoiding a pointless storage round-trip with an empty key list.
        if (keysArray.length > 0) {
            await this.storage.delete(keysArray)
        }
    }
}

View File

@@ -0,0 +1,30 @@
import { ScopedCache } from '@/lib/utils/cache/ScopedCache'
// Process-wide backing store: every MemoryCache constructed with the same
// scope shares one underlying Map.
const _cacheStore = new Map<string, any>()

/** In-memory ScopedCache; state is shared per scope and lost on process exit. */
export class MemoryCache<T extends Record<string, any>> extends ScopedCache<T> {
    private cache: Map<string, any>

    constructor(scope: string) {
        super(scope)
        const existing = _cacheStore.get(scope)
        if (existing) {
            this.cache = existing
        } else {
            this.cache = new Map()
            _cacheStore.set(scope, this.cache)
        }
    }

    async get<K extends keyof T>(key: K): Promise<T[K] | undefined> {
        return this.cache.get(key as string)
    }

    async set<K extends keyof T>(key: K, value: T[K]): Promise<void> {
        this.cache.set(key as string, value)
    }

    async delete<K extends keyof T>(key: K): Promise<void> {
        this.cache.delete(key as string)
    }

    async clear(): Promise<void> {
        this.cache.clear()
    }
}

View File

@@ -0,0 +1,8 @@
/**
 * Abstract async key/value cache whose entries belong to a per-user scope.
 * Concrete backends (memory, Durable Object storage, …) implement the CRUD
 * primitives; `T` maps cache keys to their stored value types.
 */
export abstract class ScopedCache<T extends Record<string, any>> {
    // `protected readonly` (was `private`) so backends can derive storage keys
    // from the scope instead of duplicating it in their own fields; external
    // callers still cannot reach it, so the change is backward-compatible.
    constructor(protected readonly scope: string) {}

    /** Returns the stored value for `key`, or undefined when absent. */
    abstract get<K extends keyof T>(key: K): Promise<T[K] | undefined>

    /** Stores `value` under `key`, overwriting any previous entry. */
    abstract set<K extends keyof T>(key: K, value: T[K]): Promise<void>

    /** Removes `key` from the cache; a no-op when absent. */
    abstract delete<K extends keyof T>(key: K): Promise<void>

    /** Removes every entry belonging to this scope. */
    abstract clear(): Promise<void>
}

View File

@@ -0,0 +1,22 @@
import crypto from 'node:crypto'
/**
 * Derives a stable 64-hex-char identifier from `data` via PBKDF2-SHA256.
 * The salt is a fixed digest, so equal inputs always yield equal output —
 * this is a deterministic fingerprint, not a per-record password hash.
 */
export function hash(data: string) {
    // Fixed, deterministic salt: hashing must be repeatable across calls.
    const salt = crypto.createHash('sha256').update('posthog_mcp_salt').digest()
    // 100,000 PBKDF2 iterations keep brute-forcing a pre-image expensive
    // while remaining fast enough for interactive use.
    const derived = crypto.pbkdf2Sync(data, salt, 100000, 32, 'sha256')
    return derived.toString('hex')
}
/**
 * Converts a flat params record into URLSearchParams, dropping entries whose
 * value is undefined and stringifying the rest.
 */
export function getSearchParamsFromRecord(
    params: Record<string, string | number | boolean | undefined>
): URLSearchParams {
    const entries = Object.entries(params)
        .filter(([, value]) => value !== undefined)
        .map(([key, value]) => [key, String(value)] as [string, string])
    return new URLSearchParams(entries)
}

View File

@@ -0,0 +1,71 @@
import { z } from 'zod'
// Shape of a PostHog property definition record.
export const ApiPropertyDefinitionSchema = z.object({
    id: z.string(),
    name: z.string(),
    description: z.string().nullish(),
    is_numerical: z.boolean().nullish(),
    updated_at: z.string().nullish(),
    updated_by: z.any().nullish(),
    is_seen_on_filtered_events: z.boolean().nullish(),
    property_type: z.enum(['String', 'Numeric', 'Boolean', 'DateTime']).nullish(),
    verified: z.boolean().nullish(),
    verified_at: z.string().nullish(),
    verified_by: z.any().nullish(),
    hidden: z.boolean().nullish(),
    tags: z.array(z.string()).nullish(),
})

// Shape of a PostHog event definition record.
export const ApiEventDefinitionSchema = z.object({
    id: z.string().uuid(),
    name: z.string(),
    owner: z.string().nullish(),
    description: z.string().nullish(),
    created_at: z.string().nullish(),
    updated_at: z.string().nullish(),
    updated_by: z.any().nullish(),
    last_seen_at: z.string().nullish(),
    verified: z.boolean().nullish(),
    verified_at: z.string().nullish(),
    verified_by: z.any().nullish(),
    hidden: z.boolean().nullish(),
    is_action: z.boolean().nullish(),
    post_to_slack: z.boolean().nullish(),
    default_columns: z.array(z.string().nullish()).nullish(),
    tags: z.array(z.string().nullish()).nullish(),
})

// Generic paginated list envelope; `next`/`previous` carry the URLs of
// adjacent pages (withPagination follows `next` until exhausted).
export const ApiListResponseSchema = <T extends z.ZodType>(dataSchema: T) =>
    z.object({
        count: z.number().nullish(),
        next: z.string().nullish(),
        previous: z.string().nullish(),
        results: z.array(dataSchema),
    })

// Subset of the current-user payload consumed by the MCP server: the user's
// active team/organization plus all organization memberships.
export const ApiUserSchema = z.object({
    distinct_id: z.string(),
    organizations: z.array(
        z.object({
            id: z.string().uuid(),
        })
    ),
    team: z.object({
        id: z.number(),
        organization: z.string().uuid(),
    }),
    organization: z.object({
        id: z.string().uuid(),
    }),
})

// Redacted personal API key: scoping information only, never key material.
export const ApiRedactedPersonalApiKeySchema = z.object({
    scopes: z.array(z.string()), // TODO: restrict available tools automatically based on scopes
    scoped_teams: z.array(z.number()),
    scoped_organizations: z.array(z.string()),
})

export type ApiPropertyDefinition = z.infer<typeof ApiPropertyDefinitionSchema>
export type ApiEventDefinition = z.infer<typeof ApiEventDefinitionSchema>
export type ApiUser = z.infer<typeof ApiUserSchema>
export type ApiRedactedPersonalApiKey = z.infer<typeof ApiRedactedPersonalApiKeySchema>

View File

@@ -0,0 +1,93 @@
import { z } from 'zod'
// One tile on a dashboard: an embedded insight plus layout/refresh metadata.
export const DashboardTileSchema = z.object({
    insight: z.object({
        short_id: z.string(),
        name: z.string(),
        derived_name: z.string().nullable(),
        description: z.string().nullable(),
        query: z.object({
            kind: z.union([z.literal('InsightVizNode'), z.literal('DataVisualizationNode')]),
            source: z
                .any()
                .describe(
                    'For new insights, use the query from your successful query-run tool call. For updates, the existing query can optionally be reused.'
                ), // NOTE: This is intentionally z.any() to avoid populating the context with the complicated query schema, but we prompt the LLM to use 'query-run' to check queries, before creating insights.
        }),
        created_at: z.string().nullish(),
        updated_at: z.string().nullish(),
        favorited: z.boolean().nullish(),
        tags: z.array(z.string()).nullish(),
    }),
    order: z.number(),
    color: z.string().nullish(),
    layouts: z.record(z.any()).nullish(),
    last_refresh: z.string().nullish(),
    is_cached: z.boolean().nullish(),
})

// Base dashboard schema from PostHog API
export const DashboardSchema = z.object({
    id: z.number().int().positive(),
    name: z.string(),
    description: z.string().nullish(),
    pinned: z.boolean().nullish(),
    created_at: z.string(),
    created_by: z
        .object({
            email: z.string().email(),
        })
        .optional()
        .nullable(),
    is_shared: z.boolean().nullish(),
    deleted: z.boolean().nullish(),
    filters: z.record(z.any()).nullish(),
    variables: z.record(z.any()).nullish(),
    tags: z.array(z.string()).nullish(),
    tiles: z.array(DashboardTileSchema.nullish()).nullish(),
})

// Trimmed dashboard view for listings where the full record would be noisy.
export const SimpleDashboardSchema = DashboardSchema.pick({
    id: true,
    name: true,
    description: true,
    tiles: true,
})

// Input schema for creating dashboards
export const CreateDashboardInputSchema = z.object({
    name: z.string().min(1, 'Dashboard name is required'),
    description: z.string().optional(),
    pinned: z.boolean().optional(),
    tags: z.array(z.string()).optional(),
})

// Input schema for updating dashboards; every field optional to allow
// partial updates.
export const UpdateDashboardInputSchema = z.object({
    name: z.string().optional(),
    description: z.string().optional(),
    pinned: z.boolean().optional(),
    tags: z.array(z.string()).optional(),
})

// Input schema for listing dashboards
export const ListDashboardsSchema = z.object({
    limit: z.number().int().positive().optional(),
    offset: z.number().int().nonnegative().optional(),
    search: z.string().optional(),
    pinned: z.boolean().optional(),
})

// Input schema for adding insight to dashboard
export const AddInsightToDashboardSchema = z.object({
    insightId: z.string(),
    dashboardId: z.number().int().positive(),
})

// Type exports
export type PostHogDashboard = z.infer<typeof DashboardSchema>
export type CreateDashboardInput = z.infer<typeof CreateDashboardInputSchema>
export type UpdateDashboardInput = z.infer<typeof UpdateDashboardInputSchema>
export type ListDashboardsData = z.infer<typeof ListDashboardsSchema>
export type AddInsightToDashboardInput = z.infer<typeof AddInsightToDashboardSchema>
export type SimpleDashboard = z.infer<typeof SimpleDashboardSchema>

View File

@@ -0,0 +1,40 @@
import { z } from 'zod'
// Sort keys accepted when listing error-tracking issues.
export enum OrderByErrors {
    Occurrences = 'occurrences',
    FirstSeen = 'first_seen',
    LastSeen = 'last_seen',
    Users = 'users',
    Sessions = 'sessions',
}

// Sort direction for issue listings.
export enum OrderDirectionErrors {
    Ascending = 'ASC',
    Descending = 'DESC',
}

// Issue status filter values.
export enum StatusErrors {
    Active = 'active',
    Resolved = 'resolved',
    All = 'all',
    Suppressed = 'suppressed',
}

// Tool input for listing error issues; dates are ISO-8601 datetime strings.
export const ListErrorsSchema = z.object({
    orderBy: z.nativeEnum(OrderByErrors).optional(),
    dateFrom: z.string().datetime().optional(),
    dateTo: z.string().datetime().optional(),
    orderDirection: z.nativeEnum(OrderDirectionErrors).optional(),
    filterTestAccounts: z.boolean().optional(),
    status: z.nativeEnum(StatusErrors).optional(),
})

// Tool input for fetching a single issue's details within an optional window.
export const ErrorDetailsSchema = z.object({
    issueId: z.string().uuid(),
    dateFrom: z.string().datetime().optional(),
    dateTo: z.string().datetime().optional(),
})

export type ListErrorsData = z.infer<typeof ListErrorsSchema>
export type ErrorDetailsData = z.infer<typeof ErrorDetailsSchema>

View File

@@ -0,0 +1,560 @@
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { FeatureFlagSchema } from './flags'
import {
ExperimentCreateSchema as ToolExperimentCreateSchema,
ExperimentUpdateInputSchema as ToolExperimentUpdateInputSchema,
} from './tool-inputs'
// Experiment delivery surfaces accepted by the API.
const ExperimentType = ['web', 'product'] as const
// Terminal conclusions an experiment can be stopped with.
const ExperimentConclusion = ['won', 'lost', 'inconclusive', 'stopped_early', 'invalid'] as const
/**
 * This is the schema for the experiment metric base properties.
 * It references the ExperimentMetricBaseProperties type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 *
 * TODO: Add the schemas for FunnelConversionWindowTimeUnit
 */
export const ExperimentMetricBasePropertiesSchema = z.object({
    kind: z.literal('ExperimentMetric'),
    uuid: z.string().optional(),
    name: z.string().optional(),
    conversion_window: z.number().optional(),
    conversion_window_unit: z.any().optional(), // FunnelConversionWindowTimeUnit
})
export type ExperimentMetricBaseProperties = z.infer<typeof ExperimentMetricBasePropertiesSchema>
/**
 * This is the schema for the experiment metric outlier handling.
 * It references the ExperimentMetricOutlierHandling type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentMetricOutlierHandlingSchema = z.object({
    lower_bound_percentile: z.number().optional(),
    upper_bound_percentile: z.number().optional(),
})
export type ExperimentMetricOutlierHandling = z.infer<typeof ExperimentMetricOutlierHandlingSchema>
/**
 * This is the schema for the experiment metric source.
 * It references the ExperimentMetricSource type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 *
 * TODO: Add the schemas for the EventsNode and ActionsNode and ExperimentDataWarehouseNode
 */
export const ExperimentMetricSourceSchema = z.any() // EventsNode | ActionsNode | ExperimentDataWarehouseNode
/**
 * This is the schema for the experiment funnel metric step.
 * It references the ExperimentFunnelMetricStep type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 *
 * TODO: Add the schemas for the EventsNode and ActionsNode
 */
export const ExperimentFunnelMetricStepSchema = z.any() // EventsNode | ActionsNode
/**
 * This is the schema for the experiment mean metric.
 * It references the ExperimentMeanMetric type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentMeanMetricSchema = z
    .object({
        metric_type: z.literal('mean'),
        source: ExperimentMetricSourceSchema,
    })
    .merge(ExperimentMetricBasePropertiesSchema)
    .merge(ExperimentMetricOutlierHandlingSchema)
export type ExperimentMeanMetric = z.infer<typeof ExperimentMeanMetricSchema>
/**
 * This is the schema for the experiment funnel metric.
 * It references the ExperimentFunnelMetric type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentFunnelMetricSchema = z
    .object({
        metric_type: z.literal('funnel'),
        series: z.array(ExperimentFunnelMetricStepSchema),
        funnel_order_type: z.any().optional(), // StepOrderValue
    })
    .merge(ExperimentMetricBasePropertiesSchema)
export type ExperimentFunnelMetric = z.infer<typeof ExperimentFunnelMetricSchema>
/**
 * This is the schema for the experiment ratio metric.
 * It references the ExperimentRatioMetric type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentRatioMetricSchema = z
    .object({
        metric_type: z.literal('ratio'),
        numerator: ExperimentMetricSourceSchema,
        denominator: ExperimentMetricSourceSchema,
    })
    .merge(ExperimentMetricBasePropertiesSchema)
export type ExperimentRatioMetric = z.infer<typeof ExperimentRatioMetricSchema>
/**
 * This is the schema for the experiment metric.
 * It references the ExperimentMetric type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 *
 * The union branches are distinguished by `metric_type`
 * ('mean' | 'funnel' | 'ratio').
 */
export const ExperimentMetricSchema = z.union([
    ExperimentMeanMetricSchema,
    ExperimentFunnelMetricSchema,
    ExperimentRatioMetricSchema,
])
export type ExperimentMetric = z.infer<typeof ExperimentMetricSchema>
/**
 * This is the schema for the experiment exposure config.
 * It references the ExperimentEventExposureConfig type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentEventExposureConfigSchema = z.object({
    kind: z.literal('ExperimentEventExposureConfig'),
    event: z.string(),
    properties: z.array(z.any()), // this is an array of AnyPropertyFilter
})
/**
 * This is the schema for the experiment exposure criteria.
 * It references the ExperimentExposureCriteria type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentExposureCriteriaSchema = z.object({
    filterTestAccounts: z.boolean().optional(),
    exposure_config: ExperimentEventExposureConfigSchema.optional(),
    multiple_variant_handling: z.enum(['exclude', 'first_seen']).optional(),
})
/**
 * This is the schema for the experiment object.
 * It references the Experiment type from
 * @posthog/frontend/src/types.ts
 */
export const ExperimentSchema = z.object({
    id: z.number(),
    name: z.string(),
    type: z.enum(ExperimentType).nullish(),
    description: z.string().nullish(),
    feature_flag_key: z.string(),
    feature_flag: FeatureFlagSchema.nullish(),
    exposure_cohort: z.number().nullish(),
    exposure_criteria: ExperimentExposureCriteriaSchema.nullish(),
    /**
     * We only type ExperimentMetrics. Legacy metric formats are not validated.
     */
    metrics: z.array(z.union([ExperimentMetricSchema, z.any()])).nullish(),
    metrics_secondary: z.array(z.union([ExperimentMetricSchema, z.any()])).nullish(),
    saved_metrics: z.array(z.any()).nullish(),
    saved_metrics_ids: z.array(z.any()).nullable(),
    parameters: z
        .object({
            feature_flag_variants: z
                .array(
                    z.object({
                        key: z.string(),
                        name: z.string().nullish(),
                        rollout_percentage: z.number().nullish(),
                    })
                )
                .nullish(),
            minimum_detectable_effect: z.number().nullish(),
            recommended_running_time: z.number().nullish(),
            recommended_sample_size: z.number().nullish(),
        })
        .nullish(),
    start_date: z.string().nullish(),
    end_date: z.string().nullish(),
    archived: z.boolean(),
    deleted: z.boolean(),
    created_at: z.string(),
    updated_at: z.string(),
    holdout: z.any().nullish(),
    holdout_id: z.number().nullish(),
    stats_config: z.any().optional(),
    conclusion: z.enum(ExperimentConclusion).nullish(),
    conclusion_comment: z.string().nullish(),
})
export type Experiment = z.infer<typeof ExperimentSchema>
/**
 * Schema for the API payload when creating an experiment
 * This is derived from ExperimentSchema with appropriate omissions
 * (server-managed fields such as id, timestamps and lifecycle state).
 */
export const ExperimentApiPayloadSchema = ExperimentSchema.omit({
    id: true,
    feature_flag: true,
    exposure_cohort: true,
    exposure_criteria: true,
    saved_metrics: true,
    saved_metrics_ids: true,
    start_date: true,
    end_date: true,
    deleted: true,
    archived: true,
    created_at: true,
    updated_at: true,
    holdout: true,
    stats_config: true,
    conclusion: true,
    conclusion_comment: true,
}).partial()
export type ExperimentApiPayload = z.infer<typeof ExperimentApiPayloadSchema>
/**
 * Schema for the API payload when updating an experiment
 * Derived from ExperimentSchema, omitting fields that cannot be updated
 */
export const ExperimentUpdateApiPayloadSchema = ExperimentSchema.omit({
    id: true,
    feature_flag: true,
    feature_flag_key: true,
    type: true,
    exposure_cohort: true,
    saved_metrics: true,
    deleted: true,
    created_at: true,
    updated_at: true,
    holdout: true,
    holdout_id: true,
}).partial()
export type ExperimentUpdateApiPayload = z.infer<typeof ExperimentUpdateApiPayloadSchema>
/**
* Transform tool input metrics to ExperimentMetric format for API
*/
const transformMetricToApi = (metric: any): z.infer<typeof ExperimentMetricSchema> => {
const uuid = uuidv4()
const base = {
kind: 'ExperimentMetric' as const,
uuid,
name: metric.name,
}
switch (metric.metric_type) {
case 'mean':
return {
...base,
metric_type: 'mean',
source: {
kind: 'EventsNode',
event: metric.event_name,
properties: metric.properties || {},
},
}
case 'funnel':
return {
...base,
metric_type: 'funnel',
series: (metric.funnel_steps || [metric.event_name]).map((event: string) => ({
kind: 'EventsNode',
event,
properties: metric.properties || {},
})),
}
case 'ratio':
return {
...base,
metric_type: 'ratio',
numerator: {
kind: 'EventsNode',
event: metric.event_name,
properties: metric.properties?.numerator || metric.properties || {},
},
denominator: {
kind: 'EventsNode',
event: metric.properties?.denominator_event || metric.event_name,
properties: metric.properties?.denominator || metric.properties || {},
},
}
default:
throw new Error(`Unknown metric type: ${metric.metric_type}`)
}
}
/**
 * Transform tool input to API payload format
 * This bridges the gap between user-friendly input and PostHog API requirements
 */
export const ExperimentCreatePayloadSchema = ToolExperimentCreateSchema.transform((input) => {
    // Transform metrics with proper UUIDs
    const primaryMetrics = input.primary_metrics?.map(transformMetricToApi) || []
    const secondaryMetrics = input.secondary_metrics?.map(transformMetricToApi) || []
    return {
        // Core fields
        name: input.name,
        description: input.description || null,
        feature_flag_key: input.feature_flag_key, // Maps to get_feature_flag_key in serializer
        type: input.type || 'product',
        // Metrics - ensure arrays are never null, always empty arrays when no metrics
        metrics: primaryMetrics,
        metrics_secondary: secondaryMetrics,
        // Metrics UUIDs for ordering - ensure arrays are never null
        primary_metrics_ordered_uuids: primaryMetrics.map((m) => m.uuid),
        secondary_metrics_ordered_uuids: secondaryMetrics.map((m) => m.uuid),
        // Legacy fields still required by API
        filters: {}, // Legacy but still in model
        secondary_metrics: secondaryMetrics, // Use the same array as metrics_secondary
        saved_metrics_ids: [], // Empty array for saved metrics
        // Parameters with variants; default to a 50/50 control/test split
        parameters: {
            feature_flag_variants: input.variants || [
                { key: 'control', name: 'Control', rollout_percentage: 50 },
                { key: 'test', name: 'Test', rollout_percentage: 50 },
            ],
            minimum_detectable_effect: input.minimum_detectable_effect || 30,
        },
        // Exposure criteria
        exposure_criteria: input.filter_test_accounts
            ? {
                  filterTestAccounts: input.filter_test_accounts,
              }
            : null,
        // Stats config (empty, will be filled by backend)
        stats_config: {},
        // State fields: draft === false launches immediately (start_date now),
        // otherwise the experiment is created as a draft.
        start_date: input.draft === false ? new Date().toISOString() : null,
        end_date: null,
        archived: false,
        deleted: false,
        // Optional holdout
        holdout_id: input.holdout_id || null,
    }
}).pipe(ExperimentApiPayloadSchema)
export type ExperimentCreatePayload = z.output<typeof ExperimentCreatePayloadSchema>
/**
 * Transform user-friendly update input to API payload format for experiment updates
 * This handles partial updates with the same transformation patterns as creation:
 * only fields present on the input appear in the resulting payload.
 */
export const ExperimentUpdateTransformSchema = ToolExperimentUpdateInputSchema.transform(
    (input) => {
        const updatePayload: Record<string, any> = {}
        // Basic fields - direct mapping
        if (input.name !== undefined) {
            updatePayload.name = input.name
        }
        if (input.description !== undefined) {
            updatePayload.description = input.description
        }
        // Transform metrics if provided
        if (input.primary_metrics !== undefined) {
            updatePayload.metrics = input.primary_metrics.map(transformMetricToApi)
            updatePayload.primary_metrics_ordered_uuids = updatePayload.metrics.map(
                (m: any) => m.uuid!
            )
        }
        if (input.secondary_metrics !== undefined) {
            updatePayload.metrics_secondary = input.secondary_metrics.map(transformMetricToApi)
            updatePayload.secondary_metrics_ordered_uuids = updatePayload.metrics_secondary.map(
                (m: any) => m.uuid!
            )
        }
        // Transform minimum detectable effect into parameters
        if (input.minimum_detectable_effect !== undefined) {
            updatePayload.parameters = {
                ...updatePayload.parameters,
                minimum_detectable_effect: input.minimum_detectable_effect,
            }
        }
        // Handle experiment state management
        if (input.launch === true) {
            updatePayload.start_date = new Date().toISOString()
        }
        if (input.conclude !== undefined) {
            updatePayload.conclusion = input.conclude
            updatePayload.end_date = new Date().toISOString()
            if (input.conclusion_comment !== undefined) {
                updatePayload.conclusion_comment = input.conclusion_comment
            }
        }
        // Restarting clears the end date and any previous conclusion.
        if (input.restart === true) {
            updatePayload.end_date = null
            updatePayload.conclusion = null
            updatePayload.conclusion_comment = null
        }
        if (input.archive !== undefined) {
            updatePayload.archived = input.archive
        }
        return updatePayload
    }
).pipe(ExperimentUpdateApiPayloadSchema)
export type ExperimentUpdateTransform = z.output<typeof ExperimentUpdateTransformSchema>
/**
 * This is the schema for the experiment exposure query.
 * It references the ExperimentExposureQuery type from
 * @posthog/frontend/src/queries/schema/schema-general.ts
 */
export const ExperimentExposureQuerySchema = z.object({
    kind: z.literal('ExperimentExposureQuery'),
    experiment_id: z.number(),
    experiment_name: z.string(),
    exposure_criteria: ExperimentExposureCriteriaSchema.nullish(),
    feature_flag: FeatureFlagSchema.optional(),
    start_date: z.string().nullish(),
    end_date: z.string().nullish(),
    holdout: z.any().optional(),
})
export type ExperimentExposureQuery = z.infer<typeof ExperimentExposureQuerySchema>
// Per-variant daily exposure counts; `days` and `exposure_counts` are parallel arrays.
export const ExperimentExposureTimeSeriesSchema = z.object({
    variant: z.string(),
    days: z.array(z.string()),
    exposure_counts: z.array(z.number()),
})
export const ExperimentExposureQueryResponseSchema = z.object({
    kind: z.literal('ExperimentExposureQuery'), // API returns the query kind, not a response kind
    timeseries: z.array(ExperimentExposureTimeSeriesSchema),
    total_exposures: z.record(z.string(), z.number()),
    date_range: z.object({
        date_from: z.string(),
        date_to: z.string().nullable(), // API can return null for date_to
    }),
})
export type ExperimentExposureQueryResponse = z.infer<typeof ExperimentExposureQueryResponseSchema>
/**
 * Normalizes an experiment-results payload: trims the experiment record,
 * derives a draft/running/completed status from its dates, and wraps the
 * metric result arrays with their indices (dropping null entries).
 */
export const ExperimentResultsResponseSchema = z
    .object({
        experiment: ExperimentSchema.pick({
            id: true,
            name: true,
            description: true,
            feature_flag_key: true,
            start_date: true,
            end_date: true,
            metrics: true,
            metrics_secondary: true,
            parameters: true, // Pick parameters to extract variants
        }).transform((data) => ({
            id: data.id,
            name: data.name,
            description: data.description,
            feature_flag_key: data.feature_flag_key,
            metrics: data.metrics,
            metrics_secondary: data.metrics_secondary,
            start_date: data.start_date,
            end_date: data.end_date,
            // No start date → draft; started but not ended → running.
            status: data.start_date ? (data.end_date ? 'completed' : 'running') : 'draft',
            variants: data.parameters?.feature_flag_variants || [],
        })),
        exposures: ExperimentExposureQueryResponseSchema,
        primaryMetricsResults: z.array(z.any()),
        secondaryMetricsResults: z.array(z.any()),
    })
    .transform(({ experiment, exposures, primaryMetricsResults, secondaryMetricsResults }) => {
        return {
            experiment,
            exposures,
            metrics: {
                primary: {
                    count: primaryMetricsResults.length,
                    results: primaryMetricsResults
                        .map((result, index) => ({
                            index,
                            data: result,
                        }))
                        .filter((item) => item.data !== null),
                },
                secondary: {
                    count: secondaryMetricsResults.length,
                    results: secondaryMetricsResults
                        .map((result, index) => ({
                            index,
                            data: result,
                        }))
                        .filter((item) => item.data !== null),
                },
            },
        }
    })
/**
 * Schema for updating existing experiments
 * All fields are optional to support partial updates
 */
export const ExperimentUpdatePayloadSchema = z
    .object({
        name: z.string().optional(),
        description: z.string().nullish(),
        start_date: z.string().nullish(),
        end_date: z.string().nullish(),
        // Parameters
        parameters: z
            .object({
                feature_flag_variants: z
                    .array(
                        z.object({
                            key: z.string(),
                            name: z.string().optional(),
                            rollout_percentage: z.number(),
                        })
                    )
                    .optional(),
                minimum_detectable_effect: z.number().nullish(),
                recommended_running_time: z.number().nullish(),
                recommended_sample_size: z.number().nullish(),
                variant_screenshot_media_ids: z.record(z.array(z.string())).optional(),
            })
            .optional(),
        // Metrics
        metrics: z.array(ExperimentMetricSchema).optional(),
        metrics_secondary: z.array(ExperimentMetricSchema).optional(),
        primary_metrics_ordered_uuids: z.array(z.string()).nullish(),
        secondary_metrics_ordered_uuids: z.array(z.string()).nullish(),
        // State management
        archived: z.boolean().optional(),
        conclusion: z.enum(ExperimentConclusion).nullish(),
        conclusion_comment: z.string().nullish(),
        // Configuration
        exposure_criteria: ExperimentExposureCriteriaSchema.optional(),
        saved_metrics_ids: z.array(z.any()).nullish(),
        stats_config: z.any().optional(),
    })
    // .strict() rejects unknown keys so typos never silently reach the API.
    .strict()
export type ExperimentUpdatePayload = z.infer<typeof ExperimentUpdatePayloadSchema>

View File

@@ -0,0 +1,126 @@
import { z } from 'zod'
// Minimal feature-flag record used outside of zod validation.
export interface PostHogFeatureFlag {
    id: number
    key: string
    name: string
}
export interface PostHogFlagsResponse {
    results?: PostHogFeatureFlag[]
}
// Operators valid for every value type.
const base = ['exact', 'is_not', 'is_set', 'is_not_set'] as const
const stringOps = [
    ...base,
    'icontains',
    'not_icontains',
    'regex',
    'not_regex',
    'is_cleaned_path_exact',
] as const
const numberOps = [...base, 'gt', 'gte', 'lt', 'lte', 'min', 'max'] as const
const booleanOps = [...base] as const
const arrayOps = ['in', 'not_in'] as const
// NOTE(review): the `as unknown as` double cast widens the enum to plain
// strings; removing it would keep literal types but narrows the public
// `operator` type for callers — confirm before changing.
const operatorSchema = z.enum([
    ...stringOps,
    ...numberOps,
    ...booleanOps,
    ...arrayOps,
] as unknown as [string, ...string[]])
// Person-property filter with cross-field validation: the operator must be
// compatible with the value's runtime type.
export const PersonPropertyFilterSchema = z
    .object({
        key: z.string(),
        value: z.union([
            z.string(),
            z.number(),
            z.boolean(),
            z.array(z.string()),
            z.array(z.number()),
        ]),
        operator: operatorSchema.optional(),
    })
    .superRefine((data, ctx) => {
        const { value, operator } = data
        if (!operator) {
            return
        }
        const isArray = Array.isArray(value)
        const valid =
            (typeof value === 'string' && stringOps.includes(operator as any)) ||
            (typeof value === 'number' && numberOps.includes(operator as any)) ||
            (typeof value === 'boolean' && booleanOps.includes(operator as any)) ||
            (isArray && arrayOps.includes(operator as any))
        if (!valid) {
            ctx.addIssue({
                code: z.ZodIssueCode.custom,
                message: `operator "${operator}" is not valid for value type "${isArray ? 'array' : typeof value}"`,
            })
        }
        // NOTE(review): overlaps with the check above — a scalar value with an
        // array operator reports two issues. Harmless, but intentional?
        if (!isArray && arrayOps.includes(operator as any)) {
            ctx.addIssue({
                code: z.ZodIssueCode.custom,
                message: `operator "${operator}" requires an array value`,
            })
        }
    })
    .transform((data) => {
        // when using is_set or is_not_set, set the value the same as the operator
        if (data.operator === 'is_set' || data.operator === 'is_not_set') {
            data.value = data.operator
        }
        // Tag the filter as a person-property filter for the API.
        return {
            ...data,
            type: 'person',
        }
    })
export type PersonPropertyFilter = z.infer<typeof PersonPropertyFilterSchema>
// One release-condition group: property filters plus a rollout percentage.
export const FiltersSchema = z.object({
    properties: z.array(PersonPropertyFilterSchema),
    rollout_percentage: z.number(),
})
export type Filters = z.infer<typeof FiltersSchema>
export const FilterGroupsSchema = z.object({
    groups: z.array(FiltersSchema),
})
export type FilterGroups = z.infer<typeof FilterGroupsSchema>
// Tool input for creating a feature flag.
export const CreateFeatureFlagInputSchema = z.object({
    name: z.string(),
    key: z.string(),
    description: z.string(),
    filters: FilterGroupsSchema,
    active: z.boolean(),
    tags: z.array(z.string()).optional(),
})
export type CreateFeatureFlagInput = z.infer<typeof CreateFeatureFlagInputSchema>
// Update input: everything from creation except the immutable key, all optional.
export const UpdateFeatureFlagInputSchema = CreateFeatureFlagInputSchema.omit({
    key: true,
}).partial()
export type UpdateFeatureFlagInput = z.infer<typeof UpdateFeatureFlagInputSchema>
// Feature-flag record as returned by the API.
export const FeatureFlagSchema = z.object({
    id: z.number(),
    key: z.string(),
    name: z.string(),
    description: z.string().nullish(),
    filters: z.any().nullish(),
    active: z.boolean(),
    tags: z.array(z.string()).optional(),
})
export type FeatureFlag = z.infer<typeof FeatureFlagSchema>

View File

@@ -0,0 +1,97 @@
import { z } from 'zod'
// Full insight object as returned by the PostHog API.
export const InsightSchema = z.object({
    id: z.number(),
    short_id: z.string(),
    name: z.string().nullish(),
    description: z.string().nullish(),
    filters: z.record(z.any()),
    query: z.any(),
    result: z.any().optional(),
    created_at: z.string(),
    updated_at: z.string(),
    // Creator details; may be absent or null.
    created_by: z
        .object({
            id: z.number(),
            uuid: z.string().uuid(),
            distinct_id: z.string(),
            first_name: z.string(),
            email: z.string(),
        })
        .optional()
        .nullable(),
    favorited: z.boolean().nullish(),
    deleted: z.boolean(),
    dashboard: z.number().nullish(),
    layouts: z.record(z.any()).nullish(),
    color: z.string().nullish(),
    last_refresh: z.string().nullish(),
    refreshing: z.boolean().nullish(),
    tags: z.array(z.string()).nullish(),
})
// Lightweight projection of InsightSchema for list views / tool output.
export const SimpleInsightSchema = InsightSchema.pick({
    id: true,
    name: true,
    short_id: true,
    description: true,
    filters: true,
    query: true,
    created_at: true,
    updated_at: true,
    favorited: true,
})
// Input for creating a new insight.
export const CreateInsightInputSchema = z.object({
    name: z.string(),
    query: z.object({
        kind: z.union([z.literal('InsightVizNode'), z.literal('DataVisualizationNode')]),
        source: z
            .any()
            .describe(
                'For new insights, use the query from your successful query-run tool call. For updates, the existing query can optionally be reused.'
            ), // NOTE: This is intentionally z.any() to avoid populating the context with the complicated query schema, but we prompt the LLM to use 'query-run' to check queries, before creating insights.
    }),
    description: z.string().optional(),
    // NOTE(review): `favorited` is required here while `description`/`tags`
    // are optional — confirm callers always supply it.
    favorited: z.boolean(),
    tags: z.array(z.string()).optional(),
})
// Input for partially updating an existing insight. Every field is optional
// so a caller can patch a single attribute (matching the partial-update
// semantics of UpdateFeatureFlagInputSchema); previously `query` was
// required, which forced callers to resend the query just to rename an
// insight or change its tags.
export const UpdateInsightInputSchema = z.object({
    name: z.string().optional(),
    description: z.string().optional(),
    filters: z.record(z.any()).optional(),
    query: z
        .object({
            kind: z.union([z.literal('InsightVizNode'), z.literal('DataVisualizationNode')]),
            source: z
                .any()
                .describe(
                    'For new insights, use the query from your successful query-run tool call. For updates, the existing query can optionally be reused'
                ), // NOTE: This is intentionally z.any() to avoid populating the context with the complicated query schema, and to allow the LLM to make a change to an existing insight whose schema we do not support in our simplified subset of the full insight schema.
        })
        .optional(),
    favorited: z.boolean().optional(),
    dashboard: z.number().optional(),
    tags: z.array(z.string()).optional(),
})
// Pagination / filter parameters for listing insights.
export const ListInsightsSchema = z.object({
    limit: z.number().optional(),
    offset: z.number().optional(),
    favorited: z.boolean().optional(),
    search: z.string().optional(),
})
export type PostHogInsight = z.infer<typeof InsightSchema>
export type CreateInsightInput = z.infer<typeof CreateInsightInputSchema>
export type UpdateInsightInput = z.infer<typeof UpdateInsightInputSchema>
export type ListInsightsData = z.infer<typeof ListInsightsSchema>
export type SimpleInsight = z.infer<typeof SimpleInsightSchema>
// Result rows from executing a SQL (HogQL) insight.
// NOTE(review): shape inferred from field names — `data` appears to map
// column name to value; confirm against the API response.
export const SQLInsightResponseSchema = z.array(
    z.object({
        type: z.string(),
        data: z.record(z.any()),
    })
)
export type SQLInsightResponse = z.infer<typeof SQLInsightResponseSchema>

View File

@@ -0,0 +1,8 @@
import { z } from 'zod'
// Minimal organization reference returned by the API.
export const OrganizationSchema = z.object({
    id: z.string().uuid(),
    name: z.string(),
})
export type Organization = z.infer<typeof OrganizationSchema>

View File

@@ -0,0 +1,9 @@
import { z } from 'zod'
// Minimal project reference; `organization` is the parent org's UUID.
export const ProjectSchema = z.object({
    id: z.number(),
    name: z.string(),
    organization: z.string().uuid(),
})
export type Project = z.infer<typeof ProjectSchema>

View File

@@ -0,0 +1,21 @@
import {
ApiEventDefinitionSchema,
type ApiListResponseSchema,
ApiPropertyDefinitionSchema,
} from '@/schema/api'
import type { z } from 'zod'
// Trimmed-down property definition surfaced to tools (the full API schema
// carries many more fields than the tools need).
export const PropertyDefinitionSchema = ApiPropertyDefinitionSchema.pick({
    name: true,
    property_type: true,
})
// Trimmed-down event definition surfaced to tools.
export const EventDefinitionSchema = ApiEventDefinitionSchema.pick({
    name: true,
    last_seen_at: true,
})
// NOTE(review): this infers from the FULL Api schema rather than the picked
// PropertyDefinitionSchema above — possibly intentional (callers may read
// extra fields), but worth confirming.
export type PropertyDefinition = z.infer<typeof ApiPropertyDefinitionSchema>
// Companion type for EventDefinitionSchema (previously not exported, unlike
// its PropertyDefinition counterpart).
export type EventDefinition = z.infer<typeof EventDefinitionSchema>
export type PropertyDefinitionsResponse = z.infer<
    ReturnType<typeof ApiListResponseSchema<typeof ApiPropertyDefinitionSchema>>
>

View File

@@ -0,0 +1,251 @@
import { z } from 'zod'
// Common enums and types
const NodeKind = z.enum(['TrendsQuery', 'FunnelsQuery', 'HogQLQuery', 'EventsNode'])
const IntervalType = z.enum(['hour', 'day', 'week', 'month'])
const ChartDisplayType = z.enum([
    'ActionsLineGraph',
    'ActionsTable',
    'ActionsPie',
    'ActionsBar',
    'ActionsBarValue',
    'WorldMap',
    'BoldNumber',
])
// NOTE: Breakdowns are restricted to either person or event for simplicity
const BreakdownType = z.enum(['person', 'event'])
const FunnelVizType = z.enum(['steps', 'time_to_convert', 'trends'])
const FunnelOrderType = z.enum(['ordered', 'unordered', 'strict'])
const FunnelStepReference = z.enum(['total', 'previous'])
const BreakdownAttributionType = z.enum(['first_touch', 'last_touch', 'all_events'])
const FunnelLayout = z.enum(['horizontal', 'vertical'])
const FunnelConversionWindowTimeUnit = z.enum(['minute', 'hour', 'day', 'week', 'month'])
// Base schemas
// Date window for a query. NOTE(review): `explicitDate` presumably marks the
// bounds as exact dates rather than relative expressions — confirm against
// PostHog's DateRange type.
const DateRange = z.object({
    date_from: z.string().nullable().optional(),
    date_to: z.string().nullable().optional(),
    explicitDate: z.boolean().optional(),
})
// A single property predicate (key / value / operator / type).
const PropertyFilter = z.object({
    key: z.string(),
    value: z
        .union([z.string(), z.number(), z.array(z.string()), z.array(z.number())])
        .nullable()
        .optional(),
    operator: z.string().optional(),
    type: z.string().optional(),
})
// NOTE: Only a single level of nesting is supported here, since we can't specify recursive schema for tool inputs.
const PropertyGroupFilter = z.object({
    type: z.enum(['AND', 'OR']),
    values: z.array(PropertyFilter),
})
const AnyPropertyFilter = z.union([PropertyFilter, PropertyGroupFilter])
// A variable substituted into a HogQL query at run time.
const HogQLVariable = z.object({
    variableId: z.string(),
    code_name: z.string(),
    value: z.any().optional(),
    isNull: z.boolean().optional(),
})
// Optional filters applied on top of a HogQL query.
const HogQLFilters = z.object({
    properties: z.array(AnyPropertyFilter).optional(),
    dateRange: DateRange.optional(),
    filterTestAccounts: z.boolean().optional(),
})
// Math types that don't require a property
const BaseMathType = z.enum([
    'total',
    'dau',
    'weekly_active',
    'monthly_active',
    'unique_session',
    'first_time_for_user',
    'first_matching_event_for_user',
])
// Math types that require a math_property
const PropertyMathType = z.enum(['avg', 'sum', 'min', 'max', 'median', 'p75', 'p90', 'p95', 'p99'])
// Combined math types
const MathType = z.union([BaseMathType, PropertyMathType])
// Derived from PropertyMathType so the runtime check below can never drift
// out of sync with the enum (previously this list was duplicated by hand).
const PROPERTY_MATH_TYPES: readonly string[] = PropertyMathType.options
// Base entity object without refinement for extension
const BaseEntityObject = z.object({
    custom_name: z.string().describe('A display name'),
    math: MathType.optional(),
    math_property: z.string().optional(),
    properties: z.union([z.array(AnyPropertyFilter), PropertyGroupFilter]).optional(),
})
// A single event series entry; the refinement ensures property-math types
// always carry the property they aggregate over.
const EventsNode = BaseEntityObject.extend({
    kind: z.literal('EventsNode'),
    event: z.string().optional(),
    limit: z.number().optional(),
}).refine(
    (data) => {
        if (PROPERTY_MATH_TYPES.includes(data.math || '')) {
            return !!data.math_property
        }
        return true
    },
    {
        message: `math_property is required for ${PROPERTY_MATH_TYPES.join(', ')} math types`,
    }
)
// Currently only EventsNode is supported as a series entity.
const AnyEntityNode = EventsNode
// Base query interface
// Shared fields for all insight queries: date window, test-account filter,
// and global property filters.
const InsightsQueryBase = z.object({
    dateRange: DateRange.optional(),
    filterTestAccounts: z.boolean().optional().default(false),
    properties: z
        .union([z.array(AnyPropertyFilter), PropertyGroupFilter])
        .optional()
        .default([]),
})
// Breakdown filter
const BreakdownFilter = z.object({
    breakdown_type: BreakdownType.nullable().optional().default('event'),
    breakdown_limit: z.number().optional(),
    breakdown: z
        .union([z.string(), z.number(), z.array(z.union([z.string(), z.number()]))])
        .nullable()
        .optional(),
})
// Compare filter
const CompareFilter = z.object({
    compare: z.boolean().optional().default(false),
    compare_to: z.string().optional(),
})
// Trends filter
const TrendsFilter = z.object({
    display: ChartDisplayType.optional().default('ActionsLineGraph'),
    showLegend: z.boolean().optional().default(false),
})
// Trends query
const TrendsQuerySchema = InsightsQueryBase.extend({
    kind: z.literal('TrendsQuery'),
    interval: IntervalType.optional().default('day'),
    series: z.array(AnyEntityNode),
    trendsFilter: TrendsFilter.optional(),
    breakdownFilter: BreakdownFilter.optional(),
    compareFilter: CompareFilter.optional(),
    conversionGoal: z.any().nullable().optional(),
})
// HogQL query
const HogQLQuerySchema = z.object({
    kind: z.literal('HogQLQuery'),
    query: z.string(),
    filters: HogQLFilters.optional(),
})
// Funnels filter
const FunnelsFilter = z.object({
    layout: FunnelLayout.optional(),
    breakdownAttributionType: BreakdownAttributionType.optional(),
    breakdownAttributionValue: z.number().optional(),
    funnelToStep: z.number().optional(),
    funnelFromStep: z.number().optional(),
    funnelOrderType: FunnelOrderType.optional(),
    funnelVizType: FunnelVizType.optional(),
    funnelWindowInterval: z.number().optional().default(14),
    funnelWindowIntervalUnit: FunnelConversionWindowTimeUnit.optional().default('day'),
    funnelStepReference: FunnelStepReference.optional(),
})
// Funnels query
const FunnelsQuerySchema = InsightsQueryBase.extend({
    kind: z.literal('FunnelsQuery'),
    interval: IntervalType.optional(),
    series: z.array(AnyEntityNode).min(2, 'At least two steps are required for a funnel'),
    funnelsFilter: FunnelsFilter.optional(),
    breakdownFilter: BreakdownFilter.optional(),
})
// Insight Schema
// Wrapper nodes: InsightVizNode hosts trends/funnels; DataVisualizationNode
// hosts HogQL. Discriminated on `kind` for precise validation errors.
const InsightVizNodeSchema = z.object({
    kind: z.literal('InsightVizNode'),
    source: z.discriminatedUnion('kind', [TrendsQuerySchema, FunnelsQuerySchema]),
})
const DataVisualizationNodeSchema = z.object({
    kind: z.literal('DataVisualizationNode'),
    source: HogQLQuerySchema,
})
// Any insight query
const InsightQuerySchema = z.discriminatedUnion('kind', [
    InsightVizNodeSchema,
    DataVisualizationNodeSchema,
])
// Export all schemas
export {
    // Enums
    NodeKind,
    IntervalType,
    ChartDisplayType,
    BreakdownType,
    FunnelVizType,
    FunnelOrderType,
    FunnelStepReference,
    BreakdownAttributionType,
    FunnelLayout,
    FunnelConversionWindowTimeUnit,
    // Math types
    BaseMathType,
    PropertyMathType,
    MathType,
    // Base types
    DateRange,
    PropertyFilter,
    PropertyGroupFilter,
    AnyPropertyFilter,
    // Entity nodes
    EventsNode,
    AnyEntityNode,
    // Filters
    BreakdownFilter,
    CompareFilter,
    TrendsFilter,
    FunnelsFilter,
    // HogQL types
    HogQLVariable,
    HogQLFilters,
    // Queries
    TrendsQuerySchema,
    FunnelsQuerySchema,
    HogQLQuerySchema,
    InsightVizNodeSchema,
    DataVisualizationNodeSchema,
    InsightQuerySchema,
}
// Inferred TypeScript types for the top-level query schemas.
export type TrendsQuery = z.infer<typeof TrendsQuerySchema>
export type FunnelsQuery = z.infer<typeof FunnelsQuerySchema>
export type HogQLQuery = z.infer<typeof HogQLQuerySchema>
export type InsightQuery = z.infer<typeof InsightQuerySchema>

View File

@@ -0,0 +1,675 @@
import { z } from 'zod'
import { FilterGroupsSchema } from './flags.js'
// Survey question types
// Fields shared by every survey question variant.
const BaseSurveyQuestionSchema = z.object({
    question: z.string(),
    description: z.string().optional(),
    descriptionContentType: z.enum(['html', 'text']).optional(),
    optional: z.boolean().optional(),
    buttonText: z.string().optional(),
})
// Branching logic schemas
// Proceed to the following question (the default flow).
const NextQuestionBranching = z.object({
    type: z.literal('next_question'),
})
// Terminate the survey after this question.
const EndBranching = z.object({
    type: z.literal('end'),
})
// Choice response branching - uses numeric choice indices (0, 1, 2, etc.)
const ChoiceResponseBranching = z
    .object({
        type: z.literal('response_based'),
        responseValues: z
            .record(z.string(), z.union([z.number(), z.literal('end')]))
            .describe(
                "Only include keys for responses that should branch to a specific question or 'end'. Omit keys for responses that should proceed to the next question (default behavior)."
            ),
    })
    .describe(
        'For single choice questions: use choice indices as string keys ("0", "1", "2", etc.)'
    )
// NPS sentiment branching - uses sentiment categories
const NPSSentimentBranching = z
    .object({
        type: z.literal('response_based'),
        responseValues: z
            .record(
                z
                    .enum(['detractors', 'passives', 'promoters'])
                    .describe(
                        'NPS sentiment categories: detractors (0-6), passives (7-8), promoters (9-10)'
                    ),
                z.union([z.number(), z.literal('end')])
            )
            .describe(
                "Only include keys for responses that should branch to a specific question or 'end'. Omit keys for responses that should proceed to the next question (default behavior)."
            ),
    })
    .describe(
        'For NPS rating questions: use sentiment keys based on score ranges - detractors (0-6), passives (7-8), promoters (9-10)'
    )
// Match type enum for URL and device type targeting
const MatchTypeEnum = z
    .enum(['regex', 'not_regex', 'exact', 'is_not', 'icontains', 'not_icontains'])
    .describe(
        "URL/device matching types: 'regex' (matches regex pattern), 'not_regex' (does not match regex pattern), 'exact' (exact string match), 'is_not' (not exact match), 'icontains' (case-insensitive contains), 'not_icontains' (case-insensitive does not contain)"
    )
// Rating sentiment branching - uses sentiment categories
const RatingSentimentBranching = z
    .object({
        type: z.literal('response_based'),
        responseValues: z
            .record(
                z
                    .enum(['negative', 'neutral', 'positive'])
                    .describe(
                        'Rating sentiment categories: negative (lower third of scale), neutral (middle third), positive (upper third)'
                    ),
                z.union([z.number(), z.literal('end')])
            )
            .describe(
                "Only include keys for responses that should branch to a specific question or 'end'. Omit keys for responses that should proceed to the next question (default behavior)."
            ),
    })
    .describe(
        'For rating questions: use sentiment keys based on scale thirds - negative (lower third), neutral (middle third), positive (upper third)'
    )
// Jump directly to the question at `index`.
const SpecificQuestionBranching = z.object({
    type: z.literal('specific_question'),
    index: z.number(),
})
// Branching schema unions for different question types
const ChoiceBranching = z.union([
    NextQuestionBranching,
    EndBranching,
    ChoiceResponseBranching,
    SpecificQuestionBranching,
])
const NPSBranching = z.union([
    NextQuestionBranching,
    EndBranching,
    NPSSentimentBranching,
    SpecificQuestionBranching,
])
const RatingBranching = z.union([
    NextQuestionBranching,
    EndBranching,
    RatingSentimentBranching,
    SpecificQuestionBranching,
])
// Question schemas - cleaner naming without Schema suffix
// Free-text question.
const OpenQuestion = BaseSurveyQuestionSchema.extend({
    type: z.literal('open'),
})
// Question that presents a URL to the respondent.
const LinkQuestion = BaseSurveyQuestionSchema.extend({
    type: z.literal('link'),
    link: z.string().url(),
})
// Numeric or emoji rating on a 3/5/7-point scale (NPS is modeled separately
// below with a fixed 0-10 scale).
const RatingQuestion = BaseSurveyQuestionSchema.extend({
    type: z.literal('rating'),
    display: z
        .enum(['number', 'emoji'])
        .optional()
        .describe("Display format: 'number' shows numeric scale, 'emoji' shows emoji scale"),
    scale: z
        .union([z.literal(3), z.literal(5), z.literal(7)])
        .optional()
        .describe('Rating scale can be one of 3, 5, or 7'),
    lowerBoundLabel: z
        .string()
        .optional()
        .describe("Label for the lowest rating (e.g., 'Very Poor')"),
    upperBoundLabel: z
        .string()
        .optional()
        .describe("Label for the highest rating (e.g., 'Excellent')"),
    branching: RatingBranching.optional(),
}).superRefine((data, ctx) => {
    // Validate display-specific scale constraints
    if (data.display === 'emoji' && data.scale && ![3, 5].includes(data.scale)) {
        ctx.addIssue({
            code: z.ZodIssueCode.custom,
            message: 'Emoji display only supports scales of 3 or 5',
            path: ['scale'],
        })
    }
    if (data.display === 'number' && data.scale && ![5, 7].includes(data.scale)) {
        ctx.addIssue({
            code: z.ZodIssueCode.custom,
            message: 'Number display only supports scales of 5 or 7',
            path: ['scale'],
        })
    }
    // Validate response-based branching for rating questions
    if (data.branching?.type === 'response_based') {
        const responseValues = data.branching.responseValues
        const validSentiments = ['negative', 'neutral', 'positive']
        // Check that all response keys are valid sentiment categories
        for (const key of Object.keys(responseValues)) {
            if (!validSentiments.includes(key)) {
                ctx.addIssue({
                    code: z.ZodIssueCode.custom,
                    message: `Invalid sentiment key "${key}". Must be one of: ${validSentiments.join(', ')}`,
                    path: ['branching', 'responseValues', key],
                })
            }
        }
    }
})
// NPS question: always a numeric 0-10 scale with sentiment-based branching.
const NPSRatingQuestion = BaseSurveyQuestionSchema.extend({
    type: z.literal('rating'),
    display: z.literal('number').describe('NPS questions always use numeric scale'),
    scale: z.literal(10).describe('NPS questions always use 0-10 scale'),
    lowerBoundLabel: z
        .string()
        .optional()
        .describe("Label for 0 rating (typically 'Not at all likely')"),
    upperBoundLabel: z
        .string()
        .optional()
        .describe("Label for 10 rating (typically 'Extremely likely')"),
    branching: NPSBranching.optional(),
}).superRefine((data, ctx) => {
    // Validate response-based branching for NPS rating questions
    if (data.branching?.type === 'response_based') {
        const responseValues = data.branching.responseValues
        const validNPSCategories = ['detractors', 'passives', 'promoters']
        // Check that all response keys are valid NPS sentiment categories
        for (const key of Object.keys(responseValues)) {
            if (!validNPSCategories.includes(key)) {
                ctx.addIssue({
                    code: z.ZodIssueCode.custom,
                    message: `Invalid NPS category "${key}". Must be one of: ${validNPSCategories.join(', ')}`,
                    path: ['branching', 'responseValues', key],
                })
            }
        }
    }
})
// Single-selection choice question; branching keys are choice indices.
const SingleChoiceQuestion = BaseSurveyQuestionSchema.extend({
    type: z.literal('single_choice'),
    choices: z
        .array(z.string().min(1, 'Choice text cannot be empty'))
        .min(2, 'Must have at least 2 choices')
        .max(20, 'Cannot have more than 20 choices')
        .describe(
            'Array of choice options. Choice indices (0, 1, 2, etc.) are used for branching logic'
        ),
    shuffleOptions: z
        .boolean()
        .optional()
        .describe('Whether to randomize the order of choices for each respondent'),
    hasOpenChoice: z
        .boolean()
        .optional()
        // Fixed unbalanced parenthesis in the description (was "(typically 'Other',").
        .describe("Whether the last choice (typically 'Other') is an open text input question"),
    branching: ChoiceBranching.optional(),
}).superRefine((data, ctx) => {
    // Validate unique choices
    const uniqueChoices = new Set(data.choices)
    if (uniqueChoices.size !== data.choices.length) {
        ctx.addIssue({
            code: z.ZodIssueCode.custom,
            message: 'All choices must be unique',
            path: ['choices'],
        })
    }
    // Validate hasOpenChoice logic (defensive: unreachable in practice since
    // `choices` already enforces a minimum of 2 entries above)
    if (data.hasOpenChoice && data.choices.length === 0) {
        ctx.addIssue({
            code: z.ZodIssueCode.custom,
            message: 'Cannot have open choice without any regular choices',
            path: ['hasOpenChoice'],
        })
    }
    // Validate response-based branching for single choice questions
    if (data.branching?.type === 'response_based') {
        const responseValues = data.branching.responseValues
        const choiceCount = data.choices.length
        // Check that all response keys are valid choice indices
        for (const key of Object.keys(responseValues)) {
            const choiceIndex = Number.parseInt(key, 10)
            if (Number.isNaN(choiceIndex) || choiceIndex < 0 || choiceIndex >= choiceCount) {
                ctx.addIssue({
                    code: z.ZodIssueCode.custom,
                    message: `Invalid choice index "${key}". Must be between 0 and ${choiceCount - 1}`,
                    path: ['branching', 'responseValues', key],
                })
            }
        }
    }
})
// Multi-selection choice question; no branching logic is supported.
const MultipleChoiceQuestion = BaseSurveyQuestionSchema.extend({
    type: z.literal('multiple_choice'),
    choices: z
        .array(z.string().min(1, 'Choice text cannot be empty'))
        .min(2, 'Must have at least 2 choices')
        .max(20, 'Cannot have more than 20 choices')
        .describe(
            'Array of choice options. Multiple selections allowed. No branching logic supported.'
        ),
    shuffleOptions: z
        .boolean()
        .optional()
        .describe('Whether to randomize the order of choices for each respondent'),
    hasOpenChoice: z
        .boolean()
        .optional()
        // Fixed unbalanced parenthesis in the description (was "(typically 'Other',").
        .describe("Whether the last choice (typically 'Other') is an open text input question"),
})
// Input schema - strict validation for user input
export const SurveyQuestionInputSchema = z
    .union([
        OpenQuestion,
        LinkQuestion,
        RatingQuestion,
        NPSRatingQuestion,
        SingleChoiceQuestion,
        MultipleChoiceQuestion,
    ])
    .superRefine((data, ctx) => {
        // Validate that branching is only used with supported question types
        if (!('branching' in data) || !data.branching) {
            return
        }
        const supportedTypes = ['rating', 'single_choice']
        if (!supportedTypes.includes(data.type)) {
            ctx.addIssue({
                code: z.ZodIssueCode.custom,
                message: `Branching is not supported for question type "${data.type}". Only supported for: ${supportedTypes.join(', ')}`,
                path: ['branching'],
            })
        }
    })
// Output schema - permissive for API responses
// Flat superset of all question variants; every field is nullish so any
// server-side question shape parses without failure.
export const SurveyQuestionOutputSchema = z.object({
    type: z.string(),
    question: z.string().nullish(),
    description: z.string().nullish(),
    descriptionContentType: z.enum(['html', 'text']).nullish(),
    optional: z.boolean().nullish(),
    buttonText: z.string().nullish(),
    // Rating question fields
    display: z.string().nullish(),
    scale: z.number().nullish(),
    lowerBoundLabel: z.string().nullish(),
    upperBoundLabel: z.string().nullish(),
    // Choice question fields
    choices: z.array(z.string()).nullish(),
    shuffleOptions: z.boolean().nullish(),
    hasOpenChoice: z.boolean().nullish(),
    // Link question fields
    link: z.string().nullish(),
    // Branching logic
    branching: z.any().nullish(),
})
// Survey targeting conditions - used in input schema
const SurveyConditions = z.object({
    url: z.string().optional(),
    selector: z.string().optional(),
    seenSurveyWaitPeriodInDays: z
        .number()
        .optional()
        .describe("Don't show this survey to users who saw any survey in the last x days."),
    urlMatchType: MatchTypeEnum.optional(),
    events: z
        .object({
            repeatedActivation: z
                .boolean()
                .optional()
                .describe(
                    'Whether to show the survey every time one of the events is triggered (true), or just once (false)'
                ),
            values: z
                .array(
                    z.object({
                        name: z.string(),
                    })
                )
                .optional()
                .describe('Array of event names that trigger the survey'),
        })
        .optional(),
    deviceTypes: z.array(z.enum(['Desktop', 'Mobile', 'Tablet'])).optional(),
    deviceTypesMatchType: MatchTypeEnum.optional(),
    linkedFlagVariant: z
        .string()
        .optional()
        .describe('The variant of the feature flag linked to this survey'),
})
// Survey appearance customization - input schema
const SurveyAppearance = z.object({
    backgroundColor: z.string().optional(),
    submitButtonColor: z.string().optional(),
    textColor: z.string().optional(), // deprecated, use auto contrast text color instead
    submitButtonText: z.string().optional(),
    submitButtonTextColor: z.string().optional(),
    descriptionTextColor: z.string().optional(),
    ratingButtonColor: z.string().optional(),
    ratingButtonActiveColor: z.string().optional(),
    ratingButtonHoverColor: z.string().optional(),
    whiteLabel: z.boolean().optional(),
    autoDisappear: z.boolean().optional(),
    displayThankYouMessage: z.boolean().optional(),
    thankYouMessageHeader: z.string().optional(),
    thankYouMessageDescription: z.string().optional(),
    thankYouMessageDescriptionContentType: z.enum(['html', 'text']).optional(),
    thankYouMessageCloseButtonText: z.string().optional(),
    borderColor: z.string().optional(),
    placeholder: z.string().optional(),
    shuffleQuestions: z.boolean().optional(),
    surveyPopupDelaySeconds: z.number().optional(),
    // Widget-type surveys only.
    widgetType: z.enum(['button', 'tab', 'selector']).optional(),
    widgetSelector: z.string().optional(),
    widgetLabel: z.string().optional(),
    widgetColor: z.string().optional(),
    fontFamily: z.string().optional(),
    maxWidth: z.string().optional(),
    zIndex: z.string().optional(),
    disabledButtonOpacity: z.string().optional(),
    boxPadding: z.string().optional(),
})
// User data from API responses - output schema
const User = z.object({
    id: z.number(),
    uuid: z.string(),
    distinct_id: z.string(),
    first_name: z.string(),
    email: z.string(),
})
// Survey input schemas
// NOTE(review): unlike UpdateSurveyInputSchema there is no `conditions` field
// here — confirm whether targeting conditions can be set at creation time.
export const CreateSurveyInputSchema = z.object({
    name: z.string().min(1, 'Survey name cannot be empty'),
    description: z.string().optional(),
    type: z.enum(['popover', 'api', 'widget', 'external_survey']).optional(),
    questions: z.array(SurveyQuestionInputSchema).min(1, 'Survey must have at least one question'),
    appearance: SurveyAppearance.optional(),
    start_date: z
        .string()
        .datetime()
        .nullable()
        .optional()
        .default(null)
        .describe(
            "Setting this will launch the survey immediately. Don't add a start_date unless explicitly requested to do so."
        ),
    responses_limit: z
        .number()
        .positive('Response limit must be positive')
        .nullable()
        .optional()
        .describe('The maximum number of responses before automatically stopping the survey.'),
    iteration_count: z
        .number()
        .positive('Iteration count must be positive')
        .nullable()
        .optional()
        .describe(
            "For a recurring schedule, this field specifies the number of times the survey should be shown to the user. Use 1 for 'once every X days', higher numbers for multiple repetitions. Works together with iteration_frequency_days to determine the overall survey schedule."
        ),
    iteration_frequency_days: z
        .number()
        .positive('Iteration frequency must be positive')
        .max(365, 'Iteration frequency cannot exceed 365 days')
        .nullable()
        .optional()
        .describe(
            'For a recurring schedule, this field specifies the interval in days between each survey instance shown to the user, used alongside iteration_count for precise scheduling.'
        ),
    enable_partial_responses: z
        .boolean()
        .optional()
        .describe(
            'When at least one question is answered, the response is stored (true). The response is stored when all questions are answered (false).'
        ),
    linked_flag_id: z
        .number()
        .nullable()
        .optional()
        .describe('The feature flag linked to this survey'),
    targeting_flag_filters: FilterGroupsSchema.optional().describe(
        "Target specific users based on their properties. Example: {groups: [{properties: [{key: 'email', value: ['@company.com'], operator: 'icontains'}], rollout_percentage: 100}]}"
    ),
})
// Partial update for an existing survey: every field optional.
export const UpdateSurveyInputSchema = z.object({
    name: z.string().min(1, 'Survey name cannot be empty').optional(),
    description: z.string().optional(),
    type: z.enum(['popover', 'api', 'widget', 'external_survey']).optional(),
    questions: z
        .array(SurveyQuestionInputSchema)
        .min(1, 'Survey must have at least one question')
        .optional(),
    conditions: SurveyConditions.optional(),
    appearance: SurveyAppearance.optional(),
    schedule: z
        .enum(['once', 'recurring', 'always'])
        .optional()
        .describe(
            "Survey scheduling behavior: 'once' = show once per user (default), 'recurring' = repeat based on iteration_count and iteration_frequency_days settings, 'always' = show every time conditions are met (mainly for widget surveys)"
        ),
    start_date: z
        .string()
        .datetime()
        .optional()
        .describe(
            'When the survey should start being shown to users. Setting this will launch the survey'
        ),
    end_date: z
        .string()
        .datetime()
        .optional()
        .describe(
            'When the survey stopped being shown to users. Setting this will complete the survey.'
        ),
    archived: z.boolean().optional(),
    responses_limit: z
        .number()
        .positive('Response limit must be positive')
        .nullable()
        .optional()
        .describe('The maximum number of responses before automatically stopping the survey.'),
    iteration_count: z
        .number()
        .positive('Iteration count must be positive')
        .nullable()
        .optional()
        .describe(
            "For a recurring schedule, this field specifies the number of times the survey should be shown to the user. Use 1 for 'once every X days', higher numbers for multiple repetitions. Works together with iteration_frequency_days to determine the overall survey schedule."
        ),
    iteration_frequency_days: z
        .number()
        .positive('Iteration frequency must be positive')
        .max(365, 'Iteration frequency cannot exceed 365 days')
        .nullable()
        .optional()
        .describe(
            'For a recurring schedule, this field specifies the interval in days between each survey instance shown to the user, used alongside iteration_count for precise scheduling.'
        ),
    enable_partial_responses: z
        .boolean()
        .optional()
        .describe(
            'When at least one question is answered, the response is stored (true). The response is stored when all questions are answered (false).'
        ),
    linked_flag_id: z
        .number()
        .nullable()
        .optional()
        .describe('The feature flag to link to this survey'),
    targeting_flag_id: z
        .number()
        .optional()
        .describe('An existing targeting flag to use for this survey'),
    targeting_flag_filters: FilterGroupsSchema.optional().describe(
        "Target specific users based on their properties. Example: {groups: [{properties: [{key: 'email', value: ['@company.com'], operator: 'icontains'}], rollout_percentage: 50}]}"
    ),
    remove_targeting_flag: z
        .boolean()
        .optional()
        .describe(
            'Set to true to completely remove all targeting filters from the survey, making it visible to all users (subject to other display conditions like URL matching).'
        ),
})
// Pagination / search parameters for listing surveys.
export const ListSurveysInputSchema = z.object({
    limit: z.number().optional(),
    offset: z.number().optional(),
    search: z.string().optional(),
})
// Survey output schemas - permissive, comprehensive
export const SurveyOutputSchema = z.object({
    id: z.string(),
    name: z.string(),
    description: z.string().nullish(),
    type: z.enum(['popover', 'api', 'widget', 'external_survey']),
    questions: z.array(SurveyQuestionOutputSchema),
    conditions: SurveyConditions.nullish(),
    appearance: SurveyAppearance.nullish(),
    created_at: z.string(),
    created_by: User.nullish(),
    start_date: z.string().nullish(),
    end_date: z.string().nullish(),
    archived: z.boolean().nullish(),
    responses_limit: z.number().nullish(),
    iteration_count: z.number().nullish(),
    iteration_frequency_days: z.number().nullish(),
    enable_partial_responses: z.boolean().nullish(),
    linked_flag_id: z.number().nullish(),
    schedule: z.string().nullish(),
    targeting_flag: z
        .any()
        .nullish()
        .describe(
            "Target specific users based on their properties. Example: {groups: [{properties: [{key: 'email', value: ['@company.com'], operator: 'icontains'}], rollout_percentage: 50}]}"
        ),
})
// Survey list item - lightweight version for list endpoints
export const SurveyListItemOutputSchema = z.object({
    id: z.string(),
    name: z.string(),
    description: z.string().nullish(),
    type: z.enum(['popover', 'api', 'widget', 'external_survey']),
    archived: z.boolean().nullish(),
    created_at: z.string(),
    created_by: User.nullish(),
    start_date: z.string().nullish(),
    end_date: z.string().nullish(),
    conditions: z.any().nullish(),
    responses_limit: z.number().nullish(),
    targeting_flag: z.any().nullish(),
    iteration_count: z.number().nullish(),
    iteration_frequency_days: z.number().nullish(),
})
// Survey response statistics schemas
// Counters for a single survey lifecycle event (shown/dismissed/sent).
export const SurveyEventStatsOutputSchema = z.object({
    total_count: z.number().nullish(),
    total_count_only_seen: z.number().nullish(),
    unique_persons: z.number().nullish(),
    unique_persons_only_seen: z.number().nullish(),
    first_seen: z.string().nullish(),
    last_seen: z.string().nullish(),
})
// Response/dismissal rate percentages for a survey.
export const SurveyRatesOutputSchema = z.object({
    response_rate: z.number().nullish(),
    dismissal_rate: z.number().nullish(),
    unique_users_response_rate: z.number().nullish(),
    unique_users_dismissal_rate: z.number().nullish(),
})
export const SurveyResponseStatsOutputSchema = z.object({
survey_id: z.string().nullish(),
start_date: z.string().nullish(),
end_date: z.string().nullish(),
stats: z
.object({
'survey shown': SurveyEventStatsOutputSchema.nullish(),
'survey dismissed': SurveyEventStatsOutputSchema.nullish(),
'survey sent': SurveyEventStatsOutputSchema.nullish(),
})
.nullish(),
rates: z.object({
response_rate: z.number().nullish(),
dismissal_rate: z.number().nullish(),
unique_users_response_rate: z.number().nullish(),
unique_users_dismissal_rate: z.number().nullish(),
}),
})
// Optional date window for aggregate stats across all surveys.
export const GetSurveyStatsInputSchema = z.object({
    date_from: z
        .string()
        .datetime()
        .optional()
        .describe('Optional ISO timestamp for start date (e.g. 2024-01-01T00:00:00Z)'),
    date_to: z
        .string()
        .datetime()
        .optional()
        .describe('Optional ISO timestamp for end date (e.g. 2024-01-31T23:59:59Z)'),
})
// Date window plus the target survey id for per-survey stats.
export const GetSurveySpecificStatsInputSchema = z.object({
    survey_id: z.string(),
    date_from: z
        .string()
        .datetime()
        .optional()
        .describe('Optional ISO timestamp for start date (e.g. 2024-01-01T00:00:00Z)'),
    date_to: z
        .string()
        .datetime()
        .optional()
        .describe('Optional ISO timestamp for end date (e.g. 2024-01-31T23:59:59Z)'),
})
// Input types
export type CreateSurveyInput = z.infer<typeof CreateSurveyInputSchema>
export type UpdateSurveyInput = z.infer<typeof UpdateSurveyInputSchema>
export type ListSurveysInput = z.infer<typeof ListSurveysInputSchema>
export type GetSurveyStatsInput = z.infer<typeof GetSurveyStatsInputSchema>
export type GetSurveySpecificStatsInput = z.infer<typeof GetSurveySpecificStatsInputSchema>
export type SurveyQuestionInput = z.infer<typeof SurveyQuestionInputSchema>
// Output types
export type SurveyOutput = z.infer<typeof SurveyOutputSchema>
export type SurveyListItemOutput = z.infer<typeof SurveyListItemOutputSchema>
export type SurveyEventStatsOutput = z.infer<typeof SurveyEventStatsOutputSchema>
export type SurveyRatesOutput = z.infer<typeof SurveyRatesOutputSchema>
export type SurveyResponseStatsOutput = z.infer<typeof SurveyResponseStatsOutputSchema>
export type SurveyQuestionOutput = z.infer<typeof SurveyQuestionOutputSchema>

View File

@@ -0,0 +1,409 @@
import { z } from 'zod'
import {
AddInsightToDashboardSchema,
CreateDashboardInputSchema,
ListDashboardsSchema,
UpdateDashboardInputSchema,
} from './dashboards'
import { ErrorDetailsSchema, ListErrorsSchema } from './errors'
import { FilterGroupsSchema, UpdateFeatureFlagInputSchema } from './flags'
import { CreateInsightInputSchema, ListInsightsSchema, UpdateInsightInputSchema } from './insights'
import { InsightQuerySchema } from './query'
import {
CreateSurveyInputSchema,
GetSurveySpecificStatsInputSchema,
GetSurveyStatsInputSchema,
ListSurveysInputSchema,
UpdateSurveyInputSchema,
} from './surveys'
// --- Dashboard tool inputs ---
// NOTE: `.describe()` strings below are runtime schema metadata surfaced to MCP
// clients — do not treat them as comments.
export const DashboardAddInsightSchema = z.object({
    data: AddInsightToDashboardSchema,
})
export const DashboardCreateSchema = z.object({
    data: CreateDashboardInputSchema,
})
export const DashboardDeleteSchema = z.object({
    dashboardId: z.number(),
})
export const DashboardGetSchema = z.object({
    dashboardId: z.number(),
})
export const DashboardGetAllSchema = z.object({
    data: ListDashboardsSchema.optional(),
})
export const DashboardUpdateSchema = z.object({
    dashboardId: z.number(),
    data: UpdateDashboardInputSchema,
})
// --- Documentation search tool input ---
export const DocumentationSearchSchema = z.object({
    query: z.string(),
})
// --- Error tracking tool inputs (re-exported from ./errors) ---
export const ErrorTrackingDetailsSchema = ErrorDetailsSchema
export const ErrorTrackingListSchema = ListErrorsSchema
// --- Experiment tool inputs ---
// Empty object: the list tool takes no arguments.
export const ExperimentGetAllSchema = z.object({})
export const ExperimentGetSchema = z.object({
    experimentId: z.number().describe('The ID of the experiment to retrieve'),
})
export const ExperimentResultsGetSchema = z.object({
    experimentId: z.number().describe('The ID of the experiment to get comprehensive results for'),
    refresh: z.boolean().describe('Force refresh of results instead of using cached values'),
})
export const ExperimentDeleteSchema = z.object({
    experimentId: z.number().describe('The ID of the experiment to delete'),
})
/**
 * User-friendly input schema for experiment updates
 * This provides a simplified interface that gets transformed to API format.
 * Every field is optional so callers can send partial updates; state-change
 * flags (launch/conclude/restart/archive) live alongside content fields.
 */
export const ExperimentUpdateInputSchema = z.object({
    name: z.string().optional().describe('Update experiment name'),
    description: z.string().optional().describe('Update experiment description'),
    // Primary metrics with guidance
    primary_metrics: z
        .array(
            z.object({
                name: z.string().optional().describe('Human-readable metric name'),
                metric_type: z
                    .enum(['mean', 'funnel', 'ratio'])
                    .describe(
                        "Metric type: 'mean' for average values, 'funnel' for conversion flows, 'ratio' for comparing two metrics"
                    ),
                event_name: z
                    .string()
                    .describe("PostHog event name (e.g., '$pageview', 'add_to_cart', 'purchase')"),
                funnel_steps: z
                    .array(z.string())
                    .optional()
                    .describe('For funnel metrics only: Array of event names for each funnel step'),
                properties: z.record(z.any()).optional().describe('Event properties to filter on'),
                description: z.string().optional().describe('What this metric measures'),
            })
        )
        .optional()
        .describe('Update primary metrics'),
    // Secondary metrics mirror the primary-metric shape but with terser guidance.
    secondary_metrics: z
        .array(
            z.object({
                name: z.string().optional().describe('Human-readable metric name'),
                metric_type: z.enum(['mean', 'funnel', 'ratio']).describe('Metric type'),
                event_name: z.string().describe('PostHog event name'),
                funnel_steps: z
                    .array(z.string())
                    .optional()
                    .describe('For funnel metrics only: Array of event names'),
                properties: z.record(z.any()).optional().describe('Event properties to filter on'),
                description: z.string().optional().describe('What this metric measures'),
            })
        )
        .optional()
        .describe('Update secondary metrics'),
    minimum_detectable_effect: z
        .number()
        .optional()
        .describe('Update minimum detectable effect in percentage'),
    // Experiment state management
    launch: z.boolean().optional().describe('Launch experiment (set start_date) or keep as draft'),
    conclude: z
        .enum(['won', 'lost', 'inconclusive', 'stopped_early', 'invalid'])
        .optional()
        .describe('Conclude experiment with result'),
    conclusion_comment: z.string().optional().describe('Comment about experiment conclusion'),
    restart: z
        .boolean()
        .optional()
        .describe('Restart concluded experiment (clears end_date and conclusion)'),
    archive: z.boolean().optional().describe('Archive or unarchive experiment'),
})
/** Wrapper pairing an experiment ID with the user-friendly update payload above. */
export const ExperimentUpdateSchema = z.object({
    experimentId: z.number().describe('The ID of the experiment to update'),
    data: ExperimentUpdateInputSchema.describe(
        'The experiment data to update using user-friendly format'
    ),
})
/**
 * Input schema for guided experiment creation. The long `.describe()` strings
 * are deliberate: they instruct the MCP client (LLM) on how to gather the
 * right inputs conversationally before calling the tool.
 */
export const ExperimentCreateSchema = z.object({
    name: z
        .string()
        .min(1)
        .describe('Experiment name - should clearly describe what is being tested'),
    description: z
        .string()
        .optional()
        .describe(
            'Detailed description of the experiment hypothesis, what changes are being tested, and expected outcomes'
        ),
    feature_flag_key: z
        .string()
        .describe(
            'Feature flag key (letters, numbers, hyphens, underscores only). IMPORTANT: First search for existing feature flags that might be suitable using the feature-flags-get-all tool, then suggest reusing existing ones or creating a new key based on the experiment name'
        ),
    type: z
        .enum(['product', 'web'])
        .default('product')
        .describe(
            "Experiment type: 'product' for backend/API changes, 'web' for frontend UI changes"
        ),
    // Primary metrics with guidance
    primary_metrics: z
        .array(
            z.object({
                name: z.string().optional().describe('Human-readable metric name'),
                metric_type: z
                    .enum(['mean', 'funnel', 'ratio'])
                    .describe(
                        "Metric type: 'mean' for average values (revenue, time spent), 'funnel' for conversion flows, 'ratio' for comparing two metrics"
                    ),
                event_name: z
                    .string()
                    .describe(
                        "REQUIRED for metrics to work: PostHog event name (e.g., '$pageview', 'add_to_cart', 'purchase'). For funnels, this is the first step. Use '$pageview' if unsure. Search project-property-definitions tool for available events."
                    ),
                funnel_steps: z
                    .array(z.string())
                    .optional()
                    .describe(
                        "For funnel metrics only: Array of event names for each funnel step (e.g., ['product_view', 'add_to_cart', 'checkout', 'purchase'])"
                    ),
                properties: z.record(z.any()).optional().describe('Event properties to filter on'),
                description: z
                    .string()
                    .optional()
                    .describe(
                        "What this metric measures and why it's important for the experiment"
                    ),
            })
        )
        .optional()
        .describe(
            'Primary metrics to measure experiment success. IMPORTANT: Each metric needs event_name to track data. For funnels, provide funnel_steps array with event names for each step. Ask user what events they track, or use project-property-definitions to find available events.'
        ),
    // Secondary metrics for additional insights
    secondary_metrics: z
        .array(
            z.object({
                name: z.string().optional().describe('Human-readable metric name'),
                metric_type: z
                    .enum(['mean', 'funnel', 'ratio'])
                    .describe(
                        "Metric type: 'mean' for average values, 'funnel' for conversion flows, 'ratio' for comparing two metrics"
                    ),
                event_name: z
                    .string()
                    .describe("REQUIRED: PostHog event name. Use '$pageview' if unsure."),
                funnel_steps: z
                    .array(z.string())
                    .optional()
                    .describe('For funnel metrics only: Array of event names for each funnel step'),
                properties: z.record(z.any()).optional().describe('Event properties to filter on'),
                description: z.string().optional().describe('What this secondary metric measures'),
            })
        )
        .optional()
        .describe(
            'Secondary metrics to monitor for potential side effects or additional insights. Each metric needs event_name.'
        ),
    // Feature flag variants
    variants: z
        .array(
            z.object({
                key: z
                    .string()
                    .describe("Variant key (e.g., 'control', 'variant_a', 'new_design')"),
                name: z.string().optional().describe('Human-readable variant name'),
                rollout_percentage: z
                    .number()
                    .min(0)
                    .max(100)
                    .describe('Percentage of users to show this variant'),
            })
        )
        .optional()
        .describe(
            'Experiment variants. If not specified, defaults to 50/50 control/test split. Ask user how many variants they need and what each tests'
        ),
    // Experiment parameters
    minimum_detectable_effect: z
        .number()
        .default(30)
        .describe(
            'Minimum detectable effect in percentage. Lower values require more users but detect smaller changes. Suggest 20-30% for most experiments'
        ),
    // Exposure and targeting
    filter_test_accounts: z
        .boolean()
        .default(true)
        .describe('Whether to filter out internal test accounts'),
    target_properties: z
        .record(z.any())
        .optional()
        .describe('Properties to target specific user segments (e.g., country, subscription type)'),
    // Control flags
    draft: z
        .boolean()
        .default(true)
        .describe(
            'Create as draft (true) or launch immediately (false). Recommend draft for review first'
        ),
    holdout_id: z
        .number()
        .optional()
        .describe(
            'Holdout group ID if this experiment should exclude users from other experiments'
        ),
})
// --- Feature flag tool inputs ---
export const FeatureFlagCreateSchema = z.object({
    name: z.string(),
    key: z.string(),
    description: z.string(),
    filters: FilterGroupsSchema,
    active: z.boolean(),
    tags: z.array(z.string()).optional(),
})
// Deletion/update address flags by key (not numeric ID).
export const FeatureFlagDeleteSchema = z.object({
    flagKey: z.string(),
})
export const FeatureFlagGetAllSchema = z.object({})
// Either flagId or flagKey may be supplied to look up a definition.
export const FeatureFlagGetDefinitionSchema = z.object({
    flagId: z.number().int().positive().optional(),
    flagKey: z.string().optional(),
})
export const FeatureFlagUpdateSchema = z.object({
    flagKey: z.string(),
    data: UpdateFeatureFlagInputSchema,
})
// --- Insight tool inputs ---
// NOTE(review): insightId is a string here — presumably it accepts either a
// numeric ID or a short ID that handlers resolve; confirm against the tools.
export const InsightCreateSchema = z.object({
    data: CreateInsightInputSchema,
})
export const InsightDeleteSchema = z.object({
    insightId: z.string(),
})
export const InsightGetSchema = z.object({
    insightId: z.string(),
})
export const InsightGetAllSchema = z.object({
    data: ListInsightsSchema.optional(),
})
export const InsightGenerateHogQLFromQuestionSchema = z.object({
    question: z
        .string()
        .max(1000)
        .describe('Your natural language query describing the SQL insight (max 1000 characters).'),
})
export const InsightQueryInputSchema = z.object({
    insightId: z.string(),
})
export const InsightUpdateSchema = z.object({
    insightId: z.string(),
    data: UpdateInsightInputSchema,
})
// --- LLM analytics tool inputs ---
export const LLMAnalyticsGetCostsSchema = z.object({
    projectId: z.number().int().positive(),
    days: z.number().optional(),
})
// --- Organization tool inputs ---
export const OrganizationGetDetailsSchema = z.object({})
export const OrganizationGetAllSchema = z.object({})
export const OrganizationSetActiveSchema = z.object({
    orgId: z.string().uuid(),
})
// --- Project tool inputs ---
export const ProjectGetAllSchema = z.object({})
export const ProjectEventDefinitionsSchema = z.object({
    q: z
        .string()
        .optional()
        .describe('Search query to filter event names. Only use if there are lots of events.'),
})
export const ProjectPropertyDefinitionsInputSchema = z.object({
    type: z.enum(['event', 'person']).describe('Type of properties to get'),
    eventName: z
        .string()
        .describe('Event name to filter properties by, required for event type')
        .optional(),
    includePredefinedProperties: z
        .boolean()
        .optional()
        .describe('Whether to include predefined properties'),
})
export const ProjectSetActiveSchema = z.object({
    projectId: z.number().int().positive(),
})
// --- Survey tool inputs (mostly re-exported/extended from ./surveys) ---
export const SurveyCreateSchema = CreateSurveyInputSchema
export const SurveyResponseCountsSchema = z.object({})
export const SurveyGlobalStatsSchema = GetSurveyStatsInputSchema
export const SurveyStatsSchema = GetSurveySpecificStatsInputSchema
export const SurveyDeleteSchema = z.object({
    surveyId: z.string(),
})
export const SurveyGetSchema = z.object({
    surveyId: z.string(),
})
export const SurveyGetAllSchema = ListSurveysInputSchema
export const SurveyUpdateSchema = UpdateSurveyInputSchema.extend({
    surveyId: z.string(),
})
// --- Query tool input ---
export const QueryRunInputSchema = z.object({
    query: InsightQuerySchema,
})

View File

@@ -0,0 +1,253 @@
# Adding Tools to the PostHog MCP
This guide explains how to add new tools to the PostHog MCP server. Tools are the interface between MCP clients (like Claude Desktop) and PostHog.
## Quick Start
To add a new tool, you'll need to:
1. Define the tool's input schema
2. Create the tool handler
3. Add the tool definition
4. Write integration tests
5. Update the API client if needed
## Example: Creating a Feature Flag Tool
Let's walk through the `create-feature-flag` tool as a reference example.
### 1. Define Input Schema (`schema/tool-inputs.ts`)
Define your tool's input schema using Zod. Keep inputs **simple and user-friendly**, not necessarily matching the API exactly:
```typescript
export const FeatureFlagCreateSchema = z.object({
name: z.string(),
key: z.string(),
description: z.string(),
filters: FilterGroupsSchema,
active: z.boolean(),
tags: z.array(z.string()).optional(),
})
```
**Best Practices:**
- **Keep inputs simple**: Focus on what users would naturally want to provide
- **Make schemas tight for inputs**: Use strict validation to catch errors early
- **Make schemas loose for outputs**: Be permissive when parsing API responses
- **Use descriptive field names**: Prefer `name` over `flag_name` if it's clear from context
### 2. Create Tool Handler (`tools/featureFlags/create.ts`)
```typescript
import { FeatureFlagCreateSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = FeatureFlagCreateSchema
type Params = z.infer<typeof schema>
export const createHandler = async (context: Context, params: Params) => {
const { name, key, description, filters, active, tags } = params
const projectId = await context.stateManager.getProjectId()
// Call API client method
const flagResult = await context.api.featureFlags({ projectId }).create({
data: { name, key, description, filters, active, tags },
})
if (!flagResult.success) {
throw new Error(`Failed to create feature flag: ${flagResult.error.message}`)
}
// Add context that is useful, like in this case a URL for the LLM to link to.
const featureFlagWithUrl = {
...flagResult.data,
url: `${context.api.getProjectBaseUrl(projectId)}/feature_flags/${flagResult.data.id}`,
}
return {
content: [{ type: 'text', text: JSON.stringify(featureFlagWithUrl) }],
}
}
const tool = (): ToolBase<typeof schema> => ({
name: 'create-feature-flag',
schema,
handler: createHandler,
})
export default tool
```
**Key Points:**
- Use `context.stateManager.getProjectId()` to get the active project
- Use `context.api` to make API calls
- Add helpful information like URLs to responses
- Handle errors gracefully with descriptive messages
- Return `ToolBase` type - title, description, scopes and annotations are injected from JSON
### 3. Add Tool Definition (`schema/tool-definitions.json`)
Add a clear, actionable description for your tool, assign it to a feature, specify required API scopes, and include behavioral annotations. (The `//` comments in the example below are explanatory only — JSON does not support comments, so omit them in the actual file.)
```json
{
"create-feature-flag": {
"title": "Create Feature Flag",
"description": "Creates a new feature flag in the project. Once you have created a feature flag, you should: Ask the user if they want to add it to their codebase, Use the \"search-docs\" tool to find documentation on how to add feature flags to the codebase (search for the right language / framework), Clarify where it should be added and then add it.",
"category": "Feature flags", // This will be displayed in the docs, but not readable by the MCP client
"feature": "flags",
"summary": "Creates a new feature flag in the project.", // This will be displayed in the docs, but not readable by the MCP client.
"required_scopes": ["feature_flag:write"], // You can find a list of available scopes here: https://github.com/PostHog/posthog/blob/31082f4bcc4c45a0ac830777b8a3048e7752a1bc/frontend/src/lib/scopes.tsx
"annotations": {
"destructiveHint": false, // Does the tool delete or destructively modify data?
"idempotentHint": false, // Can the tool be safely called multiple times with same result?
"openWorldHint": true, // Does the tool interact with external systems or create new resources?
"readOnlyHint": false // Is the tool read-only (doesn't modify any state)?
}
}
}
```
**Available Features:**
- `flags` - [Feature flag management](https://posthog.com/docs/feature-flags)
- `workspace` - [Organization and project management](https://posthog.com/docs/getting-started/cloud)
- `error-tracking` - [Error monitoring and debugging](https://posthog.com/docs/errors)
- `dashboards` - [Dashboard creation and management](https://posthog.com/docs/product-analytics/dashboards)
- `insights` - [Analytics insights and SQL queries](https://posthog.com/docs/product-analytics/insights)
- `experiments` - [A/B testing experiments](https://posthog.com/docs/experiments)
- `llm-analytics` - [LLM usage and cost tracking](https://posthog.com/docs/llm-analytics)
- `docs` - PostHog documentation search
If your tool doesn't fit any of these features, you can create a new feature category yourself.
If you do add a new feature, make sure to update the `README.md` in the root of the repository to list the new feature and include it in the tests at `typescript/tests/unit/tool-filtering.test.ts`. You'll also need to update the `AVAILABLE_FEATURES` list in the [PostHog wizard repository](https://github.com/posthog/wizard) so the new feature shows up during feature selection when running `wizard mcp add`.
**Tool Definition Tips:**
- **Title**: Human-readable name shown in UI
- **Description**: Be specific about what the tool does, include follow-up actions if relevant
- **Required Scopes**: Use highest required scope (write if creates/modifies, read if only reads)
- **Annotations**: Provide hints about tool behavior for MCP clients
- **Feature**: Assign to appropriate feature category for filtering
- **Category**: Groups the tools for display in the docs
### 4. Write Integration Tests (`tests/tools/featureFlags.integration.test.ts`)
Always include integration tests to help us catch if there is a change to the underlying API:
```typescript
import {
cleanupResources,
createTestClient,
createTestContext,
generateUniqueKey,
parseToolResponse,
setActiveProjectAndOrg,
} from '@/shared/test-utils'
import createFeatureFlagTool from '@/tools/featureFlags/create'
import { afterEach, beforeAll, describe, expect, it } from 'vitest'
describe('Feature Flags', () => {
let context: Context
const createdResources: CreatedResources = {
featureFlags: [],
insights: [],
dashboards: [],
}
beforeAll(async () => {
const client = createTestClient()
context = createTestContext(client)
await setActiveProjectAndOrg(context, TEST_PROJECT_ID!, TEST_ORG_ID!)
})
afterEach(async () => {
await cleanupResources(context.api, TEST_PROJECT_ID!, createdResources)
})
describe('create-feature-flag tool', () => {
const createTool = createFeatureFlagTool()
it('should create a feature flag with minimal required fields', async () => {
const params = {
name: 'Test Feature Flag',
key: generateUniqueKey('test-flag'),
description: 'Integration test flag',
filters: { groups: [] },
active: true,
}
const result = await createTool.handler(context, params)
const flagData = parseToolResponse(result)
expect(flagData.id).toBeDefined()
expect(flagData.key).toBe(params.key)
expect(flagData.name).toBe(params.name)
expect(flagData.active).toBe(params.active)
expect(flagData.url).toContain('/feature_flags/')
createdResources.featureFlags.push(flagData.id)
})
it('should create a feature flag with complex filters', async () => {
// Test with more complex scenarios
})
})
})
```
**Testing Best Practices:**
- Clean up created resources after each test
- Use unique keys/names to avoid conflicts
- Test both minimal and complex scenarios
- Verify the response structure and content
- Test error cases and edge conditions
### 5. Update API Client if Needed (`api/client.ts`)
If your tool requires new API endpoints, add them to the ApiClient:
```typescript
public featureFlags(params: { projectId: number }) {
return {
create: async ({ data }: { data: CreateFeatureFlagInput }) => {
return this.request<FeatureFlagResponseSchema>({
method: "POST",
path: `/api/projects/${params.projectId}/feature_flags/`,
body: data,
schema: FeatureFlagResponseSchema,
});
},
// Add other methods as needed
};
}
```
**API Client Guidelines:**
- Group related endpoints under resource methods
- Use consistent naming patterns
- Return `Result<T, Error>` types
- Add proper TypeScript types for all parameters and responses
- Include schema validation for responses
## Schema Design Philosophy
### Input Schemas
- **Be strict**: Validate inputs thoroughly to catch errors early
- **Be user-friendly**: Design inputs around what users naturally want to provide
- **Be minimal**: Only require essential fields, make others optional
- **Be clear**: Use descriptive names that don't require API knowledge
### Output Schemas
- **Be permissive**: Don't fail on unexpected fields from the API
- **Be comprehensive**: Include useful information in responses, but don't stuff the context window with unnecessary information
- **Add context**: Include helpful URLs, descriptions, or related data
- **Be consistent**: Use similar patterns across tools

View File

@@ -0,0 +1,50 @@
import { DashboardAddInsightSchema } from '@/schema/tool-inputs'
import { resolveInsightId } from '@/tools/insights/utils'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DashboardAddInsightSchema
type Params = z.infer<typeof schema>

/**
 * Add an existing insight to a dashboard.
 *
 * Accepts either a numeric insight ID or a short ID; the short ID is resolved
 * to the numeric ID up front and that resolved ID is used for every API call.
 * Returns the API response augmented with dashboard and insight URLs so the
 * client can link to both.
 */
export const addInsightHandler = async (context: Context, params: Params) => {
    const { data } = params
    const projectId = await context.stateManager.getProjectId()

    // Resolve short IDs (e.g. "abc123") to the numeric ID the REST API expects.
    const numericInsightId = await resolveInsightId(context, data.insightId, projectId)

    // Fetch the insight to build its URL from short_id. Use the resolved
    // numeric ID here too — passing the raw data.insightId would fail when the
    // caller supplied a short ID (the reason resolveInsightId exists).
    const insightResult = await context.api
        .insights({ projectId })
        .get({ insightId: numericInsightId })
    if (!insightResult.success) {
        throw new Error(`Failed to get insight: ${insightResult.error.message}`)
    }

    const result = await context.api.dashboards({ projectId }).addInsight({
        data: {
            ...data,
            insightId: numericInsightId,
        },
    })
    if (!result.success) {
        throw new Error(`Failed to add insight to dashboard: ${result.error.message}`)
    }

    // Deep links for the MCP client to surface to the user.
    const resultWithUrls = {
        ...result.data,
        dashboard_url: `${context.api.getProjectBaseUrl(projectId)}/dashboard/${data.dashboardId}`,
        insight_url: `${context.api.getProjectBaseUrl(projectId)}/insights/${insightResult.data.short_id}`,
    }
    return { content: [{ type: 'text', text: JSON.stringify(resultWithUrls) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'add-insight-to-dashboard',
    schema,
    handler: addInsightHandler,
})

export default tool

View File

@@ -0,0 +1,32 @@
import { DashboardCreateSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DashboardCreateSchema
type Params = z.infer<typeof schema>

/**
 * Create a new dashboard in the active project and return it augmented with a
 * direct PostHog URL so the client can link straight to it.
 */
export const createHandler = async (context: Context, params: Params) => {
    const projectId = await context.stateManager.getProjectId()

    const created = await context.api.dashboards({ projectId }).create({ data: params.data })
    if (!created.success) {
        throw new Error(`Failed to create dashboard: ${created.error.message}`)
    }

    // Attach a deep link for the client to surface to the user.
    const baseUrl = context.api.getProjectBaseUrl(projectId)
    const payload = { ...created.data, url: `${baseUrl}/dashboard/${created.data.id}` }

    return { content: [{ type: 'text', text: JSON.stringify(payload) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'dashboard-create',
    schema,
    handler: createHandler,
})

export default tool

View File

@@ -0,0 +1,27 @@
import { DashboardDeleteSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DashboardDeleteSchema
type Params = z.infer<typeof schema>

/** Delete a dashboard by numeric ID in the currently active project. */
export const deleteHandler = async (context: Context, params: Params) => {
    const projectId = await context.stateManager.getProjectId()

    const deletion = await context.api
        .dashboards({ projectId })
        .delete({ dashboardId: params.dashboardId })
    if (!deletion.success) {
        throw new Error(`Failed to delete dashboard: ${deletion.error.message}`)
    }

    return { content: [{ type: 'text', text: JSON.stringify(deletion.data) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'dashboard-delete',
    schema,
    handler: deleteHandler,
})

export default tool

View File

@@ -0,0 +1,27 @@
import { DashboardGetSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DashboardGetSchema
type Params = z.infer<typeof schema>

/** Fetch a single dashboard by numeric ID from the active project. */
export const getHandler = async (context: Context, params: Params) => {
    const projectId = await context.stateManager.getProjectId()

    const lookup = await context.api
        .dashboards({ projectId })
        .get({ dashboardId: params.dashboardId })
    if (!lookup.success) {
        throw new Error(`Failed to get dashboard: ${lookup.error.message}`)
    }

    return { content: [{ type: 'text', text: JSON.stringify(lookup.data) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'dashboard-get',
    schema,
    handler: getHandler,
})

export default tool

View File

@@ -0,0 +1,29 @@
import { DashboardGetAllSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DashboardGetAllSchema
type Params = z.infer<typeof schema>

/**
 * List dashboards in the active project. Optional filter/pagination params
 * pass through to the API; an empty object is sent when none are given.
 */
export const getAllHandler = async (context: Context, params: Params) => {
    const projectId = await context.stateManager.getProjectId()

    const listing = await context.api.dashboards({ projectId }).list({ params: params.data ?? {} })
    if (!listing.success) {
        throw new Error(`Failed to get dashboards: ${listing.error.message}`)
    }

    return { content: [{ type: 'text', text: JSON.stringify(listing.data) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'dashboards-get-all',
    schema,
    handler: getAllHandler,
})

export default tool

View File

@@ -0,0 +1,34 @@
import { DashboardUpdateSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DashboardUpdateSchema
type Params = z.infer<typeof schema>

/**
 * Update an existing dashboard and return it augmented with its PostHog URL.
 */
export const updateHandler = async (context: Context, params: Params) => {
    const { dashboardId, data } = params
    const projectId = await context.stateManager.getProjectId()

    const updated = await context.api.dashboards({ projectId }).update({ dashboardId, data })
    if (!updated.success) {
        throw new Error(`Failed to update dashboard: ${updated.error.message}`)
    }

    // Attach a deep link for the client to surface to the user.
    const baseUrl = context.api.getProjectBaseUrl(projectId)
    const payload = { ...updated.data, url: `${baseUrl}/dashboard/${updated.data.id}` }

    return { content: [{ type: 'text', text: JSON.stringify(payload) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'dashboard-update',
    schema,
    handler: updateHandler,
})

export default tool

View File

@@ -0,0 +1,34 @@
import { docsSearch } from '@/inkeepApi'
import { DocumentationSearchSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = DocumentationSearchSchema
type Params = z.infer<typeof schema>

/**
 * Search the PostHog documentation via the Inkeep API.
 * When the Inkeep API key is not configured, the error is returned as tool
 * output (not thrown) so the client can relay it to the user.
 */
export const searchDocsHandler = async (context: Context, params: Params) => {
    const apiKey = context.env.INKEEP_API_KEY

    if (!apiKey) {
        return {
            content: [{ type: 'text', text: 'Error: INKEEP_API_KEY is not configured.' }],
        }
    }

    const answer = await docsSearch(apiKey, params.query)
    return { content: [{ type: 'text', text: answer }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'docs-search',
    schema,
    handler: searchDocsHandler,
})

export default tool

View File

@@ -0,0 +1,39 @@
import { ErrorTrackingDetailsSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ErrorTrackingDetailsSchema
type Params = z.infer<typeof schema>

// Default look-back window when no dateFrom is supplied: 7 days in ms.
const SEVEN_DAYS_MS = 7 * 24 * 60 * 60 * 1000

/**
 * Fetch details for a single error-tracking issue via an ErrorTrackingQuery,
 * defaulting the date range to the last 7 days when bounds are omitted.
 */
export const errorDetailsHandler = async (context: Context, params: Params) => {
    const { issueId, dateFrom, dateTo } = params
    const projectId = await context.stateManager.getProjectId()

    const queryBody = {
        kind: 'ErrorTrackingQuery',
        dateRange: {
            date_from: dateFrom || new Date(Date.now() - SEVEN_DAYS_MS).toISOString(),
            date_to: dateTo || new Date().toISOString(),
        },
        volumeResolution: 0,
        issueId,
    }

    const queried = await context.api.query({ projectId }).execute({ queryBody })
    if (!queried.success) {
        throw new Error(`Failed to get error details: ${queried.error.message}`)
    }

    return { content: [{ type: 'text', text: JSON.stringify(queried.data.results) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'error-details',
    schema,
    handler: errorDetailsHandler,
})

export default tool

View File

@@ -0,0 +1,42 @@
import { ErrorTrackingListSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ErrorTrackingListSchema
type Params = z.infer<typeof schema>

// Default look-back window when no dateFrom is supplied: 7 days in ms.
const SEVEN_DAYS_MS = 7 * 24 * 60 * 60 * 1000

/**
 * List error-tracking issues for the active project. Missing filters fall
 * back to sensible defaults: last 7 days, active issues, test accounts
 * filtered out, ordered by occurrences descending.
 */
export const listErrorsHandler = async (context: Context, params: Params) => {
    const { orderBy, dateFrom, dateTo, orderDirection, filterTestAccounts, status } = params
    const projectId = await context.stateManager.getProjectId()

    const queryBody = {
        kind: 'ErrorTrackingQuery',
        orderBy: orderBy || 'occurrences',
        dateRange: {
            date_from: dateFrom || new Date(Date.now() - SEVEN_DAYS_MS).toISOString(),
            date_to: dateTo || new Date().toISOString(),
        },
        volumeResolution: 1,
        orderDirection: orderDirection || 'DESC',
        filterTestAccounts: filterTestAccounts ?? true,
        status: status || 'active',
    }

    const queried = await context.api.query({ projectId }).execute({ queryBody })
    if (!queried.success) {
        throw new Error(`Failed to list errors: ${queried.error.message}`)
    }

    return { content: [{ type: 'text', text: JSON.stringify(queried.data.results) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'list-errors',
    schema,
    handler: listErrorsHandler,
})

export default tool

View File

@@ -0,0 +1,44 @@
import { ExperimentCreateSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ExperimentCreateSchema
type Params = z.infer<typeof schema>

/**
 * Create a comprehensive A/B test experiment with guided setup.
 * This tool helps users create well-configured experiments through
 * conversation; the created experiment is returned with a direct URL.
 */
export const createExperimentHandler = async (context: Context, params: Params) => {
    const projectId = await context.stateManager.getProjectId()

    const creation = await context.api.experiments({ projectId }).create(params)
    if (!creation.success) {
        throw new Error(`Failed to create experiment: ${creation.error.message}`)
    }

    // Attach a deep link for the client to surface to the user.
    const payload = {
        ...creation.data,
        url: `${context.api.getProjectBaseUrl(projectId)}/experiments/${creation.data.id}`,
    }

    // Pretty-printed (2-space indent), matching the established output format.
    return { content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'experiment-create',
    schema,
    handler: createExperimentHandler,
})

export default tool

View File

@@ -0,0 +1,31 @@
import { ExperimentDeleteSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ExperimentDeleteSchema
type Params = z.infer<typeof schema>

/** Delete an experiment by numeric ID in the currently active project. */
export const deleteHandler = async (context: Context, { experimentId }: Params) => {
    const projectId = await context.stateManager.getProjectId()

    const removal = await context.api.experiments({ projectId }).delete({ experimentId })
    if (!removal.success) {
        throw new Error(`Failed to delete experiment: ${removal.error.message}`)
    }

    return { content: [{ type: 'text', text: JSON.stringify(removal.data) }] }
}

const tool = (): ToolBase<typeof schema> => ({
    name: 'experiment-delete',
    schema,
    handler: deleteHandler,
})

export default tool

View File

@@ -0,0 +1,29 @@
import { ExperimentGetSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ExperimentGetSchema
type Params = z.infer<typeof schema>
export const getHandler = async (context: Context, { experimentId }: Params) => {
const projectId = await context.stateManager.getProjectId()
const result = await context.api.experiments({ projectId }).get({
experimentId: experimentId,
})
if (!result.success) {
throw new Error(`Failed to get experiment: ${result.error.message}`)
}
return { content: [{ type: 'text', text: JSON.stringify(result.data) }] }
}
// Factory wiring the experiment-get handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'experiment-get',
        schema,
        handler: getHandler,
    }
}
export default tool

View File

@@ -0,0 +1,27 @@
import { ExperimentGetAllSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ExperimentGetAllSchema
type Params = z.infer<typeof schema>
/** Lists every experiment in the active project as a single JSON text block. */
export const getAllHandler = async (context: Context, _params: Params) => {
    const projectId = await context.stateManager.getProjectId()
    const listResult = await context.api.experiments({ projectId }).list()
    if (!listResult.success) {
        throw new Error(`Failed to get experiments: ${listResult.error.message}`)
    }
    const payload = JSON.stringify(listResult.data)
    return { content: [{ type: 'text', text: payload }] }
}
// Factory wiring the experiment-get-all handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'experiment-get-all',
        schema,
        handler: getAllHandler,
    }
}
export default tool

View File

@@ -0,0 +1,53 @@
import { ExperimentResultsResponseSchema } from '@/schema/experiments'
import { ExperimentResultsGetSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = ExperimentResultsGetSchema
type Params = z.infer<typeof schema>
/**
 * Fetches experiment results for the active project: the experiment itself,
 * its primary and secondary metric results, and exposure data. The combined
 * payload is validated through `ExperimentResultsResponseSchema` before being
 * serialized into an MCP text content block.
 *
 * @param params - contains `experimentId` and a `refresh` flag forwarded to the API.
 * @throws Error when the metric-results API call fails.
 */
export const getResultsHandler = async (context: Context, params: Params) => {
    const projectId = await context.stateManager.getProjectId()
    const metricResults = await context.api.experiments({ projectId }).getMetricResults({
        experimentId: params.experimentId,
        refresh: params.refresh,
    })
    if (!metricResults.success) {
        throw new Error(`Failed to get experiment results: ${metricResults.error.message}`)
    }
    const { experiment, primaryMetricsResults, secondaryMetricsResults, exposures } =
        metricResults.data
    // Validate/normalize the combined payload before serializing it.
    const payload = ExperimentResultsResponseSchema.parse({
        experiment,
        primaryMetricsResults,
        secondaryMetricsResults,
        exposures,
    })
    return {
        content: [
            {
                type: 'text',
                text: JSON.stringify(payload, null, 2),
            },
        ],
    }
}
// Factory wiring the experiment-results-get handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'experiment-results-get',
        schema,
        handler: getResultsHandler,
    }
}
export default tool

View File

@@ -0,0 +1,54 @@
import { ExperimentUpdateTransformSchema } from '@/schema/experiments'
import { ExperimentUpdateSchema } from '@/schema/tool-inputs'
import { getToolDefinition } from '@/tools/toolDefinitions'
import type { Context, Tool } from '@/tools/types'
import type { z } from 'zod'
const schema = ExperimentUpdateSchema
type Params = z.infer<typeof schema>
/**
 * Updates an existing experiment and returns the updated record,
 * augmented with its PostHog UI URL, as pretty-printed JSON.
 *
 * @param params - contains `experimentId` plus the tool-input `data` to apply.
 * @throws Error when the update API call fails.
 */
export const updateHandler = async (context: Context, params: Params) => {
    const { experimentId, data } = params
    const projectId = await context.stateManager.getProjectId()
    // Convert the tool-input shape into the REST payload the API expects.
    const payload = ExperimentUpdateTransformSchema.parse(data)
    const updated = await context.api.experiments({ projectId }).update({
        experimentId,
        updateData: payload,
    })
    if (!updated.success) {
        throw new Error(`Failed to update experiment: ${updated.error.message}`)
    }
    const baseUrl = context.api.getProjectBaseUrl(projectId)
    const enriched = { ...updated.data, url: `${baseUrl}/experiments/${updated.data.id}` }
    return {
        content: [{ type: 'text', text: JSON.stringify(enriched, null, 2) }],
    }
}
// Registry metadata (title/description) for the experiment-update tool.
const definition = getToolDefinition('experiment-update')
// Factory wiring the update handler, its scopes, and MCP annotations together.
const tool = (): Tool<typeof schema> => {
    return {
        name: 'experiment-update',
        title: definition.title,
        description: definition.description,
        schema,
        handler: updateHandler,
        scopes: ['experiments:write'],
        annotations: {
            destructiveHint: false,
            idempotentHint: true,
            openWorldHint: true,
            readOnlyHint: false,
        },
    }
}
export default tool

View File

@@ -0,0 +1,37 @@
import { FeatureFlagCreateSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = FeatureFlagCreateSchema
type Params = z.infer<typeof schema>
/**
 * Creates a feature flag in the active project and returns the created
 * record, augmented with its PostHog UI URL, as an MCP text content block.
 *
 * @throws Error when the create API call fails.
 */
export const createHandler = async (context: Context, params: Params) => {
    const { name, key, description, filters, active, tags } = params
    const projectId = await context.stateManager.getProjectId()
    const created = await context.api.featureFlags({ projectId }).create({
        data: { name, key, description, filters, active, tags },
    })
    if (!created.success) {
        throw new Error(`Failed to create feature flag: ${created.error.message}`)
    }
    const url = `${context.api.getProjectBaseUrl(projectId)}/feature_flags/${created.data.id}`
    return {
        content: [{ type: 'text', text: JSON.stringify({ ...created.data, url }) }],
    }
}
// Factory wiring the feature-flag create handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'create-feature-flag',
        schema,
        handler: createHandler,
    }
}
export default tool

View File

@@ -0,0 +1,42 @@
import { FeatureFlagDeleteSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = FeatureFlagDeleteSchema
type Params = z.infer<typeof schema>
/**
 * Deletes a feature flag identified by its key. The key is first resolved to
 * a flag id via findByKey; a flag that no longer exists is reported as already
 * deleted rather than treated as an error (idempotent delete).
 *
 * @throws Error when the lookup or the delete API call fails.
 */
export const deleteHandler = async (context: Context, params: Params) => {
    const { flagKey } = params
    const projectId = await context.stateManager.getProjectId()
    const found = await context.api.featureFlags({ projectId }).findByKey({ key: flagKey })
    if (!found.success) {
        throw new Error(`Failed to find feature flag: ${found.error.message}`)
    }
    // Missing flag: nothing to do — report success to keep the tool idempotent.
    if (!found.data) {
        return {
            content: [{ type: 'text', text: 'Feature flag is already deleted.' }],
        }
    }
    const removed = await context.api.featureFlags({ projectId }).delete({
        flagId: found.data.id,
    })
    if (!removed.success) {
        throw new Error(`Failed to delete feature flag: ${removed.error.message}`)
    }
    return {
        content: [{ type: 'text', text: JSON.stringify(removed.data) }],
    }
}
// Factory wiring the feature-flag delete handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'delete-feature-flag',
        schema,
        handler: deleteHandler,
    }
}
export default tool

View File

@@ -0,0 +1,27 @@
import { FeatureFlagGetAllSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = FeatureFlagGetAllSchema
type Params = z.infer<typeof schema>
/** Lists every feature flag in the active project as a single JSON text block. */
export const getAllHandler = async (context: Context, _params: Params) => {
    const projectId = await context.stateManager.getProjectId()
    const listResult = await context.api.featureFlags({ projectId }).list()
    if (!listResult.success) {
        throw new Error(`Failed to get feature flags: ${listResult.error.message}`)
    }
    const payload = JSON.stringify(listResult.data)
    return { content: [{ type: 'text', text: payload }] }
}
// Factory wiring the feature-flag list handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'feature-flag-get-all',
        schema,
        handler: getAllHandler,
    }
}
export default tool

View File

@@ -0,0 +1,72 @@
import { FeatureFlagGetDefinitionSchema } from '@/schema/tool-inputs'
import type { Context, ToolBase } from '@/tools/types'
import type { z } from 'zod'
const schema = FeatureFlagGetDefinitionSchema
type Params = z.infer<typeof schema>
/**
 * Looks up a feature flag definition either by numeric id or by key.
 * Missing-argument and flag-not-found cases are reported as text results
 * rather than thrown; only API failures raise an Error.
 */
export const getDefinitionHandler = async (context: Context, { flagId, flagKey }: Params) => {
    const textResult = (text: string) => ({ content: [{ type: 'text', text }] })
    // NOTE(review): truthiness means flagId === 0 is treated as absent — confirm ids are 1-based.
    if (!flagId && !flagKey) {
        return textResult('Error: Either flagId or flagKey must be provided.')
    }
    const projectId = await context.stateManager.getProjectId()
    if (flagId) {
        // Id lookup takes precedence when both id and key are supplied.
        const byId = await context.api.featureFlags({ projectId }).get({ flagId: String(flagId) })
        if (!byId.success) {
            throw new Error(`Failed to get feature flag: ${byId.error.message}`)
        }
        return textResult(JSON.stringify(byId.data))
    }
    if (flagKey) {
        const byKey = await context.api.featureFlags({ projectId }).findByKey({ key: flagKey })
        if (!byKey.success) {
            throw new Error(`Failed to find feature flag: ${byKey.error.message}`)
        }
        if (byKey.data) {
            return textResult(JSON.stringify(byKey.data))
        }
        return textResult(`Error: Flag with key "${flagKey}" not found.`)
    }
    // Fallback retained from the original; the guards above make it unreachable.
    return textResult('Error: Could not determine or find the feature flag.')
}
// Factory wiring the flag-definition lookup handler into the MCP tool registry.
const tool = (): ToolBase<typeof schema> => {
    return {
        name: 'feature-flag-get-definition',
        schema,
        handler: getDefinitionHandler,
    }
}
export default tool

Some files were not shown because too many files have changed in this diff Show More