Manage prompts in code (flexible)
Continue to manage your prompts in code while strongly linking prompt versions to observability sessions
This approach gives you full control over prompts in version control while still leveraging Freeplay's strong associations between prompt versions and observability sessions. This allows you to instantly create datasets from observability, iterate on prompts, and run online evaluations.
You'll need to create your prompts in Freeplay to obtain a template name and id, then create new versions of the templates as you iterate on them. This lets you associate each iteration with prompt performance metrics like evaluations, cost, and latency.
📌 Key Benefits
- Code-First Workflow: Prompts live in your Git repository
- Standard Review Process: Use your existing code review workflow for prompt changes
- Automatic Sync: Push prompts to Freeplay as part of your deployment pipeline
- Full Flexibility: Programmatically manage all prompt aspects
Best For:
- Teams who prefer infrastructure-as-code approaches
- Organizations with strict code review requirements for all changes
- Developers who want prompts versioned alongside application code
Step 1. Create a Prompt in Freeplay
To get started quickly you can follow the create, iterate, and test prompts in the UI quick start guide to create a prompt in the UI. The integrations page will list the project_id, template_name, and version you will need.
A more sustainable route is to use the API to create and update prompt templates as they are modified in code. When you keep Freeplay updated with your most recent prompt changes, Freeplay observability will track deployment changes.
Below are examples of how you can accomplish this via the SDKs and API.
import os
import json
import requests
from dotenv import load_dotenv

# Pull FREEPLAY_API_KEY (and any other secrets) from a local .env file.
load_dotenv()

# Configuration
FREEPLAY_API_KEY = os.getenv("FREEPLAY_API_KEY")
project_id = "<YOUR-PROJECT-ID>"  # TODO: replace with your Freeplay project id
# self hosted instances will need to change the hostname
base_api_url = "https://app.freeplay.ai/api/v2"
template_name = "api-test"  # prompt template to create/update below
def create_template():
    """Create the prompt template in Freeplay (if missing) and publish its
    first version.

    The ``create_template_if_not_exists=1`` flag makes this call work on the
    very first run, before the template exists.

    Returns:
        The parsed JSON response from Freeplay, or None if the request failed
        (see ``send_request``).
    """
    url = (
        base_api_url
        + f"/projects/{project_id}/prompt-templates/name/{template_name}/versions?create_template_if_not_exists=1"
    )
    # FIX: prompt_file_path was referenced but never defined anywhere in the
    # script, which raised NameError at call time. Define it locally; the
    # current load_prompt_content ignores it and returns inline content.
    prompt_file_path = "prompts/api-test.json"
    prompt_template_setup = {
        "template_messages": load_prompt_content(prompt_file_path),
        "provider": "openai",
        "model": "gpt-4.1-mini-2025-04-14",
        "llm_parameters": {
            "temperature": 0.2,
            "max_tokens": 256,
        },
        "version_name": "test-version-1",
        "version_description": "Development test version with mustache variable",
    }
    return send_request(url, prompt_template_setup)
def update_template(template_id):
    """Publish a second version of an existing template with a modified
    system message.

    Args:
        template_id: The Freeplay prompt-template id returned by
            ``create_template``.

    Returns:
        The parsed JSON response from Freeplay, or None if the request failed.
    """
    url = (
        base_api_url
        + f"/projects/{project_id}/prompt-templates/id/{template_id}/versions"
    )
    # FIX: prompt_file_path was referenced but never defined anywhere in the
    # script, which raised NameError at call time. Define it locally; the
    # current load_prompt_content ignores it and returns inline content.
    prompt_file_path = "prompts/api-test.json"
    # Load the base messages and modify the system message to change the tone.
    messages = load_prompt_content(prompt_file_path)
    for msg in messages:
        if msg["role"] == "system":
            msg["content"] = "You talk like a pirate. " + msg["content"]
    prompt_template_setup = {
        "template_messages": messages,
        "provider": "openai",
        "model": "gpt-4.1-mini-2025-04-14",
        "llm_parameters": {
            "temperature": 0.2,
            "max_tokens": 256,
        },
        "version_name": "test-version-2",
        "version_description": "Update-to-tone",
    }
    return send_request(url, prompt_template_setup)
# Read prompt content from file
def load_prompt_content(filepath):
    """Return the prompt messages as a list of role/content dicts.

    ``filepath`` is accepted for API realism but currently unused — the
    content is inlined to simulate loading a prompt from disk.
    """
    system_message = {
        "role": "system",
        "content": "You are a helpful LLM assistant. Who always responds with the user's name.",
    }
    user_message = {
        "role": "user",
        "content": "Hi, my name is {{name}}.",
    }
    return [system_message, user_message]
def send_request(url, prompt_template_data):
    """POST a prompt-template payload to the Freeplay API.

    Returns the parsed JSON response, or None (after logging the error)
    when the request fails or the server returns an error status.
    """
    auth_headers = {
        "Authorization": f"Bearer {FREEPLAY_API_KEY}",
        "Content-Type": "application/json",
    }
    try:
        response = requests.post(url, headers=auth_headers, json=prompt_template_data)
        # Surface 4xx/5xx responses as exceptions so they hit the handler below.
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as exc:
        print(f"Error sending request: {exc}")
        return None
if __name__ == "__main__":
    # Create the template (and its first version) first.
    create_response = create_template()
    print(create_response)
    # FIX: send_request returns None on failure; calling .get on None raised
    # AttributeError. Only read the ids when the create call succeeded.
    template_id = None
    template_version_id = None
    if create_response:
        template_id = create_response.get("prompt_template_id")
        template_version_id = create_response.get("prompt_template_version_id")
    print(
        f"created prompt template with template id {template_id} and version id {template_version_id}"
    )
    # If template ID exists, publish an updated version of the template.
    if template_id:
        update_response = update_template(template_id)
print(update_response)import "dotenv/config";
// Configuration
const FREEPLAY_API_KEY = process.env.FREEPLAY_API_KEY; // loaded via dotenv above
const projectId = "<your-project-id>"; // TODO: replace with your Freeplay project id
// self hosted instances will need to change the hostname
const baseApiUrl = "https://app.freeplay.ai/api/v2";
const templateName = "my-prompt"; // prompt template to create/update below
function loadPromptContent() {
// simulate loading a prompt from file
return [
{
role: "system",
content: "You are a helpful LLM assistant. Who always responds with the user's name."
},
{
role: "user",
content: "Hi, my name is {{name}}."
}
];
}
// POST promptTemplateData to the Freeplay API.
// Resolves to the parsed JSON body, or null (after logging) when the
// request fails or the server returns a non-2xx status.
async function sendRequest(url, promptTemplateData) {
  try {
    const response = await fetch(url, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${FREEPLAY_API_KEY}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify(promptTemplateData),
    });
    if (!response.ok) {
      // Mirror fetch's lack of status checking by raising explicitly.
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    return await response.json();
  } catch (error) {
    console.error(`Error sending request: ${error}`);
    return null;
  }
}
// Create the template in Freeplay (if missing) and publish its first version.
async function createTemplate() {
  const url = `${baseApiUrl}/projects/${projectId}/prompt-templates/name/${templateName}/versions?create_template_if_not_exists=1`;
  return sendRequest(url, {
    template_messages: loadPromptContent(),
    provider: "openai",
    model: "gpt-4.1-mini-2025-04-14",
    llm_parameters: { temperature: 0.2, max_tokens: 256 },
    version_name: "test-version-1",
    version_description: "Development test version with mustache variable",
  });
}
// Publish a second version of an existing template with a modified
// system message.
async function updateTemplate(templateId) {
  const url = `${baseApiUrl}/projects/${projectId}/prompt-templates/id/${templateId}/versions`;
  // Prefix the system message to change the prompt's tone.
  const messages = loadPromptContent().map((msg) =>
    msg.role === "system"
      ? { ...msg, content: "You talk like a pirate. " + msg.content }
      : msg
  );
  return sendRequest(url, {
    template_messages: messages,
    provider: "openai",
    model: "gpt-4.1-mini-2025-04-14",
    llm_parameters: { temperature: 0.2, max_tokens: 256 },
    version_name: "test-version-2",
    version_description: "Update to tone",
  });
}
// Main execution: create the template, then publish an update if the
// create call returned a template id.
async function main() {
  const createResponse = await createTemplate();
  console.log(createResponse);
  // sendRequest resolves to null on failure, hence the optional chaining.
  const templateId = createResponse?.prompt_template_id;
  const templateVersionId = createResponse?.prompt_template_version_id;
  console.log(
    `created prompt template with template id ${templateId} and version id ${templateVersionId}`
  );
  if (!templateId) return;
  const updateResponse = await updateTemplate(templateId);
  console.log(updateResponse);
}
main();
#!/bin/bash
# Load environment variables from .env file (expects FREEPLAY_API_KEY there)
if [ -f .env ]; then
  export $(grep -v '^#' .env | xargs)
fi

# Configuration
PROJECT_ID="<your-project-id>"  # TODO: replace with your Freeplay project id
# Self hosted instances will need to change the hostname
BASE_API_URL="https://app.freeplay.ai/api/v2"
TEMPLATE_NAME="api-test"  # prompt template to create/update below
# Return inline prompt content (simulating loading from file).
# Emits a JSON array of chat messages on stdout; the quoted 'EOF'
# delimiter prevents any shell expansion inside the heredoc.
load_prompt_content() {
  cat <<'EOF'
[
  {
    "role": "system",
    "content": "You are a helpful LLM assistant. Who always responds with the user's name."
  },
  {
    "role": "user",
    "content": "Hi, my name is {{name}}."
  }
]
EOF
}
# POST a JSON payload to the Freeplay API and print the response body.
#   $1 - full request URL
#   $2 - JSON request body
send_request() {
  local url="$1" data="$2"
  curl -s -X POST "$url" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer $FREEPLAY_API_KEY" \
    -d "$data"
}
# Create the template in Freeplay (if missing) and publish its first version.
create_template() {
  local url="${BASE_API_URL}/projects/${PROJECT_ID}/prompt-templates/name/${TEMPLATE_NAME}/versions?create_template_if_not_exists=1"
  # Pipe the messages straight into jq, which handles all JSON escaping.
  local payload
  payload=$(load_prompt_content | jq '{
    "template_messages": .,
    "provider": "openai",
    "model": "gpt-4.1-mini-2025-04-14",
    "llm_parameters": {
      "temperature": 0.2,
      "max_tokens": 256
    },
    "version_name": "test-version-1",
    "version_description": "Development test version with mustache variable"
  }')
  send_request "$url" "$payload"
}
# Publish a second version of an existing template ($1 = template id)
# with a pirate-toned system message.
update_template() {
  local template_id="$1"
  local url="${BASE_API_URL}/projects/${PROJECT_ID}/prompt-templates/id/${template_id}/versions"
  # Prefix the system message, then build the payload, in a single jq pass.
  local payload
  payload=$(load_prompt_content | jq '
    map(if .role == "system" then .content = "You talk like a pirate. " + .content else . end)
    | {
        "template_messages": .,
        "provider": "openai",
        "model": "gpt-4.1-mini-2025-04-14",
        "llm_parameters": {
          "temperature": 0.2,
          "max_tokens": 256
        },
        "version_name": "test-version-2",
        "version_description": "Update-to-tone"
      }')
  send_request "$url" "$payload"
}
# Main execution: create the template, then publish an update if the
# create call returned a template id.
main() {
  echo "Creating template..."
  local create_response template_id template_version_id update_response
  create_response=$(create_template)
  echo "$create_response" | jq .
  # Extract the ids from the create response.
  template_id=$(jq -r '.prompt_template_id' <<<"$create_response")
  template_version_id=$(jq -r '.prompt_template_version_id' <<<"$create_response")
  echo "Created prompt template with template id ${template_id} and version id ${template_version_id}"
  # jq -r prints the literal string "null" when the key is missing.
  if [[ "$template_id" != "null" && -n "$template_id" ]]; then
    echo "Updating template..."
    update_response=$(update_template "$template_id")
    echo "$update_response" | jq .
  fi
}
main
Step 2: Record a session to Freeplay
After acquiring the template id and prompt template version id from the URL in the UI, record a session to Freeplay.
Continue building prompts in your application, but link them to Freeplay when logging:
from openai import OpenAI
import os
from anthropic import Anthropic  # FIX: was missing, but Anthropic is used below
from freeplay import Freeplay, RecordPayload, CallInfo
from freeplay.resources.prompts import PromptVersionInfo
from google.colab import userdata  # NOTE(review): Colab-only and unused here — confirm

## Prepare Clients ##
# OpenAI
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Anthropic
anthropic_client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
# Freeplay
# TODO: Set these parameters
project_id = "<YOUR-PROJECT-ID>"
base_api_url = "https://app.freeplay.ai/api"
# NOTE: found in the URL of prompt template edit page
# for example https://app.freeplay.ai/projects/<Project_ID>/templates/<Template_ID>/edit/<Version_ID>
template_version_id = "<YOUR-TEMPLATE-VERSION-ID>"
fpClient = Freeplay(
    freeplay_api_key=os.getenv("FREEPLAY_API_KEY"),
    api_base=base_api_url,  # if self hosted, replace with your instance url
)
# TODO: Set your Provider
provider = "openai"
# Load prompt content (simulating loading from file)
def load_prompt_content():
    """Return the chat messages for the prompt as role/content dicts."""
    system_message = {
        "role": "system",
        "content": "You talk like a pirate. You are a helpful LLM assistant. Who always responds with the user's name.",
    }
    user_message = {
        "role": "user",
        "content": "Hi, my name is {{name}}.",
    }
    return [system_message, user_message]
# Format messages by replacing mustache variables with input values
# (can be handled using formatted prompts from Freeplay)
def format_messages(messages, variables):
    """Return a new message list with every {{var}} replaced by its value.

    The input list is not mutated; a fresh list of dicts is returned.
    """
    def _fill(text):
        # str.replace swaps every occurrence of the placeholder.
        for key, value in variables.items():
            text = text.replace("{{" + key + "}}", value)
        return text

    return [{"role": m["role"], "content": _fill(m["content"])} for m in messages]
# Define model and prompt variables
prompt_vars = {"name": "Jill"}

# Load and format messages with variables
messages = format_messages(load_prompt_content(), prompt_vars)

if provider == "openai":
    model = "gpt-4.1-mini-2025-04-14"
    completion = openai_client.chat.completions.create(model=model, messages=messages)
    # Append the assistant reply so the full exchange can be recorded.
    all_messages = messages + [
        {"role": "assistant", "content": completion.choices[0].message.content}
    ]
elif provider == "anthropic":
    model = "claude-sonnet-4-5-20250929"
    # Anthropic takes the system prompt separately from the message list.
    completion = anthropic_client.messages.create(
        model=model,
        max_tokens=1024,
        system=messages[0]["content"],
        messages=[{"role": "user", "content": messages[1]["content"]}],
    )
    all_messages = messages + [
        {"role": "assistant", "content": completion.content[0].text}
    ]
# Record the completion data to Freeplay so this completion appears in
# observability, linked to the exact prompt template version used.
fpClient.recordings.create(
    RecordPayload(
        project_id=project_id,  # available in the Freeplay UI — confirm location
        call_info=CallInfo(provider=provider, model=model),
        all_messages=all_messages,  # full conversation incl. assistant reply
        inputs=prompt_vars,  # the values substituted into the template
        prompt_version_info=PromptVersionInfo(
            prompt_template_version_id=template_version_id,
            environment="latest",
        )
    )
)import OpenAI from "openai";
import Anthropic from "@anthropic-ai/sdk";
import Freeplay from "freeplay";

// Configuration — all ids/keys come from the environment.
const projectId = process.env.FREEPLAY_PROJECT_ID;
const baseUrl = "https://app.freeplay.ai/api"; // self hosted: change hostname
// FIX: was hard-coded to "" with the env read commented out, so the
// prompt version was never linked. Read it from the environment; an
// empty value still skips the promptVersionInfo block below.
const templateVersionId = process.env.FREEPLAY_TEMPLATE_VERSION_ID ?? "";
const provider = "anthropic"; // "openai" | "anthropic"
const name = "Jill";

// Clients
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const fpClient = new Freeplay({
  freeplayApiKey: process.env.FREEPLAY_API_KEY,
  baseUrl,
});
// Load prompt content (simulating loading from file)
function loadPromptContent() {
return [
{
role: "system",
content:
"You talk like a pirate. You are a helpful LLM assistant. Who always responds with the user's name.",
},
{
role: "user",
content: "Hi, my name is {{name}}.",
},
];
}
// Replace mustache variables with input values
function formatMessages(messages, variables) {
return messages.map((msg) => {
let content = msg.content;
for (const [key, value] of Object.entries(variables)) {
content = content.replace(`{{${key}}}`, String(value));
}
return { role: msg.role, content };
});
}
// Define variables and format the prompt messages.
const promptVars = { name };
const messages = formatMessages(loadPromptContent(), promptVars);

let model = "";
let allMessages = messages;

if (provider === "openai") {
  model = "gpt-4.1-mini-2025-04-14";
  const completion = await openai.chat.completions.create({ model, messages });
  // Append the assistant reply so the full exchange can be recorded.
  allMessages = [
    ...messages,
    {
      role: "assistant",
      content: completion.choices[0]?.message?.content ?? "",
    },
  ];
} else if (provider === "anthropic") {
  model = "claude-sonnet-4-5-20250929";
  // Anthropic takes the system prompt separately from the message list.
  const completion = await anthropic.messages.create({
    model,
    max_tokens: 1024,
    system: messages[0].content,
    messages: [{ role: "user", content: messages[1].content }],
  });
  allMessages = [
    ...messages,
    { role: "assistant", content: completion.content?.[0]?.text ?? "" },
  ];
} else {
  throw new Error(`Unsupported provider: ${provider}`);
}
// Record the completion data to Freeplay so it appears in observability.
const recordPayload = {
  projectId,
  allMessages,
  inputs: promptVars,
  callInfo: { provider, model },
};
// Only link a prompt version when one was configured.
if (templateVersionId) {
  recordPayload.promptVersionInfo = {
    promptTemplateVersionId: templateVersionId,
    environment: "latest",
  };
}
const recordResponse = await fpClient.recordings.create(recordPayload);
console.log("✓ Completion recorded to Freeplay. completionId:", recordResponse.completionId);#!/usr/bin/env bash
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail
# Load env from .env if present
if [[ -f .env ]]; then
  # shellcheck disable=SC2046
  export $(grep -v '^#' .env | xargs)
fi

# Config (env-first); OPENAI_API_KEY / FREEPLAY_* must already be exported.
OPENAI_MODEL="gpt-4.1-mini"
OPENAI_API_KEY="${OPENAI_API_KEY}"
FREEPLAY_PROJECT_ID="${FREEPLAY_PROJECT_ID}"
FREEPLAY_API_KEY="${FREEPLAY_API_KEY}"
FREEPLAY_API_BASE="https://app.freeplay.ai/api"

###
# User prompt
###
NAME="Jill"
USER_PROMPT="Hi, my name is {{name}}."
# Substitute the {{name}} mustache variable via bash parameter expansion.
USER_PROMPT_FORMATTED="${USER_PROMPT//\{\{name\}\}/$NAME}"
###
# 1) Call OpenAI
###
# Build the chat-completions request body with jq so quoting stays safe.
SYSTEM_PROMPT="You talk like a pirate. You are a helpful LLM assistant. Who always responds with the user's name."
OPENAI_PAYLOAD="$(jq -n \
  --arg model "$OPENAI_MODEL" \
  --arg sys "$SYSTEM_PROMPT" \
  --arg prompt "$USER_PROMPT_FORMATTED" \
  '{
    model: $model,
    messages: [
      {role:"system", content: $sys},
      {role:"user", content: $prompt}
    ]
  }')"
OPENAI_RESPONSE_JSON="$(curl -sS https://api.openai.com/v1/chat/completions \
  -H "Authorization: Bearer ${OPENAI_API_KEY}" \
  -H "Content-Type: application/json" \
  -d "$OPENAI_PAYLOAD")"
# Pull the assistant's reply text out of the response JSON.
ASSISTANT_MESSAGE="$(jq -r '.choices[0].message.content' <<<"$OPENAI_RESPONSE_JSON")"
echo "Assistant:"
echo "$ASSISTANT_MESSAGE"
###
# 2) Record to Freeplay
###
# Freeplay groups completions under a session; generate a fresh session id.
FREEPLAY_SESSION_ID="$(uuidgen)"
FREEPLAY_URL="${FREEPLAY_API_BASE}/v2/projects/${FREEPLAY_PROJECT_ID}/sessions/${FREEPLAY_SESSION_ID}/completions"
# FIX: record the system message too and report the actual prompt inputs
# ({{name}} -> $NAME) instead of an unrelated placeholder, matching the
# Python/JS examples above.
FREEPLAY_REQUEST_BODY="$(jq -n \
  --arg sys "You talk like a pirate. You are a helpful LLM assistant. Who always responds with the user's name." \
  --arg user "$USER_PROMPT_FORMATTED" \
  --arg assistant "$ASSISTANT_MESSAGE" \
  --arg name "$NAME" \
  '{
    messages: [
      {role:"system", content:$sys},
      {role:"user", content:$user},
      {role:"assistant", content:$assistant}
    ],
    inputs: { name: $name }
  }')"
FREEPLAY_RESPONSE="$(curl -sS --location "$FREEPLAY_URL" \
  --header 'Content-Type: application/json' \
  --header "Authorization: Bearer ${FREEPLAY_API_KEY}" \
  --data "$FREEPLAY_REQUEST_BODY")"
echo "Recorded to Freeplay:"
echo "$FREEPLAY_RESPONSE" | jq .Additional Recording Functionality
You can also record tool calls, media, and group by traces to improve search, observability and reviews. Learn more in the Recording SDK or API documentation. You can also find additional examples in the common integration options guide.
Updated about 3 hours ago
