Lightweight observability (fast)

The fastest way to get started, but lacks important functionality

This integration method provides lightweight tracing of your AI applications. You can also use the UI to iterate on prompts, create offline evaluations, curate datasets, and run tests. Note that until you integrate prompt templates via manage-in-code or Freeplay prompt management, your prompts and variable inputs will not be connected to observability sessions, and functionality will be limited.

Supported features

  • Basic observability and search
  • Manual dataset curation
  • Prompt template editor
  • Evaluations
  • Test individual versions

Unsupported features

  • Create datasets from observability records
  • Run inline evaluations targeting inputs and output
  • Sophisticated observability and search (search prompts, prompt versions, etc.)

📌 Key Benefits

  • No prompt migration needed: Keep your existing prompts in your codebase
  • Start logging immediately: Begin analyzing your LLM interactions right away
  • Automatic prompt interpretation: Ability to create Freeplay templates from logged messages
  • Gradual adoption: Add prompt management and testing capabilities when you're ready

Best For:

  • Teams wanting to start observing sessions immediately

Note: We recommend saving your prompts to Freeplay to allow strong linking between prompt template versions and observability sessions. Learn more.


Make your LLM call and pass the results to Freeplay

Continue using your existing LLM provider logic and prompts; collect all of the messages and record the call to Freeplay.

from openai import OpenAI
import os
from freeplay import Freeplay, RecordPayload, CallInfo

openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

project_id="<YOUR-PROJECT-ID>"

# Your existing prompt, including user message
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that provides advice about evaluating LLMs."
    },
    {
        "role": "user",
        "content": "How can I evaluate the output structure of my LLM response?"
    }
]
    
model="gpt-4.1-mini-2025-04-14"
response = openai_client.chat.completions.create(
    messages=messages,
    model=model
)

# Append the response to your messages
# Note: the response should follow {"role": <role>, "content": <content>}
all_messages = messages + [{"role": "assistant", "content": response.choices[0].message.content}]
    

# create a freeplay client object
fpClient = Freeplay(
    freeplay_api_key=os.getenv("FREEPLAY_API_KEY"),
    api_base="https://app.freeplay.ai/api"  # if self-hosted, replace with your instance URL
)

# Record the completion data to Freeplay
fpClient.recordings.create(
  RecordPayload(
      project_id=project_id, # your Freeplay project ID
      call_info=CallInfo(provider="openai", model=model),
      all_messages=all_messages
    )
)
# !pip install freeplay anthropic

from anthropic import Anthropic
import os
from freeplay import Freeplay, RecordPayload, CallInfo

anthropic_client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))

project_id="<YOUR-PROJECT-ID>"

# Your existing prompt, including user message
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that provides advice about evaluating LLMs."
    },
    {
        "role": "user",
        "content": "How can I evaluate the output structure of my LLM response?"
    }
]

model = "claude-sonnet-4-5-20250929"

# Select the system prompt
system_prompt = messages[0]["content"]

response = anthropic_client.messages.create(
    model=model,
    max_tokens=1024,
    system=system_prompt,
    messages=[
        {"role": "user", "content": messages[1]["content"]}
    ]
)

print(vars(response))  # Optional: inspect the raw Anthropic response

# Prepare the messages to record to Freeplay
all_messages = messages + [
    {"role": response.role, "content": [msg.model_dump() for msg in response.content]}
]


# Record to Freeplay
# Create a Freeplay client object
fpClient = Freeplay(
    freeplay_api_key=os.getenv("FREEPLAY_API_KEY"),
    api_base="https://app.freeplay.ai/api"
)

# Record the completion data to Freeplay
fpClient.recordings.create(
    RecordPayload(
        project_id=project_id,
        call_info=CallInfo(provider="anthropic", model=model),
        all_messages=all_messages
    )
)

import Freeplay from "freeplay";
import OpenAI from "openai";

// Create OpenAI client
const openaiClient = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY,
});

const projectId = "<YOUR-PROJECT-ID>";

// Your existing prompt and LLM call
const messages = [
    {
        role: "system",
        content: "You are a helpful assistant that provid advice about evaluating LLMs."
    },
    {
        role: "user",
        content: "How can I evaluate the output structure of my LLM response?"
    }
];

const model = "gpt-4o-mini";

const completion = await openaiClient.chat.completions.create({
    messages: messages,
    model: model
});

// Append the response to your messages
let allMessages = [...messages, completion.choices[0].message];

// Create a freeplay client object
const fpClient = new Freeplay({
    freeplayApiKey: process.env.FREEPLAY_API_KEY,
    baseUrl: "https://app.freeplay.ai/api" // if self hosted, replace with your instance url
});

// Record the completion data to Freeplay
await fpClient.recordings.create({
    projectId: projectId, // your Freeplay project ID
    callInfo: {
        provider: "openai",
        model: model
    },
    allMessages: allMessages
});


console.log("✓ Completion recorded to Freeplay successfully!");

import Freeplay from "freeplay";
import Anthropic from "@anthropic-ai/sdk";

// Create Anthropic client
const anthropicClient = new Anthropic({
    apiKey: process.env.ANTHROPIC_API_KEY,
});

// Create a Freeplay client object
const projectId = process.env.FREEPLAY_PROJECT_ID;
const fpClient = new Freeplay({
    freeplayApiKey: process.env.FREEPLAY_API_KEY,
    baseUrl: "https://app.freeplay.ai/api"
});

// Your existing prompt and LLM call
const messages = [
    {
        role: "system",
        content: "You are a helpful assistant that provide advice about evaluating LLMs."
    },
    {
        role: "user",
        content: "How can I evaluate the output structure of my LLM response?"
    }
];

// Anthropic Call
const systemPrompt = messages[0].content;
const model = "claude-sonnet-4-5-20250929";
const completion = await anthropicClient.messages.create({
    model,
    max_tokens: 1024,
    system: systemPrompt,
    messages: [
        {
            role: "user",
            content: messages[1].content
        }
    ]
});

// Append the response to your messages in OpenAI-style format
const allMessages = [
    ...messages,
    {
        role: "assistant",
        content: completion.content[0].text
    }
];

// Record the completion data to Freeplay
await fpClient.recordings.create({
    projectId,
    allMessages,
    callInfo: {
        provider: "anthropic",
        model
    },
});

console.log("✓ Completion recorded to Freeplay successfully!");
#!/usr/bin/env bash
set -euo pipefail

# OpenAI
OPENAI_MODEL="gpt-4.1-mini"    # or whatever model you want

# Freeplay
FREEPLAY_PROJECT_ID=""
FREEPLAY_SESSION_ID=""

# Optional metadata
PROMPT_TEMPLATE_VERSION_ID=""
ENVIRONMENT=""
TEST_RUN_ID=""
TEST_CASE_ID=""

###
# Safety checks
###

if [[ -z "${OPENAI_API_KEY:-}" ]]; then
  echo "ERROR: OPENAI_API_KEY not set" >&2
  exit 1
fi

if [[ -z "${FREEPLAY_API_KEY:-}" ]]; then
  echo "ERROR: FREEPLAY_API_KEY not set" >&2
  exit 1
fi

if ! command -v jq >/dev/null 2>&1; then
  echo "ERROR: jq is required (brew install jq or apt-get install jq)" >&2
  exit 1
fi

###
# Input prompt
###

USER_PROMPT="${1:-}"

if [[ -z "$USER_PROMPT" ]]; then
  USER_PROMPT="Generate a two word album name in the style of Taylor Swift"
fi

###
# 1) Call OpenAI
###

OPENAI_RESPONSE_JSON="$(curl -sS https://api.openai.com/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer ${OPENAI_API_KEY}" \
  -d @- <<EOF
{
  "model": "${OPENAI_MODEL}",
  "messages": [
    {
      "role": "user",
      "content": "${USER_PROMPT}"
    }
  ]
}
EOF
)"

echo ">>> Raw OpenAI response:"
echo "$OPENAI_RESPONSE_JSON"
echo

# Extract the assistant message text
ASSISTANT_MESSAGE="$(echo "$OPENAI_RESPONSE_JSON" | jq -r '.choices[0].message.content')"

echo ">>> Parsed assistant message:"
echo "$ASSISTANT_MESSAGE"
echo

###
# 2) Record to Freeplay
###
# Generate a session UUID if one was not provided
FREEPLAY_SESSION_ID="${FREEPLAY_SESSION_ID:-$(uuidgen)}"

FREEPLAY_URL="https://app.freeplay.ai/api/v2/projects/${FREEPLAY_PROJECT_ID}/sessions/${FREEPLAY_SESSION_ID}/completions"

echo ">>> Sending to Freeplay: ${FREEPLAY_URL}"
echo

FREEPLAY_REQUEST_BODY="$(cat <<EOF
{
  "messages": [
    {
      "role": "user",
      "content": "${USER_PROMPT}"
    },
    {
      "role": "assistant",
      "content": ${ASSISTANT_MESSAGE@Q}
    }
  ],
  "inputs": {
    "question": "this is a test"
  },
  "prompt_info": {
    "prompt_template_version_id": "${PROMPT_TEMPLATE_VERSION_ID}",
    "environment": "${ENVIRONMENT}"
  },
  "test_run_info": {
    "test_run_id": "${TEST_RUN_ID}",
    "test_case_id": "${TEST_CASE_ID}"
  }
}
EOF
)"

FREEPLAY_RESPONSE="$(echo "$FREEPLAY_REQUEST_BODY" | curl -sS --location "$FREEPLAY_URL" \
  --header 'Content-Type: application/json' \
  --header "Authorization: Bearer ${FREEPLAY_API_KEY}" \
  --data @- )"

echo ">>> Freeplay response:"
echo "$FREEPLAY_RESPONSE"
echo

That's it! You're now logging data to Freeplay 🙌. Go to the Observability tab in Freeplay to view your recorded data. Learn more about searching and reviewing your application in getting started with observability.


Better: Log with Variables

💡

Freeplay accepts the following types for inputs: Dict[str, Union[str, int, bool, float, Dict[str, Any], List[Any]]]
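
For instance, a valid inputs dictionary can mix these types. This is a minimal sketch; the keys below are purely illustrative:

# Illustrative inputs payload showing the accepted value types
inputs = {
    "customer_name": "Jill",            # str
    "retry_count": 2,                   # int
    "is_priority": True,                # bool
    "confidence_threshold": 0.85,       # float
    "account": {"plan": "pro"},         # nested dict
    "topics": ["evals", "datasets"],    # list
}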

Pass variables to the RecordPayload to support migration to Freeplay-hosted prompts. Freeplay auto-interprets these and adds them directly to your prompt template, making for a smooth transition.

# separate variables to enable dataset generation in Freeplay
prompt_vars = {"name": "Jill", "topic": "evaluating llm models"}

messages = [
    {"role": "system", "content": "You are a helpful teacher."},
    {"role": "user", "content": f"Hi {prompt_vars['name']}, explain {prompt_vars['topic']} to me."}
]

# After getting completion...
fpClient.recordings.create(
    RecordPayload(
        project_id=project_id,
        all_messages=all_messages,
        inputs=prompt_vars,  # Send variables
    )
)
const promptVars = { name: "Jill", topic: "evaluating llm models" };

const messages = [
    { role: "system", content: "You are a helpful teacher." },
    { role: "user", content: `Hi ${promptVars.name}, explain ${promptVars.topic} to me.` }
];

// After getting the completion, append it to build allMessages as in the earlier example

// Record the completion data to Freeplay
await fpClient.recordings.create({
    projectId: projectId, // your Freeplay project ID
    callInfo: {
        provider: "openai",
        model: model,
    },
    allMessages: allMessages,
    inputs: promptVars,
});


console.log("✓ Completion recorded to Freeplay successfully!");
curl --location 'https://app.freeplay.ai/api/v2/projects/<PROJECT_ID>/sessions/<SESSION_ID>/completions' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer <FREEPLAY_API_KEY>' \
--data '{
  "messages": [
    {
      "role": "system",
      "content": "You are a helpful teacher."
    },
    {
      "role": "user",
      "content": "Hi Jill, explain evaluating llm models to me."
    },
    {
      "role": "assistant",
      "content": "<ASSISTANT-RESPONSE>"
    }
  ],
  "inputs": {
    "name": "Jill",
    "topic": "evaluating llm models"
  }
}
'

Additional Recording Functionality

You can also record tool calls and media, and group completions into traces to improve search, observability, and review. Learn more in the Recording SDK or API documentation, and find additional examples in the common integration options guide.
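
For example, grouping several related completions under one session might look like the sketch below. This is a minimal sketch, not the definitive API: it assumes the Python SDK exposes a sessions.create() helper and that RecordPayload accepts a session_info field, so check the Recording SDK documentation for the exact names available in your SDK version.

# Hedged sketch: group two related completions under one Freeplay session.
# Assumes fpClient, project_id, model, RecordPayload, and CallInfo from the examples
# above, plus sessions.create() / session_info (verify these against the SDK docs).
session = fpClient.sessions.create()

for all_messages in (first_call_messages, second_call_messages):  # your own message lists
    fpClient.recordings.create(
        RecordPayload(
            project_id=project_id,
            all_messages=all_messages,
            call_info=CallInfo(provider="openai", model=model),
            session_info=session.session_info,  # ties both completions to the same session
        )
    )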