Skip to main content

1. Setup clients

Initialize Freeplay and Google GenAI client SDKs.

2. Manage chat history

Maintain a running messages list across turns. Pass previous messages as history when fetching the formatted prompt so the model has full conversation context.

3. Fetch prompt from Freeplay

Pull in the formatted prompt with Freeplay. The prompt contains the system instruction and model parameters.

4. Call Google GenAI with the tools

When creating a new completion, pass in the tools configuration and formatted prompt contents.

5. Handle tool call

When the model responds with a function call, execute the corresponding external function in your service. As an example here, we call the get_current_temperature function. Append both the function call and the function response to the messages list to maintain history.

6. Record to Freeplay

Pass in the completion response and messages to capture the tool call, its result, and the full conversation history.

Examples

import os
import time
from google.genai import types
from google import genai
from freeplay import Freeplay, RecordPayload, PromptInfo, SessionInfo, CallInfo

# --- Client and configuration setup ------------------------------------------
# Google GenAI client; expects GOOGLE_API_KEY in the environment.
client = genai.Client(api_key=os.getenv("GOOGLE_API_KEY"))
# Freeplay client used for prompt retrieval and completion recording.
fp_client = Freeplay(
    freeplay_api_key=os.getenv("FREEPLAY_API_KEY"),
    api_base="https://app.freeplay.ai/api",
)

# Name of the prompt template configured in Freeplay for this example.
prompt_template_name = "genai_tools"
# Freeplay project that prompts and recordings belong to.
project_id = os.getenv("FREEPLAY_PROJECT_ID")
# User inputs that end the chat loop (compared case-insensitively below).
EXIT_WORDS = {"exit", "quit", "bye", "goodbye"}

# JSON-schema style function declaration for the single tool exposed to
# the model; mirrors the Google GenAI function-calling format.
weather_function = {
    "name": "get_current_temperature",
    "description": "Gets the current temperature for a given location.",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city name, e.g. San Francisco",
            },
        },
        "required": ["location"],
    },
}

# Tool wrapper passed into GenerateContentConfig on every completion call.
tools = types.Tool(function_declarations=[weather_function])
# Running conversation history (Gemini "contents"-style dicts) shared
# across turns and passed to Freeplay as `history`.
messages = []


def execute_tool(name: str, args: dict) -> dict:
    """Dispatch a model-requested tool call to its local implementation.

    Args:
        name: Function name the model asked to invoke.
        args: Arguments supplied by the model for that function.

    Returns:
        The tool's result payload, or an error dict for unknown names.
    """
    # Only one tool is implemented in this example; anything else is an error.
    if name != "get_current_temperature":
        return {"error": "unknown function"}

    # Stubbed weather lookup — a real service would query a weather API here.
    location = args.get("location", "unknown")
    return {
        "location": location,
        "temperature_f": 72,
        "condition": "sunny",
    }


def record_response(
    inputs: dict = None,
    prompt_info: PromptInfo = None,
    parent_id: str = None,
    session_info: SessionInfo = None,
    messages: list = None,
    call_info: CallInfo = None,
):
    """Record a single completion (or tool call) to Freeplay.

    Args:
        inputs: Variables used for this completion (e.g. the user question
            or the tool-call arguments). Defaults to an empty dict.
        prompt_info: Prompt version metadata from the formatted prompt.
        parent_id: Trace id that groups this recording under a turn.
        session_info: Freeplay session metadata for the whole chat.
        messages: Full conversation history captured with this recording.
        call_info: Model/latency details for the underlying LLM call.

    Returns:
        The recording created by the Freeplay SDK.
    """
    # Fix: the original used a mutable default (`inputs: dict = {}`), which
    # is shared across calls. `None` + `inputs or {}` is safe and matches the
    # None-default convention of the other parameters.
    return fp_client.recordings.create(
        RecordPayload(
            project_id=project_id,
            all_messages=messages,
            inputs=inputs or {},
            prompt_version_info=prompt_info,
            parent_id=parent_id,
            session_info=session_info,
            call_info=call_info,
        )
    )


# --- Interactive chat loop ----------------------------------------------------
print("Chat started. Type 'exit' or 'quit' to leave.\n")
# One Freeplay session groups every completion recorded for this chat.
session = fp_client.sessions.create()

while True:
    try:
        question = input("You: ").strip()
    except (EOFError, KeyboardInterrupt):
        print("\nExiting.")
        break

    if not question:
        continue
    if question.lower() in EXIT_WORDS:
        print("Goodbye!")
        break

    # A trace ties this turn's tool calls and final answer together.
    trace = session.create_trace(input=question, agent_name="Gemini Tools")

    # Fetch the formatted prompt from Freeplay; passing `history` gives the
    # model the full conversation context from previous turns.
    formatted_prompt = fp_client.prompts.get_formatted(
        project_id=project_id,
        template_name=prompt_template_name,
        environment="latest",
        variables={"user_question": question},
        history=messages,
    )
    messages.append({"role": "user", "parts": [{"text": question}]})

    contents = list(formatted_prompt.llm_prompt)
    config = types.GenerateContentConfig(
        system_instruction=formatted_prompt.system_content or "",
        tools=[tools],
        **formatted_prompt.prompt_info.model_parameters,
    )

    start = time.time()
    response = client.models.generate_content(
        model=formatted_prompt.prompt_info.model, contents=contents, config=config
    )
    end = time.time()

    # Keep resolving tool calls until the model returns a plain-text answer.
    # NOTE(review): assumes candidates[0].content.parts[0] is always present;
    # an empty parts list would raise IndexError here — confirm against the
    # GenAI response shape for the configured model.
    while response.candidates[0].content.parts[0].function_call:
        function_call = response.candidates[0].content.parts[0].function_call
        fc_args = dict(function_call.args)
        print(f"\n[Tool call] {function_call.name}({fc_args})")

        # Add the function call to the running history.
        messages.append(
            {
                "role": "model",
                "parts": [
                    {"functionCall": {"name": function_call.name, "args": fc_args}}
                ],
            }
        )
        # Record the function call to Freeplay.
        # Fix: pass session.session_info (a SessionInfo), not the session
        # object itself, consistent with the final record_response call below.
        record_response(
            inputs=fc_args,
            prompt_info=formatted_prompt.prompt_info,
            parent_id=trace.trace_id,
            session_info=session.session_info,
            messages=messages,
            call_info=CallInfo.from_prompt_info(
                formatted_prompt.prompt_info, start, end
            ),
        )

        # Execute the tool locally.
        result = execute_tool(function_call.name, fc_args)
        print(f"[Tool result] {result}")

        # Add the function response to the running history.
        messages.append(
            {
                "role": "user",
                "parts": [
                    {
                        "functionResponse": {
                            "name": function_call.name,
                            "response": result,
                        }
                    }
                ],
            }
        )

        # Feed the model's function call and our response back so the next
        # completion can produce the final answer (or another tool call).
        contents.append(response.candidates[0].content)
        contents.append(
            types.Content(
                parts=[
                    types.Part.from_function_response(
                        name=function_call.name, response=result
                    )
                ],
                role="user",
            )
        )

        start = time.time()
        response = client.models.generate_content(
            model=formatted_prompt.prompt_info.model, contents=contents, config=config
        )
        end = time.time()

    assistant_text = response.text
    print(f"\nAssistant: {assistant_text}\n")

    # Record the final completion of this turn, with the full history.
    messages.append({"role": "model", "parts": [{"text": assistant_text}]})
    record_response(
        inputs={"question": question},
        prompt_info=formatted_prompt.prompt_info,
        parent_id=trace.trace_id,
        session_info=session.session_info,
        messages=messages,
        call_info=CallInfo.from_prompt_info(formatted_prompt.prompt_info, start, end),
    )

    trace.record_output(project_id=project_id, output=assistant_text)

    # End the chat if the assistant itself says goodbye.
    if any(word in assistant_text.lower() for word in EXIT_WORDS):
        print("The assistant ended the conversation.")
        break