Manage conversation history for multi-turn chat applications with Freeplay.

Documentation Index: fetch the complete documentation index at https://docs.freeplay.ai/llms.txt and use it to discover all available pages before exploring further.
import json
import os
import time
from copy import deepcopy
from typing import Optional
import boto3
from anthropic import Anthropic, NotGiven
from openai import OpenAI
from freeplay import Freeplay, RecordPayload, CallInfo, SessionInfo, TraceInfo
# Freeplay SDK client, configured entirely from environment variables.
fp_client = Freeplay(
    freeplay_api_key=os.environ['FREEPLAY_API_KEY'],
    api_base=f"{os.environ['FREEPLAY_API_URL']}/api"
)
project_id = os.environ['FREEPLAY_PROJECT_ID']
# Freeplay environment whose prompt versions are fetched below.
environment = 'dev'

# Anthropic client used for the actual model calls.
anthropic_client = Anthropic(
    api_key=os.environ.get("ANTHROPIC_API_KEY")
)

# Demo data: each turn pairs an article (context) with a question.
# The final pair has an empty article and a question that can only be
# answered from the accumulated conversation history.
articles = [
    "george washington was the first president of the united states",
    "the sky is blue",
    "the earth is round",
    ""
]
questions = [
    "who was the first president of the united states?",
    "what color is the sky?",
    "what shape is the earth?",
    "repeat the first question and answer"
]
# (article, question) tuples, one per conversation turn.
input_pairs = list(zip(articles, questions))

# NOTE(review): this fetched template ('History-Basics') is not used in the
# visible code — the loop below records against 'History-QA'. Confirm whether
# this fetch is needed or left over.
template_prompt = fp_client.prompts.get(
    project_id=project_id,
    template_name='History-Basics',
    environment=environment
)
def call_and_record(
    project_id: str,
    template_name: str,
    env: str,
    history: list,
    input_variables: dict,
    session_info: SessionInfo,
    trace_info: Optional[TraceInfo] = None
) -> dict:
    """Format a Freeplay prompt with prior history, call Anthropic, and record the turn.

    Returns a dict with the recorded completion id ('completion_id'), the
    assistant message ('llm_response'), and the full message list
    ('all_messages') for carrying history into the next turn.
    """
    # Resolve the prompt template with input variables plus prior turns.
    prompt = fp_client.prompts.get_formatted(
        project_id=project_id,
        template_name=template_name,
        environment=env,
        variables=input_variables,
        history=history,
    )

    # Time the model call so latency can be recorded with the completion.
    t0 = time.time()
    completion = anthropic_client.messages.create(
        system=prompt.system_content or NotGiven(),
        messages=prompt.llm_prompt,
        model=prompt.prompt_info.model,
        **prompt.prompt_info.model_parameters
    )
    t1 = time.time()

    llm_response = completion.content[0].text
    print("Completion: %s" % llm_response)

    # Append the assistant reply to the formatted conversation.
    assistant_response = {'role': 'assistant', 'content': llm_response}
    conversation = prompt.all_messages(new_message=assistant_response)

    # Record the completed turn (messages, inputs, latency) back to Freeplay.
    payload = RecordPayload(
        all_messages=conversation,
        session_info=session_info,
        inputs=input_variables,
        prompt_info=prompt.prompt_info,
        call_info=CallInfo.from_prompt_info(prompt.prompt_info, t0, t1),
        trace_info=trace_info
    )
    recorded = fp_client.recordings.create(payload)

    return {'completion_id': recorded.completion_id,
            'llm_response': assistant_response,
            "all_messages": conversation}
# One Freeplay session groups every recorded turn of this conversation.
session = fp_client.sessions.create()
# Conversation history carried between turns (non-system messages only).
history = []
for inputs in input_pairs:
    # inputs is an (article, question) tuple from input_pairs above.
    input_vars = {'question': inputs[1], 'article': inputs[0]}
    record_response = call_and_record(
        project_id=project_id,
        template_name='History-QA',
        env=environment,
        history=history,
        input_variables=input_vars,
        session_info=session.session_info,
    )
    # Carry the full conversation into the next turn, dropping system
    # messages — presumably so the next formatted prompt supplies its own
    # system content; verify against the template configuration.
    history = [msg for msg in record_response['all_messages'] if msg['role'] != 'system']