1. Instantiate Clients
Instantiate clients for Freeplay and your LLM provider.
2. Fetch Prompt Template
Fetch the prompt template from Freeplay.
3. Format Prompt
Format the prompt, including input variables and history.
4. Call LLM
Call your LLM provider; the history will already be merged into your prompt and ready to pass through.
5. Record Interaction
Record the interaction to Freeplay.
6. Manage History
Determine what to include in conversation history on each turn, and append the selected messages to an array (a trimming sketch follows the example below).
Examples
import os
import time
from typing import Optional

from anthropic import Anthropic, NotGiven
from freeplay import Freeplay, RecordPayload, ResponseInfo, CallInfo, SessionInfo, TraceInfo
# Step 1: Instantiate clients for Freeplay and your LLM provider
fpclient = Freeplay(
    freeplay_api_key=os.environ['FREEPLAY_API_KEY'],
    api_base=f"{os.environ['FREEPLAY_API_URL']}/api"
)
project_id = os.environ['FREEPLAY_PROJECT_ID']
environment = 'dev'

anthropic_client = Anthropic(
    api_key=os.environ.get("ANTHROPIC_API_KEY")
)
# One article/question pair per conversation turn; the last turn has no
# article and relies on the conversation history instead
articles = [
    "george washington was the first president of the united states",
    "the sky is blue",
    "the earth is round",
    ""
]
questions = [
    "who was the first president of the united states?",
    "what color is the sky?",
    "what shape is the earth?",
    "repeat the first question and answer"
]
input_pairs = list(zip(articles, questions))
# Step 2: Fetch the prompt template from Freeplay (shown for reference;
# call_and_record below fetches and formats in a single step)
template_prompt = fpclient.prompts.get(
    project_id=project_id,
    template_name='History-Basics',
    environment=environment
)
def call_and_record(
    project_id: str,
    template_name: str,
    env: str,
    history: list,
    input_variables: dict,
    session_info: SessionInfo,
    trace_info: Optional[TraceInfo] = None
) -> dict:
    # Step 3: Format the prompt, including input variables and prior history
    formatted_prompt = fpclient.prompts.get_formatted(
        project_id=project_id,
        template_name=template_name,
        environment=env,
        variables=input_variables,
        history=history,
    )

    # Step 4: Call the LLM provider; history is already merged into the prompt
    start = time.time()
    completion = anthropic_client.messages.create(
        system=formatted_prompt.system_content or NotGiven(),
        messages=formatted_prompt.llm_prompt,
        model=formatted_prompt.prompt_info.model,
        **formatted_prompt.prompt_info.model_parameters
    )
    end = time.time()

    llm_response = completion.content[0].text
    print("Completion: %s" % llm_response)

    # Append the assistant's reply to the formatted messages for recording
    assistant_response = {'role': 'assistant', 'content': llm_response}
    all_messages = formatted_prompt.all_messages(new_message=assistant_response)

    # Step 5: Record the interaction to Freeplay
    call_info = CallInfo.from_prompt_info(formatted_prompt.prompt_info, start, end)
    response_info = ResponseInfo(
        is_complete=True,
    )
    record_response = fpclient.recordings.create(
        RecordPayload(
            all_messages=all_messages,
            session_info=session_info,
            inputs=input_variables,
            prompt_info=formatted_prompt.prompt_info,
            call_info=call_info,
            response_info=response_info,
            trace_info=trace_info,
        )
    )

    return {'completion_id': record_response.completion_id,
            'llm_response': assistant_response,
            'all_messages': all_messages}
# Step 6: Manage history across turns within a single Freeplay session
session = fpclient.sessions.create()
history = []

for article, question in input_pairs:
    input_vars = {'question': question, 'article': article}
    record_response = call_and_record(
        project_id=project_id,
        template_name='History-QA',
        env=environment,
        history=history,
        input_variables=input_vars,
        session_info=session.session_info,
    )
    # Carry forward everything except the system message as the next turn's history
    history = [msg for msg in record_response['all_messages'] if msg['role'] != 'system']
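
For longer conversations you may not want to pass the entire transcript back on every turn. Below is a minimal sketch of one way to trim history before the next call; MAX_TURNS and trim_history are illustrative names, not part of the Freeplay SDK, and you should adapt the retention rule to your own use case.

# Hypothetical helper for capping history length before the next turn
MAX_TURNS = 5

def trim_history(history: list, max_turns: int = MAX_TURNS) -> list:
    # Each turn contributes one user and one assistant message,
    # so keep the most recent max_turns * 2 messages
    return history[-(max_turns * 2):]

# Example: trim before passing history into the next call_and_record(...) call
history = trim_history(history)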


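The same pattern works with other providers. As a rough sketch, assuming your prompt template in Freeplay is configured for an OpenAI chat model (so that formatted_prompt.llm_prompt carries OpenAI-style messages), the provider call inside call_and_record might look like the snippet below; the formatting and recording code around it is unchanged.

from openai import OpenAI

openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

start = time.time()
completion = openai_client.chat.completions.create(
    model=formatted_prompt.prompt_info.model,
    messages=formatted_prompt.llm_prompt,
    **formatted_prompt.prompt_info.model_parameters
)
end = time.time()

# OpenAI returns the assistant text on the first choice
llm_response = completion.choices[0].message.content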