import time

from openai import OpenAI
# Freeplay SDK classes used below; adjust import paths to match your installed SDK version
from freeplay import CallInfo, RecordPayload, ResponseInfo, UsageTokens

# create the OpenAI client once and reuse it across test cases
openaiClient = OpenAI(api_key=openai_key)

# iterate over each test case
for test_case in test_run.test_cases:
    # format the prompt with the test case variables
    formatted_prompt = template_prompt.bind(test_case.variables).format()
    # make the LLM call and time it
    s = time.time()
    chat_response = openaiClient.chat.completions.create(
        model=formatted_prompt.prompt_info.model,
        messages=formatted_prompt.llm_prompt,
        **formatted_prompt.prompt_info.model_parameters
    )
    e = time.time()
    # append the assistant response to the prompt messages
    all_messages = formatted_prompt.all_messages(
        {'role': chat_response.choices[0].message.role,
         'content': chat_response.choices[0].message.content}
    )
    # capture latency and token usage for the call
    call_info = CallInfo.from_prompt_info(
        formatted_prompt.prompt_info,
        start_time=s,
        end_time=e,
        usage=UsageTokens(chat_response.usage.prompt_tokens,
                          chat_response.usage.completion_tokens)
    )
    # create a session, which generates a unique session ID
    session = fpClient.sessions.create()
    # build the record payload
    payload = RecordPayload(
        project_id=project_id,
        all_messages=all_messages,
        inputs=test_case.variables,  # variables from the test case are the inputs
        session_info=session,
        # IMPORTANT: link this record to the test run and test case
        test_run_info=test_run.get_test_run_info(test_case.id),
        prompt_version_info=formatted_prompt.prompt_info,  # log the prompt version used
        call_info=call_info,
        response_info=ResponseInfo(
            is_complete=chat_response.choices[0].finish_reason == 'stop'
        )
    )
    # record the results to Freeplay
    fpClient.recordings.create(payload)