Initialize AgentOps at the beginning of your application to automatically track all OpenAI API calls:
# Basic example: track a standard (non-streaming) chat completion with AgentOps.
import agentops
from openai import OpenAI

# Initialize AgentOps before making any OpenAI calls so they are tracked.
# NOTE: the placeholder must be passed as a string, not bare angle-bracket text.
agentops.init("<INSERT YOUR API KEY HERE>")

# Create OpenAI client
client = OpenAI(api_key="<YOUR_OPENAI_API_KEY>")

# Make API calls as usual - AgentOps will track them automatically
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ],
)

print(response.choices[0].message.content)
# Streaming example: AgentOps also tracks streamed chat completions.
import agentops
from openai import OpenAI

# Initialize AgentOps before making any OpenAI calls so they are tracked.
agentops.init("<INSERT YOUR API KEY HERE>")

# Create OpenAI client
client = OpenAI()

# Make a streaming API call
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about AI."},
    ],
    stream=True,
)

# Process the streaming response chunk by chunk; delta.content is None for
# control chunks (e.g. the final chunk), so skip those.
for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
AgentOps tracks function-calling conversations with OpenAI models:
# Function-calling example: AgentOps tracks tool-use conversations too.
import agentops
import json
from openai import OpenAI

# Initialize AgentOps before making any OpenAI calls so they are tracked.
agentops.init("<INSERT YOUR API KEY HERE>")

# Create OpenAI client
client = OpenAI()

# Define the tool schema the model may call.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    }
                },
                "required": ["location"],
            },
        },
    }
]


def get_weather(location):
    """Return a canned weather report for *location* as a JSON string."""
    return json.dumps(
        {
            "location": location,
            "temperature": "72",
            "unit": "fahrenheit",
            "forecast": ["sunny", "windy"],
        }
    )


# Make a function-call API request.
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful weather assistant."},
        {"role": "user", "content": "What's the weather like in Boston?"},
    ],
    tools=tools,
    tool_choice="auto",
)

# Process the response: if the model requested tool calls, run them and send
# the results back for a final answer; otherwise print the direct reply.
response_message = response.choices[0].message
tool_calls = response_message.tool_calls
if tool_calls:
    # Rebuild the conversation, including the assistant's tool-call message.
    messages = [
        {"role": "system", "content": "You are a helpful weather assistant."},
        {"role": "user", "content": "What's the weather like in Boston?"},
        response_message,
    ]
    # Execute each requested tool call and append its result.
    for tool_call in tool_calls:
        function_name = tool_call.function.name
        function_args = json.loads(tool_call.function.arguments)
        if function_name == "get_weather":
            function_response = get_weather(function_args.get("location"))
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )
    # Get a new response from the model with the tool results included.
    second_response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
    )
    print(second_response.choices[0].message.content)
else:
    print(response_message.content)