Track and analyze your IBM Watsonx.ai API calls with AgentOps
AgentOps provides seamless integration with IBM Watsonx.ai Python SDK, allowing you to track and analyze all your Watsonx.ai model interactions automatically.
Initialize AgentOps at the beginning of your application to automatically track all IBM Watsonx.ai API calls:
"""Basic text generation with IBM watsonx.ai — AgentOps tracks the call automatically."""
import agentops
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference

# Initialize AgentOps first so every subsequent watsonx.ai call is instrumented
agentops.init(api_key="<INSERT YOUR API KEY HERE>")

# Initialize credentials
credentials = Credentials(
    url="<YOUR_IBM_URL>",
    api_key="<YOUR_IBM_API_KEY>",
)

# Project ID
project_id = "<YOUR_PROJECT_ID>"

# Create a model instance
model = ModelInference(
    model_id="meta-llama/llama-3-3-70b-instruct",
    credentials=credentials,
    project_id=project_id,
)

# Make a completion request - AgentOps will track it automatically
response = model.generate_text("What is artificial intelligence?")
print(f"Generated Text:\n{response}")

# Don't forget to close connection when done
model.close_persistent_connection()
Using the Watsonx.ai SDK for chat-based interactions:
"""Chat-style completion with IBM watsonx.ai — AgentOps tracks the call automatically."""
import agentops
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference

# Initialize AgentOps first so every subsequent watsonx.ai call is instrumented
agentops.init(api_key="<INSERT YOUR API KEY HERE>")

# Initialize credentials
credentials = Credentials(
    url="<YOUR_IBM_URL>",
    api_key="<YOUR_IBM_API_KEY>",
)

# Project ID
project_id = "<YOUR_PROJECT_ID>"

# Create model for chat
chat_model = ModelInference(
    model_id="meta-llama/llama-3-3-70b-instruct",
    credentials=credentials,
    project_id=project_id,
)

# Format messages for chat method
messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "What are the three laws of robotics?"},
]

# Get chat response
chat_response = chat_model.chat(messages)
print(f"Chat Response:\n{chat_response['choices'][0]['message']['content']}")

# Close connection
chat_model.close_persistent_connection()
"""Streaming text generation with IBM watsonx.ai — AgentOps tracks the stream automatically."""
import agentops
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference

# Initialize AgentOps first so every subsequent watsonx.ai call is instrumented
agentops.init(api_key="<INSERT YOUR API KEY HERE>")

# Initialize credentials
credentials = Credentials(
    url="<YOUR_IBM_URL>",
    api_key="<YOUR_IBM_API_KEY>",
)

project_id = "<YOUR_PROJECT_ID>"

# Create model for streaming
model = ModelInference(
    model_id="google/flan-ul2",
    credentials=credentials,
    project_id=project_id,
)

# Text streaming
stream_response = model.generate_text_stream(
    prompt="Write a short poem about artificial intelligence."
)

print("Streaming Response:")
for chunk in stream_response:
    # Each yielded chunk is expected to be a text fragment; print as it arrives
    if isinstance(chunk, str):
        print(chunk, end="", flush=True)

# Close connection
model.close_persistent_connection()
"""Streaming chat completion with IBM watsonx.ai — AgentOps tracks the stream automatically."""
import agentops
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference

# Initialize AgentOps first so every subsequent watsonx.ai call is instrumented
agentops.init(api_key="<INSERT YOUR API KEY HERE>")

# Initialize credentials
credentials = Credentials(
    url="<YOUR_IBM_URL>",
    api_key="<YOUR_IBM_API_KEY>",
)

project_id = "<YOUR_PROJECT_ID>"

# Create model for chat streaming
chat_model = ModelInference(
    model_id="meta-llama/llama-3-3-70b-instruct",
    credentials=credentials,
    project_id=project_id,
)

# Format messages for chat method
chat_messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Explain the concept of photosynthesis in one sentence."},
]

# Get streaming chat response
chat_stream = chat_model.chat_stream(messages=chat_messages)

print("Chat Stream Response:")
for chunk in chat_stream:
    # Chunks follow the OpenAI-style delta shape; guard against empty/keepalive chunks
    if chunk and 'choices' in chunk and chunk['choices']:
        delta = chunk['choices'][0].get('delta', {})
        content = delta.get('content')
        if content:
            print(content, end="", flush=True)

# Close connection
chat_model.close_persistent_connection()