Track and analyze your IBM Watsonx.ai API calls with AgentOps
pip install agentops ibm-watsonx-ai
Create a .env file with your credentials:
export IBM_WATSONX_API_KEY="your_ibm_api_key_here"
export IBM_WATSONX_URL="your_ibm_url_here"
export IBM_WATSONX_PROJECT_ID="your_project_id_here"
export AGENTOPS_API_KEY="your_agentops_api_key_here"
from dotenv import load_dotenv
import os

# Load environment variables from .env file
load_dotenv()

# Set up environment variables with fallback values
os.environ["IBM_WATSONX_API_KEY"] = os.getenv("IBM_WATSONX_API_KEY", "your_ibm_api_key_here")
os.environ["IBM_WATSONX_URL"] = os.getenv("IBM_WATSONX_URL", "your_ibm_url_here")
os.environ["IBM_WATSONX_PROJECT_ID"] = os.getenv("IBM_WATSONX_PROJECT_ID", "your_project_id_here")
os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_agentops_api_key_here")
import os

import agentops
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference

# Initialize AgentOps (reads AGENTOPS_API_KEY from the environment)
agentops.init(api_key=os.getenv("AGENTOPS_API_KEY"))

# Initialize IBM Watsonx.ai credentials
credentials = Credentials(
    url=os.getenv("IBM_WATSONX_URL"),
    api_key=os.getenv("IBM_WATSONX_API_KEY"),
)

# Project ID
project_id = os.getenv("IBM_WATSONX_PROJECT_ID")

# Create a model instance
model = ModelInference(
    model_id="meta-llama/llama-3-3-70b-instruct",
    credentials=credentials,
    project_id=project_id,
)

# Make a completion request
response = model.generate_text("What is artificial intelligence?")
print(f"Generated Text:\n{response}")

# Close the persistent connection when you are done
model.close_persistent_connection()
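If you want token-by-token output, the same ModelInference instance can stream results, and AgentOps records the call like any other. The sketch below is a minimal example assuming the SDK's generate_text_stream method (present in recent ibm-watsonx-ai releases), which yields text chunks as strings; it reuses the model instance created above, so run it before closing the persistent connection.

# Minimal streaming sketch, assuming ModelInference.generate_text_stream
# yields partial text chunks as they are produced.
stream = model.generate_text_stream("Write a haiku about observability.")

collected = []
for chunk in stream:
    collected.append(chunk)          # accumulate partial output
    print(chunk, end="", flush=True)  # show tokens as they arrive

print()  # newline after the streamed output
full_text = "".join(collected)

The chunks are concatenated here only for display; AgentOps captures the request regardless of how you consume the stream.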