AI Agent Development with Function Calling (Tool Use)
Function Calling (Tool Use) is a mechanism where an LLM generates a structured function call instead of a text response. The model analyzes the request and determines the needed tool and its parameters; the host application then executes the call and returns the result to the model. This makes the agent capable of interacting with any external system.
OpenAI Function Calling
from openai import OpenAI
import json
client = OpenAI()
# Tool schema
# Tool schemas in the OpenAI function-calling format.  Each entry gives the
# model a function's name, a human-readable description, and a JSON Schema
# describing its arguments.
_CUSTOMER_INFO_TOOL = {
    "type": "function",
    "function": {
        "name": "get_customer_info",
        "description": "Get customer information by ID or email",
        "parameters": {
            "type": "object",
            "properties": {
                "customer_id": {"type": "string"},
                "email": {"type": "string"},
                "fields": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Needed fields: name, orders, balance, status",
                },
            },
            # No "required" list: the model may look a customer up by
            # either customer_id or email.
        },
    },
}

_SUPPORT_TICKET_TOOL = {
    "type": "function",
    "function": {
        "name": "create_support_ticket",
        "description": "Create support ticket",
        "parameters": {
            "type": "object",
            "properties": {
                "customer_id": {"type": "string"},
                "category": {"type": "string", "enum": ["billing", "technical", "account", "shipping"]},
                "priority": {"type": "string", "enum": ["low", "medium", "high", "critical"]},
                "description": {"type": "string"},
            },
            "required": ["customer_id", "category", "description"],
        },
    },
}

tools = [_CUSTOMER_INFO_TOOL, _SUPPORT_TICKET_TOOL]
# Function registry
def get_customer_info(customer_id=None, email=None, fields=None) -> dict:
    """Look up a customer by ID or email (stubbed CRM/DB query).

    If *fields* is given, only those keys of the record are returned —
    honoring the "fields" parameter advertised in the tool schema, which
    the original stub silently ignored.  With fields=None the full record
    is returned, so existing callers are unaffected.
    """
    # Real implementation: CRM/DB query
    record = {"id": customer_id, "name": "John Smith", "balance": 15000, "status": "active"}
    if fields:
        return {key: value for key, value in record.items() if key in fields}
    return record


def create_support_ticket(customer_id: str, category: str, description: str, priority: str = "medium") -> dict:
    """Open a support ticket and return its id/status (stubbed Jira/Zendesk call)."""
    # Real implementation: Jira/Zendesk API
    return {"ticket_id": "TKT-12345", "status": "created"}


# Dispatch table: tool names exactly as the model emits them -> callables.
FUNCTION_MAP = {
    "get_customer_info": get_customer_info,
    "create_support_ticket": create_support_ticket,
}
# Agent loop with Function Calling
def run_support_agent(user_message: str, max_turns: int = 10) -> str:
    """Run the support agent loop until the model answers in plain text.

    Each turn: send the conversation to the model; if it requests tool
    calls, execute them and append the results; otherwise return the text.
    *max_turns* bounds the loop — the original ``while True`` could spin
    forever if the model kept requesting tools on every turn.

    Args:
        user_message: The customer's request.
        max_turns: Maximum model round-trips before giving up.

    Returns:
        The model's final plain-text answer.

    Raises:
        RuntimeError: If no final answer is produced within *max_turns*.
    """
    messages = [
        {"role": "system", "content": "You are a support agent. Use tools to help customers."},
        {"role": "user", "content": user_message},
    ]
    for _ in range(max_turns):
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=tools,
            tool_choice="auto",
            parallel_tool_calls=True,  # allow several tool calls in one turn
        )
        message = response.choices[0].message
        messages.append(message)
        if not message.tool_calls:
            # No tool requests: this is the final text answer.
            return message.content
        # Execute all requested calls and report each result back,
        # keyed by tool_call_id so the model can match them up.
        for tool_call in message.tool_calls:
            func_name = tool_call.function.name
            func_args = json.loads(tool_call.function.arguments)
            func = FUNCTION_MAP.get(func_name)
            if func:
                result = func(**func_args)
            else:
                result = {"error": f"Function {func_name} not found"}
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(result, ensure_ascii=False),
            })
    raise RuntimeError(f"No final answer after {max_turns} turns")
Parallel Tool Calls
GPT-4o and Claude 3+ support parallel tool invocation:
# Example: agent can request data from multiple sources simultaneously
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Show orders and balance for customer #12345"}],
    tools=tools,
    tool_choice="auto",  # model decides whether (and which) tools to call
    parallel_tool_calls=True,  # opt in to several tool calls in a single turn
)
# Model generates two parallel calls:
# get_orders(customer_id="12345") AND get_balance(customer_id="12345")
Anthropic Tool Use
import anthropic
client = anthropic.Anthropic()
# Anthropic tool definition: a flat schema with "input_schema", rather than
# the OpenAI-style nested {"type": "function", "function": {...}} wrapper.
_KB_SEARCH_TOOL = {
    "name": "search_knowledge_base",
    "description": "Search corporate knowledge base",
    "input_schema": {
        "type": "object",
        "properties": {
            "query": {"type": "string"},
            "category": {"type": "string"},
        },
        "required": ["query"],
    },
}

tools = [_KB_SEARCH_TOOL]
def run_claude_agent(user_message: str, max_turns: int = 10) -> str:
    """Run a Claude tool-use loop until the model stops requesting tools.

    Fixes versus the original:
      * the loop is bounded by *max_turns* instead of ``while True``;
      * any stop_reason other than "tool_use" ends the loop — the original
        only handled "end_turn", so a "max_tokens" stop looped forever
        appending an empty tool_results message;
      * text blocks are joined, so a response containing no text block
        returns "" instead of raising StopIteration from ``next()``.

    Raises:
        RuntimeError: If no final answer is produced within *max_turns*.
    """
    messages = [{"role": "user", "content": user_message}]
    for _ in range(max_turns):
        response = client.messages.create(
            model="claude-opus-4-5",
            max_tokens=4096,
            tools=tools,
            messages=messages,
        )
        if response.stop_reason != "tool_use":
            # Final text response (possibly empty if no text blocks came back)
            return "".join(b.text for b in response.content if hasattr(b, "text"))
        # Echo the assistant turn verbatim, then answer every tool_use block
        # with a tool_result carrying the matching id.
        messages.append({"role": "assistant", "content": response.content})
        tool_results = []
        for block in response.content:
            if block.type == "tool_use":
                result = execute_tool(block.name, block.input)
                tool_results.append({
                    "type": "tool_result",
                    "tool_use_id": block.id,
                    "content": json.dumps(result),
                })
        messages.append({"role": "user", "content": tool_results})
    raise RuntimeError(f"No final answer after {max_turns} turns")
Practical Case: Agent for HR Requests
Tools:
- get_employee_info(employee_id) — employee data
- check_vacation_balance(employee_id) — vacation days balance
- submit_vacation_request(employee_id, start_date, end_date) — vacation request
- get_company_policy(policy_name) — search HR policy database
Request: "I want to take vacation from April 15 to April 25. Do I have enough days?"
Agent trajectory:
1. get_employee_info(employee_id="emp_789") — get ID from session context
2. check_vacation_balance(employee_id="emp_789") — balance: 14 days
3. get_company_policy("vacation_approval") — read approval rules
4. Final answer: "You have 14 vacation days. Period April 15–25 is 11 business days (accounting for holidays). You have sufficient balance. To proceed — submit_vacation_request. Your manager must approve within 3 business days per policy."
Metrics for the first month:
- Autonomous processing (no operator): 84%
- Balance/policy information accuracy: 97%
- Average response time: 4.2s
Tool Call Validation
from pydantic import BaseModel, validator
from datetime import date
class VacationRequest(BaseModel):
    """Argument schema for submit_vacation_request, validated before execution."""

    employee_id: str
    start_date: date
    end_date: date

    # NOTE(review): pydantic v1-style @validator — deprecated under pydantic v2,
    # where this should become @field_validator("end_date"); confirm the
    # project's pydantic version.
    @validator("end_date")
    def end_after_start(cls, v, values):
        # Reject zero- or negative-length ranges.  `values` only contains
        # fields that have already validated, hence the membership check
        # (if start_date itself failed validation, we skip this comparison).
        if "start_date" in values and v <= values["start_date"]:
            raise ValueError("end_date must be after start_date")
        return v
# Validate before execution
def safe_tool_execution(func_name: str, func_args: dict) -> dict:
    """Validate and execute a model-requested tool call.

    Returns the tool's result dict on success, or an
    ``{"error": ..., "status": "validation_failed"}`` dict on failure —
    errors are reported back to the model rather than raised, so a bad
    call never crashes the agent loop.
    """
    func = FUNCTION_MAP.get(func_name)
    if func is None:
        # The original indexed FUNCTION_MAP[func_name]; the resulting
        # KeyError text ("'name'") was a useless error message for the model.
        return {"error": f"Function {func_name} not found", "status": "validation_failed"}
    try:
        if func_name == "submit_vacation_request":
            VacationRequest(**func_args)  # raises on bad/missing arguments
        return func(**func_args)
    except Exception as e:  # boundary: report the failure, never propagate
        return {"error": str(e), "status": "validation_failed"}
Timeline
- Agent with 3–7 tools: 2–4 weeks
- Integration with corporate systems: 2–4 weeks
- Testing and monitoring: 1–2 weeks
- Total: 5–10 weeks







