How to add a human-in-the-loop for tools
There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.
This how-to guide shows a simple way to add a human-in-the-loop step for code running in a Jupyter notebook or a terminal.
To build a production application, you will need to do more work to keep track of application state appropriately.
We recommend using langgraph to power such a capability; for more details, please see this guide.
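As a rough illustration of that recommendation, here is a minimal sketch (not part of this guide) of how a similar pause-for-approval flow might look with langgraph's prebuilt ReAct agent. It assumes create_react_agent accepts interrupt_before together with a checkpointer, and reuses the llm and tools defined in the Chain section below:
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Sketch only: pause before the "tools" node so a human can review pending tool calls.
agent = create_react_agent(
    llm, tools, interrupt_before=["tools"], checkpointer=MemorySaver()
)
config = {"configurable": {"thread_id": "1"}}
agent.invoke(
    {"messages": [("user", "how many emails did i get in the last 5 days?")]}, config
)
# Inspect the pending tool calls, then resume the run if they look OK.
print(agent.get_state(config).values["messages"][-1].tool_calls)
agent.invoke(None, config)  # invoking with None resumes from the interrupt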
Setup
We'll need to install the following packages:
%pip install --upgrade --quiet langchain
And set these environment variables:
import getpass
import os
# If you'd like to use LangSmith, uncomment the below:
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
Chain
Let's create a few simple (dummy) tools and a tool-calling chain:
- OpenAI
- Anthropic
- Azure
- Google
- Cohere
- NVIDIA
- FireworksAI
- Groq
- MistralAI
- TogetherAI
- AWS
pip install -qU langchain-openai
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o-mini")
pip install -qU langchain-anthropic
import getpass
import os
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
pip install -qU langchain-openai
import getpass
import os
os.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()
from langchain_openai import AzureChatOpenAI
llm = AzureChatOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
    openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)
pip install -qU langchain-google-vertexai
import getpass
import os
os.environ["GOOGLE_API_KEY"] = getpass.getpass()
from langchain_google_vertexai import ChatVertexAI
llm = ChatVertexAI(model="gemini-1.5-flash")
pip install -qU langchain-cohere
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass()
from langchain_cohere import ChatCohere
llm = ChatCohere(model="command-r-plus")
pip install -qU langchain-nvidia-ai-endpoints
import getpass
import os
os.environ["NVIDIA_API_KEY"] = getpass.getpass()
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="meta/llama3-70b-instruct")
pip install -qU langchain-fireworks
import getpass
import os
os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
from langchain_fireworks import ChatFireworks
llm = ChatFireworks(model="accounts/fireworks/models/llama-v3p1-70b-instruct")
pip install -qU langchain-groq
import getpass
import os
os.environ["GROQ_API_KEY"] = getpass.getpass()
from langchain_groq import ChatGroq
llm = ChatGroq(model="llama3-8b-8192")
pip install -qU langchain-mistralai
import getpass
import os
os.environ["MISTRAL_API_KEY"] = getpass.getpass()
from langchain_mistralai import ChatMistralAI
llm = ChatMistralAI(model="mistral-large-latest")
pip install -qU langchain-openai
import getpass
import os
os.environ["TOGETHER_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
pip install -qU langchain-aws
# Ensure your AWS credentials are configured
from langchain_aws import ChatBedrock
llm = ChatBedrock(model_id="anthropic.claude-3-5-sonnet-20240620-v1:0")
from typing import Dict, List
from langchain_core.messages import AIMessage
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import tool
@tool
def count_emails(last_n_days: int) -> int:
    """Dummy function to count number of e-mails. Returns 2 * last_n_days."""
    return last_n_days * 2
@tool
def send_email(message: str, recipient: str) -> str:
    """Dummy function for sending an e-mail."""
    return f"Successfully sent email to {recipient}."
tools = [count_emails, send_email]
llm_with_tools = llm.bind_tools(tools)
def call_tools(msg: AIMessage) -> List[Dict]:
    """Simple sequential tool calling helper."""
    tool_map = {tool.name: tool for tool in tools}
    tool_calls = msg.tool_calls.copy()
    for tool_call in tool_calls:
        tool_call["output"] = tool_map[tool_call["name"]].invoke(tool_call["args"])
    return tool_calls
chain = llm_with_tools | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
[{'name': 'count_emails',
  'args': {'last_n_days': 5},
  'id': 'toolu_01QYZdJ4yPiqsdeENWHqioFW',
  'output': 10}]
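Note that call_tools attaches each tool's result to its tool call dict under an "output" key, so the chain returns the model's tool calls enriched with their results. As a small illustrative follow-up (not part of the original guide), you could read those results back out like this:
result = chain.invoke("how many emails did i get in the last 5 days?")
for tool_call in result:
    # e.g. count_emails({'last_n_days': 5}) -> 10
    print(f"{tool_call['name']}({tool_call['args']}) -> {tool_call['output']}")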
Adding human approval
Let's add a step in the chain that will ask a person to approve or reject the tool call request.
On rejection, the step will raise an exception which will stop execution of the rest of the chain.
import json
class NotApproved(Exception):
    """Custom exception."""
def human_approval(msg: AIMessage) -> AIMessage:
    """Responsible for passing through its input or raising an exception.

    Args:
        msg: output from the chat model

    Returns:
        msg: the original chat model output, unchanged
    """
    tool_strs = "\n\n".join(
        json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls
    )
    input_msg = (
        f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n"
        "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n >>>"
    )
    resp = input(input_msg)
    if resp.lower() not in ("yes", "y"):
        raise NotApproved(f"Tool invocations not approved:\n\n{tool_strs}")
    return msg
chain = llm_with_tools | human_approval | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
Do you approve of the following tool invocations
{
  "name": "count_emails",
  "args": {
    "last_n_days": 5
  },
  "id": "toolu_01WbD8XeMoQaRFtsZezfsHor"
}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
>>> yes
[{'name': 'count_emails',
  'args': {'last_n_days': 5},
  'id': 'toolu_01WbD8XeMoQaRFtsZezfsHor',
  'output': 10}]
try:
    chain.invoke("Send [email protected] an email saying 'What's up homie'")
except NotApproved as e:
    print()
    print(e)
Do you approve of the following tool invocations
{
  "name": "send_email",
  "args": {
    "recipient": "[email protected]",
    "message": "What's up homie"
  },
  "id": "toolu_014XccHFzBiVcc9GV1harV9U"
}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
>>> no
Tool invocations not approved:
{
  "name": "send_email",
  "args": {
    "recipient": "[email protected]",
    "message": "What's up homie"
  },
  "id": "toolu_014XccHFzBiVcc9GV1harV9U"
}
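Since only certain tools are risky enough to need sign-off, one possible variant (a sketch, not part of the original guide) is to gate approval per tool: read-only tools like count_emails pass through automatically, and only tools named in a hypothetical APPROVAL_REQUIRED set trigger the prompt. This reuses json, AIMessage, NotApproved, llm_with_tools, and call_tools from above:
# Hypothetical per-tool gating: only the tools listed here require human approval.
APPROVAL_REQUIRED = {"send_email"}

def selective_human_approval(msg: AIMessage) -> AIMessage:
    """Prompt only for tool calls that require approval; pass everything else through."""
    risky_calls = [tc for tc in msg.tool_calls if tc["name"] in APPROVAL_REQUIRED]
    if not risky_calls:
        return msg
    tool_strs = "\n\n".join(json.dumps(tc, indent=2) for tc in risky_calls)
    resp = input(
        f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n"
        "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n >>>"
    )
    if resp.lower() not in ("yes", "y"):
        raise NotApproved(f"Tool invocations not approved:\n\n{tool_strs}")
    return msg

chain = llm_with_tools | selective_human_approval | call_tools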