I’m working on building an AI system that needs to route between different workflows based on user input. The system should decide if a user wants basic information lookup or needs complex processing through multiple stages. I’m encountering a TypeError when I attempt to access the AI response. Here’s my current setup:
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langgraph import StateGraph
from langchain.schema import HumanMessage, AIMessage, SystemMessage
load_dotenv()
class WorkflowState(TypedDict):
conversations: Annotated[list, add_messages]
# Setup LLM
client = ChatOpenAI(
api_key=os.getenv("OPENAI_API_KEY"),
model="gpt-3.5-turbo",
temperature=0.1,
max_tokens=200
)
def build_agent_node(state, prompt_text):
user_msgs = [m for m in state["conversations"] if isinstance(m, HumanMessage)]
bot_msgs = [m for m in state["conversations"] if isinstance(m, AIMessage)]
sys_msg = [SystemMessage(content=prompt_text)]
all_msgs = sys_msg + user_msgs + bot_msgs
response = client.invoke(all_msgs)
return {"conversations": [response]}
def load_data_from_file():
data_store = {}
try:
with open('tech_data.csv', 'r') as file:
# Process file content
pass
except FileNotFoundError:
print("Data file missing")
return data_store
def routing_decision(state):
latest_query = [m for m in state["conversations"] if isinstance(m, HumanMessage)][-1].content
decision_prompt = """
Analyze the user request and classify it as either:
- 'info_lookup' for simple factual questions
- 'complex_task' for development or multi-step processes
Return only the classification.
"""
result = client.invoke([SystemMessage(content=decision_prompt)] + [HumanMessage(content=latest_query)])
# This line causes the error
choice = result["conversations"][0].lower()
if "info_lookup" in choice:
return "simple_lookup"
else:
return "task_processor"
# Define processing nodes
processor_a = lambda s: build_agent_node(s, "You handle initial analysis...")
processor_b = lambda s: build_agent_node(s, "You design solutions...")
processor_c = lambda s: build_agent_node(s, "You implement code...")
# Build workflow
workflow = StateGraph(WorkflowState)
workflow.add_node("task_processor", processor_a)
workflow.add_node("solution_designer", processor_b)
workflow.add_node("code_writer", processor_c)
workflow.add_node("simple_lookup", simple_info_node)
workflow.add_conditional_edges(START, routing_decision)
The error occurs when I attempt to access the response as if it were a dictionary. How can I properly extract the content from the AI response object? I need to obtain the text content to ensure the routing decisions work correctly.