Issue with Agent Communication in LangGraph
I’m working on a multi-agent system using LangGraph where two agents need to talk to each other. I made a simple example with a questioner and answerer setup. The questioner works fine and asks questions, but the answerer never gives any response back, even with clear system prompts.
Here’s my implementation:
from langchain_ollama import ChatOllama
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage
from langgraph.graph.message import AnyMessage, add_messages
from langgraph.graph import END, START, StateGraph
from typing import TypedDict, Annotated
import functools
# Initialize the model
model = ChatOllama(
model="llama3.1:70b",
temperature=0,
)
# Agent creation function
def build_agent(model, prompt_text: str):
template = ChatPromptTemplate.from_messages(
[
("system",
"""
{prompt_text}
When the conversation ends, add END_CONVERSATION to your response.
Sample Dialog:
Questioner: Hi there! Could you tell me your name and age?
Answerer: Hello! My name is Sarah and I'm 28 years old.
Questioner: What do you do for work, Sarah?
Answerer: I work as a teacher at a local elementary school.
...
""",
),
MessagesPlaceholder(variable_name="messages"),
]
)
template = template.partial(prompt_text=prompt_text)
return template | model
# Create the questioner
questioner = build_agent(
model,
prompt_text="""You conduct interviews about people's careers and hobbies. Ask thoughtful questions that require detailed answers.
Be friendly and professional. Keep questions clear and focused.""",
)
# Create the answerer
answerer = build_agent(
model,
prompt_text="""You are being interviewed about your life and work. Provide honest and detailed responses.
Your Profile: You are Mike, 35 years old, working as a software developer. You love hiking and photography in your free time.
You've been coding for 10 years and enjoy solving complex problems.""",
)
# State definition
class ConversationState(TypedDict):
messages: Annotated[list[AnyMessage], add_messages]
current_speaker: str
# Node creation helper
def create_node(state, agent_instance):
message_list = state['messages']
result = agent_instance.invoke(message_list)
return {"messages": [result]}
# Define nodes
questioner_node = functools.partial(create_node, agent_instance=questioner)
answerer_node = functools.partial(create_node, agent_instance=answerer)
# Routing logic
def decide_next(state):
message_list = state["messages"]
recent_message = message_list[-1]
if "END_CONVERSATION" in recent_message.content:
return "__end__"
return "continue"
# Build workflow
graph = StateGraph(ConversationState)
graph.add_node("questioner", questioner_node)
graph.add_node("answerer", answerer_node)
graph.add_edge(START, "questioner")
graph.add_conditional_edges(
"questioner",
decide_next,
{"continue": "answerer", "__end__": END},
)
graph.add_conditional_edges(
"answerer",
decide_next,
{"continue": "questioner", "__end__": END},
)
# Run the conversation
runnable_graph = graph.compile()
results = runnable_graph.stream(
{
"messages": [
HumanMessage(
content="Start the interview process."
)
],
},
{"recursion_limit": 100},
)
for result in results:
print(result)
print("---")
What I’m seeing:
- The questioner agent generates questions properly
- The answerer agent returns completely empty content every time
- Token usage shows the answerer processes the input but outputs almost nothing
- Message state tracking appears correct
- LangSmith traces don’t reveal obvious problems
I’ve tried different prompt approaches but the second agent consistently fails to generate meaningful responses. Could this be more than just prompt engineering? Has anyone encountered similar behavior with Ollama and LangGraph?