Skip to content

Commit 3c3042a

Browse files
committed
fix: Complete LangChain integration example in README
- Add missing ChatPromptTemplate and MessagesPlaceholder imports
- Include complete prompt definition with proper format
- Add example invocation to show usage
- Fix test.py to use dotenv and proper prompt format

The previous example was incomplete and would cause 'max iterations' errors because the prompt format was wrong for tool-calling agents.
1 parent 9968f7b commit 3c3042a

File tree

2 files changed

+64
-1
lines changed

2 files changed

+64
-1
lines changed

README.md

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ results = await client.search_long_term_memory(
6666
from agent_memory_client import create_memory_client
6767
from agent_memory_client.integrations.langchain import get_memory_tools
6868
from langchain.agents import create_tool_calling_agent, AgentExecutor
69+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
6970
from langchain_openai import ChatOpenAI
7071

7172
# Get LangChain-compatible tools automatically
@@ -76,10 +77,19 @@ tools = get_memory_tools(
7677
user_id="alice"
7778
)
7879

79-
# Use with LangChain agents - no manual @tool wrapping needed!
80+
# Create prompt and agent
81+
prompt = ChatPromptTemplate.from_messages([
82+
("system", "You are a helpful assistant with memory."),
83+
("human", "{input}"),
84+
MessagesPlaceholder("agent_scratchpad"),
85+
])
86+
8087
llm = ChatOpenAI(model="gpt-4o")
8188
agent = create_tool_calling_agent(llm, tools, prompt)
8289
executor = AgentExecutor(agent=agent, tools=tools)
90+
91+
# Use the agent
92+
result = await executor.ainvoke({"input": "Remember that I love pizza"})
8393
```
8494

8595
> **Note**: While you can call client functions directly as shown above, using **MCP or SDK-provided tool calls** is recommended for AI agents as it provides better integration, automatic context management, and follows AI-native patterns. See **[Memory Integration Patterns](https://redis.github.io/agent-memory-server/memory-integration-patterns/)** for guidance on when to use each approach.

test.py

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
import os
2+
3+
from agent_memory_client import create_memory_client
4+
from agent_memory_client.integrations.langchain import get_memory_tools
5+
from dotenv import load_dotenv
6+
from langchain.agents import AgentExecutor, create_tool_calling_agent
7+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
8+
from langchain_openai import ChatOpenAI
9+
10+
11+
load_dotenv()
12+
13+
14+
async def main():
15+
# Check for API key
16+
if not os.getenv("OPENAI_API_KEY"):
17+
print("❌ OPENAI_API_KEY environment variable is required")
18+
print("Set it with: export OPENAI_API_KEY='your-key-here'")
19+
return
20+
# Initialize memory client
21+
memory_client = await create_memory_client("http://localhost:8000")
22+
23+
# Get LangChain-compatible tools automatically
24+
tools = get_memory_tools(
25+
memory_client=memory_client, session_id="my_session", user_id="alice"
26+
)
27+
28+
# Create the prompt with proper format for tool-calling agents
29+
prompt = ChatPromptTemplate.from_messages(
30+
[
31+
("system", "You are a helpful assistant with memory capabilities."),
32+
("human", "{input}"),
33+
MessagesPlaceholder("agent_scratchpad"),
34+
]
35+
)
36+
37+
# Use with LangChain agents - no manual @tool wrapping needed!
38+
llm = ChatOpenAI(model="gpt-4o")
39+
agent = create_tool_calling_agent(llm, tools, prompt)
40+
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
41+
42+
result = await executor.ainvoke(
43+
{"input": "What is the capital of France? Remember that I like France."}
44+
)
45+
print("\n" + "=" * 60)
46+
print("RESULT:", result["output"])
47+
print("=" * 60)
48+
49+
50+
if __name__ == "__main__":
51+
import asyncio
52+
53+
asyncio.run(main())

0 commit comments

Comments
 (0)