🔍 快速索引

💡 使用方法

点击下方类别快速跳转到对应的 API 参考部分:

📦 核心导入语句

Python
# 模型接口
from langchain import init_chat_model, init_embeddings
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_anthropic import ChatAnthropic
from langchain_google_genai import ChatGoogleGenerativeAI

# 消息系统
from langchain_core.messages import (
    SystemMessage,
    HumanMessage,
    AIMessage,
    ToolMessage
)

# 工具系统
from langchain.tools import tool, ToolRuntime

# Agent 创建
from langchain.agents import create_agent

# 中间件
from langchain.agents.middleware import (
    CustomMiddleware,
    PIIMiddleware,
    SummarizationMiddleware,
    HumanInTheLoopMiddleware
)

# Runtime 和上下文
from typing_extensions import TypedDict

# Checkpointer
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.checkpoint.postgres import PostgresSaver

# Store
from langgraph.store.memory import InMemoryStore
from langgraph.store.postgres import PostgresStore

# 流式输出
# 使用 agent.stream() 方法

# LangGraph
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.types import Command

# RAG 系统
from langchain_chroma import Chroma
from langchain_community.document_loaders import (
    PyPDFLoader,
    TextLoader,
    WebBaseLoader
)
from langchain_text_splitters import RecursiveCharacterTextSplitter

🤖 模型接口

init_chat_model()

参数 类型 说明
model str 模型名称,如 "gpt-4o"、"claude-3-5-sonnet"
model_provider str 提供商,如 "openai"、"anthropic"、"google"
temperature float 温度参数,0.0-2.0,默认 0.7
streaming bool 是否启用流式输出,默认 False
Python
# 基础用法
from langchain import init_chat_model

model = init_chat_model("gpt-4o", model_provider="openai", temperature=0.7)

# 带配置的用法
model = init_chat_model(
    "claude-3-5-sonnet-20241022",
    model_provider="anthropic",
    temperature=0.5,
    max_tokens=4096
)

init_embeddings()

参数 类型 说明
model str 嵌入模型名称,如 "text-embedding-3-small"
model_provider str 提供商,如 "openai"
Python
from langchain import init_embeddings

# 基础用法
embeddings = init_embeddings("text-embedding-3-small", model_provider="openai")

# 生成嵌入
vector = embeddings.embed_query("示例文本")
vectors = embeddings.embed_documents(["文本1", "文本2"])

💬 消息系统

消息类型

类型 用途 示例
SystemMessage 系统提示词 SystemMessage(content="你是AI助手")
HumanMessage 用户消息 HumanMessage(content="你好")
AIMessage AI 回复 AIMessage(content="你好!")
ToolMessage 工具调用结果 ToolMessage(content="结果", tool_call_id="123")
Python
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage

# Create a short conversation using the three core message types.
messages = [
    SystemMessage(content="你是专业的AI助手"),
    HumanMessage(content="什么是LangChain?"),
    AIMessage(content="LangChain是一个用于构建AI应用的框架")
]

# Access content_blocks (provider-agnostic view of message content).
# NOTE(review): in recent LangChain releases content_blocks yields typed
# dicts, so attribute access (`block.text`) may need to be
# `block["text"]` — confirm against the installed version.
for message in messages:
    for block in message.content_blocks:
        print(block.text)

🔧 工具系统

@tool 装饰器

Python
from langchain.tools import tool

# 基础工具
@tool
def search_database(query: str) -> str:
    """Search the database.

    Args:
        query: keyword to look up.

    Returns:
        A formatted result string.
    """
    result = f"搜索结果:{query}"
    return result

# 带运行时上下文的工具
from langchain.tools import ToolRuntime
from typing_extensions import TypedDict

class MyContext(TypedDict):
    """Runtime context schema: identifies the calling user for tools."""

    user_id: str

@tool
def get_user_data(runtime: ToolRuntime[MyContext]) -> str:
    """Fetch data for the user identified in the runtime context."""
    # The user id comes from the per-invocation context, not from the LLM.
    return f"用户 {runtime.context['user_id']} 的数据"

# Return content plus artifact metadata.
# Fix: ``langchain.tools`` exposes no ``content_and_artifact`` helper to
# import or call; the supported API is the decorator argument
# ``response_format="content_and_artifact"`` with the tool returning the
# (content, artifact) tuple directly.

@tool(response_format="content_and_artifact")
def analyze_data(data: str) -> tuple[str, dict]:
    """Analyze the input data.

    Returns:
        A (content, artifact) pair: the content string is sent to the
        model; the artifact dict is kept for programmatic access.
    """
    result = "分析结果"
    metadata = {"score": 0.95, "confidence": 0.88}
    return result, metadata

工具调用

Python
# 直接调用工具
result = search_database.invoke({"query": "LangChain"})

# 带运行时上下文调用
result = get_user_data.invoke({
    "runtime": ToolRuntime(context={"user_id": "user_123"})
})

🎯 Agent 创建

create_agent() 参数

参数 类型 说明
model str | ChatModel 模型名称或实例
tools list 工具列表
system_prompt str 系统提示词
middleware list 中间件列表
checkpointer Checkpointer 状态持久化器
store Store 长期存储
context_schema TypedDict 运行时上下文 Schema
Python
from langchain.agents import create_agent
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.store.memory import InMemoryStore

# 基础 Agent
agent = create_agent(
    model="gpt-4o",
    tools=[search_database, get_user_data],
    system_prompt="你是专业的AI助手"
)

# 完整配置的 Agent
checkpointer = InMemorySaver()
store = InMemoryStore()

agent = create_agent(
    model="gpt-4o",
    tools=[search_database],
    middleware=[PIIMiddleware()],
    checkpointer=checkpointer,
    store=store,
    context_schema=MyContext,
    system_prompt="你是专业的AI助手"
)

# 调用 Agent
result = agent.invoke({
    "messages": [{"role": "user", "content": "你好"}]
})

# 带配置调用
config = {
    "configurable": {"thread_id": "thread_001"},
    "context": {"user_id": "user_123"}
}
result = agent.invoke({"messages": [...]}, config=config)

⚙️ 中间件系统

自定义中间件钩子

钩子 触发时机 参数
before_agent Agent 执行前 (state, config)
after_agent Agent 执行后 (state, config)
before_model 模型调用前 (state, config)
after_model 模型调用后 (state, config)
wrap_model_call 包装模型调用 (call_model, state, config)
wrap_tool_call 包装工具调用 (call_tool, tool_req, config)
Python
from langchain.agents.middleware import CustomMiddleware

class LoggingMiddleware(CustomMiddleware):
    """Logging middleware: prints a line at each agent/model lifecycle hook."""

    def before_agent(self, state, config):
        # Runs once before the agent loop starts; must return the state.
        print("Agent 开始执行")
        return state

    def after_agent(self, state, config):
        # Runs once after the agent loop finishes.
        print("Agent 执行完成")
        return state

    def wrap_model_call(self, call_model, state, config):
        # Wraps each model invocation; delegate via call_model and pass
        # its result through unchanged.
        print("调用模型前")
        result = call_model(state, config)
        print("调用模型后")
        return result

预置中间件

Python
from langchain.agents.middleware import (
    PIIMiddleware,
    SummarizationMiddleware,
    HumanInTheLoopMiddleware
)

# PII 保护中间件
pii_middleware = PIIMiddleware(
    pii_types=["email", "phone_number", "credit_card"],
    strategy="redact"  # 选项:redact, mask, hash, block
)

# 摘要中间件
summarization = SummarizationMiddleware(
    max_messages=10,
    summarize_older=True
)

# 人工介入中间件
hitl = HumanInTheLoopMiddleware(
    interrupt_on={
        "sensitive_tool": True,
        "normal_tool": False
    }
)

🔌 Runtime 系统

定义上下文 Schema

Python
from typing_extensions import TypedDict

class UserContext(TypedDict):
    """Runtime context schema for user-scoped tools."""

    user_id: str
    session_id: str
    permissions: list[str]

使用 ToolRuntime

Python
from langchain.tools import tool, ToolRuntime

@tool
def get_user_profile(runtime: ToolRuntime[UserContext]) -> str:
    """Fetch the profile of the user named in the runtime context."""
    user_id = runtime.context["user_id"]
    # Demonstrates long-term Store access; the fetched item is not used
    # in the returned string.
    profile = runtime.store.get(("users", user_id), "profile")
    return f"用户 {user_id} 的资料"

传递上下文

Python
config = {
    "configurable": {"thread_id": "thread_001"},
    "context": {
        "user_id": "user_123",
        "session_id": "session_456",
        "permissions": ["read", "write"]
    }
}

result = agent.invoke({"messages": [...]}, config=config)

💾 持久化系统

Checkpointer 类型

类型 用途 导入
InMemorySaver 内存持久化(开发) langgraph.checkpoint.memory
PostgresSaver PostgreSQL 持久化(生产) langgraph.checkpoint.postgres
Python
# 开发环境:内存 Checkpointer
from langgraph.checkpoint.memory import InMemorySaver

checkpointer = InMemorySaver()

# 生产环境:PostgreSQL Checkpointer
from langgraph.checkpoint.postgres import PostgresSaver

DB_URI = "postgresql://user:pass@localhost:5432/langchain"

with PostgresSaver.from_conn_string(DB_URI) as checkpointer:
    checkpointer.setup()

    agent = create_agent(
        model="gpt-4o",
        tools=[...],
        checkpointer=checkpointer
    )

获取状态历史

Python
config = {"configurable": {"thread_id": "thread_001"}}

# 获取所有历史状态
history = agent.get_state_history(config)

for state in history:
    print(f"时间:{state.metadata['timestamp']}")
    print(f"消息数:{len(state.values['messages'])}")

🗄️ 长期记忆(Store)

Store 类型

类型 用途 导入
InMemoryStore 内存存储(开发) langgraph.store.memory
PostgresStore PostgreSQL 存储(生产) langgraph.store.postgres
Python
# 开发环境
from langgraph.store.memory import InMemoryStore

store = InMemoryStore()

# 生产环境
from langgraph.store.postgres import PostgresStore

DB_URI = "postgresql://user:pass@localhost:5432/langchain"

with PostgresStore.from_conn_string(DB_URI) as store:
    store.setup()

    agent = create_agent(
        model="gpt-4o",
        tools=[...],
        store=store
    )

Store 操作

Python
# 在工具中使用 Store
# Using the Store from inside a tool.
@tool
def save_preference(
    key: str,
    value: str,
    runtime: ToolRuntime[UserContext]
) -> str:
    """Persist a single user preference into the long-term store."""
    user_id = runtime.context["user_id"]
    # Entries are namespaced per user: ("users", <user_id>).
    runtime.store.put(("users", user_id), key, value)
    return f"已保存 {key} = {value}"

@tool
def get_preference(
    key: str,
    runtime: ToolRuntime[UserContext]
) -> str:
    """Read back a user preference, or report that it is unset."""
    user_id = runtime.context["user_id"]
    item = runtime.store.get(("users", user_id), key)
    # Guard clause: nothing stored under this key for this user.
    if not item:
        return f"未设置 {key}"
    return f"{key} = {item.value}"

🌊 流式输出

stream_mode 选项

模式 说明 返回格式
"messages" 流式返回消息 token (token, metadata)
"updates" 节点执行更新 {node_name: data}
"values" 完整状态值 {"messages": [...], ...}
Python
# 基础流式输出(只输出消息)
for chunk in agent.stream({"messages": [...]}, stream_mode="messages"):
    token, metadata = chunk
    if token:
        print(token, end="", flush=True)

# 多模式流式输出
for stream_mode, chunk in agent.stream(
    {"messages": [...]},
    stream_mode=["messages", "updates"]
):
    if stream_mode == "messages":
        token, metadata = chunk
        if token:
            print(token, end="", flush=True)

    elif stream_mode == "updates":
        for node, data in chunk.items():
            print(f"\n节点 {node} 执行完成")

📊 LangGraph

创建 StateGraph

Python
from langgraph.graph import StateGraph, MessagesState, START, END

# 使用预构建状态
graph = StateGraph(MessagesState)

# 自定义状态
from typing_extensions import TypedDict
from typing import Annotated
from operator import add

class CustomState(TypedDict):
    messages: Annotated[list, add]
    current_step: str
    count: int

graph = StateGraph(CustomState)

添加节点和边

Python
# 定义节点函数
def process_input(state):
    """Mark the run as processed (returns a partial state update)."""
    update = {"current_step": "processed"}
    return update

def call_llm(state):
    """Append a canned AI reply (returns a partial state update)."""
    reply = {"role": "ai", "content": "响应"}
    return {"messages": [reply]}

# 添加节点
graph.add_node("process", process_input)
graph.add_node("llm", call_llm)

# 添加普通边
graph.add_edge(START, "process")
graph.add_edge("process", "llm")
graph.add_edge("llm", END)

# 添加条件边
def should_continue(state):
    """Route to "continue" while count is below 5, otherwise "end"."""
    return "continue" if state.get("count", 0) < 5 else "end"

graph.add_conditional_edges(
    "llm",
    should_continue,
    {"continue": "process", "end": END}
)

# 编译图
compiled = graph.compile()

Command 控制流

Python
from langgraph.types import Command
from typing import Literal

def process_node(state) -> Command[Literal["success", "retry", "fail"]]:
    """Return a state update plus a routing directive.

    Fix: the original referenced undefined names ``success`` and
    ``can_retry`` (guaranteed NameError at runtime); these flags are now
    read from the graph state. ``retry_count`` is read with a default of
    0 so the first retry does not raise KeyError.
    """
    if state.get("success"):
        return Command(
            update={"result": "成功"},
            goto="success"
        )
    if state.get("can_retry"):
        return Command(
            update={"retry_count": state.get("retry_count", 0) + 1},
            goto="retry"
        )
    # Neither success nor retryable: record the error and fail.
    return Command(
        update={"error": "失败"},
        goto="fail"
    )

🔍 RAG 系统

文档加载

Python
from langchain_community.document_loaders import (
    PyPDFLoader,
    TextLoader,
    WebBaseLoader
)

# PDF 文档
pdf_loader = PyPDFLoader("document.pdf")
pdf_docs = pdf_loader.load()

# 文本文件
text_loader = TextLoader("document.txt")
text_docs = text_loader.load()

# 网页
web_loader = WebBaseLoader("https://example.com")
web_docs = web_loader.load()

文档分割

Python
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
    separators=["\n\n", "\n", " ", ""]
)

chunks = splitter.split_documents(documents)

向量存储和检索

Python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

# 创建 Embeddings
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

# 创建向量存储
vectorstore = Chroma.from_documents(
    documents=chunks,
    embedding=embeddings,
    collection_name="my_collection"
)

# 创建检索器
retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 3}
)

# 检索
results = retriever.invoke("查询文本")

RAG 检索工具

Python
from langchain.tools import tool

@tool
def search_knowledge_base(query: str) -> str:
    """Search the knowledge base via the module-level retriever."""
    docs = retriever.invoke(query)
    # Guard clause: retriever found nothing relevant.
    if not docs:
        return "未找到相关信息"
    context = "\n\n".join(doc.page_content for doc in docs)
    return f"相关信息:\n{context}"

💡 常用代码片段

完整的 Agent 配置

Python
from langchain.agents import create_agent
from langchain.agents.middleware import PIIMiddleware
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.store.memory import InMemoryStore
from typing_extensions import TypedDict

# 定义上下文
class AppContext(TypedDict):
    """Application runtime context: user and session identifiers."""

    user_id: str
    session_id: str

# 定义工具
@tool
def my_tool(param: str, runtime: ToolRuntime[AppContext]) -> str:
    """Example tool that echoes its parameter.

    Reads user_id from the runtime context to demonstrate context
    access; the value is not used in the result.
    """
    user_id = runtime.context["user_id"]
    return f"结果:{param}"

# 初始化组件
checkpointer = InMemorySaver()
store = InMemoryStore()
pii_middleware = PIIMiddleware(pii_types=["email"], strategy="redact")

# 创建 Agent
agent = create_agent(
    model="gpt-4o",
    tools=[my_tool],
    middleware=[pii_middleware],
    checkpointer=checkpointer,
    store=store,
    context_schema=AppContext,
    system_prompt="你是专业的AI助手"
)

# 调用 Agent
config = {
    "configurable": {"thread_id": "thread_001"},
    "context": {"user_id": "user_123", "session_id": "session_456"}
}

result = agent.invoke(
    {"messages": [{"role": "user", "content": "你好"}]},
    config=config
)

流式对话处理

Python
def chat_stream(user_message: str, thread_id: str, user_id: str):
    """Stream one chat turn and return the assistant's full reply.

    Prints tokens as they arrive, and flags any human-review interrupt
    reported through an "updates" event.
    """
    run_config = {
        "configurable": {"thread_id": thread_id},
        "context": {"user_id": user_id, "session_id": thread_id}
    }

    print(f"用户: {user_message}")
    print("AI: ", end="", flush=True)

    # Collect token pieces and join once at the end.
    pieces = []

    stream = agent.stream(
        {"messages": [{"role": "user", "content": user_message}]},
        config=run_config,
        stream_mode=["messages", "updates"]
    )
    for mode, payload in stream:
        if mode == "messages":
            token, _metadata = payload
            if token:
                print(token, end="", flush=True)
                pieces.append(token)
        elif mode == "updates":
            for _node, data in payload.items():
                if "__interrupt__" in data:
                    print("\n[需要人工审核]")

    print("\n")
    return "".join(pieces)

📖 参考资源