目录:
1、LangChain 的 LLM 调用功能
#!/usr/bin/env python
"""Minimal LangChain chat-model call: translate a Chinese sentence into Italian."""
import os

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

# Route traffic through a local proxy. The scheme prefix is required so that
# urllib/requests parse the proxy URL correctly.
os.environ['http_proxy'] = 'http://127.0.0.1:7890'
os.environ['https_proxy'] = 'http://127.0.0.1:7890'

# Enable LangSmith tracing so each model call can be inspected in the LangSmith UI.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY: never hard-code API keys in source control — the previous revision
# leaked real keys here. Supply them via the environment (or a secrets manager);
# setdefault keeps an externally provided key intact.
os.environ.setdefault("LANGCHAIN_API_KEY", "<your-langsmith-api-key>")  # LangSmith monitoring key
os.environ.setdefault("OPENAI_API_KEY", "<your-openai-api-key>")        # OpenAI key

model = ChatOpenAI(model="gpt-4-turbo")

# System message sets the task; human message carries the text to translate.
msg = [
    SystemMessage(content='请将以下的内容翻译成意大利语'),
    HumanMessage(content='你好,请问你要去哪里?')
]

# Returns an AIMessage; printing shows content plus response metadata.
result = model.invoke(msg)
print(result)
------------------------------------开发中的实际用法-----------------------------
# Production-style usage: pipe the model into an output parser so the chain
# returns the reply's plain string content instead of an AIMessage object.
# This import was missing — StrOutputParser would otherwise raise NameError.
from langchain_core.output_parsers import StrOutputParser

parser = StrOutputParser()
chain = model | parser
# Invoke the chain (reuses the `model` and `msg` defined above).
print(chain.invoke(msg))
响应的结果:
langsmith监控能查看大模型的响应过程:
直接将content内容解析出来:
2、LangChain 模板提示
2.1定义模版
2.2 使用LCEL连接组件调用接口
这是一个使用 LangChain 表达式 (LCEL) 连接 LangChain 模块的简单示例。这种方法有几个好处,包括优化的流式处理和追踪支持。
如果我们查看 LangSmith 追踪,我们可以看到所有三个组件出现在 LangSmith 追踪 中。
3、使用 LangServe 部署你的 LangChain 程序
3.1 安装 LangServe
pip install "langserve[all]"
<!--IMPORTS:[{"imported": "ChatPromptTemplate", "source": "langchain_core.prompts", "docs": "https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html", "title": "Build a Simple LLM Application with LCEL"}, {"imported": "StrOutputParser", "source": "langchain_core.output_parsers", "docs": "https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html", "title": "Build a Simple LLM Application with LCEL"}, {"imported": "ChatOpenAI", "source": "langchain_openai", "docs": "https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html", "title": "Build a Simple LLM Application with LCEL"}]-->
#!/usr/bin/env python
"""LangServe demo: expose a translation chain as a REST API endpoint."""
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langserve import add_routes

# 1. Create the prompt template: a system instruction with a {language} slot,
#    plus the user-supplied {text} to translate.
system_template = "Translate the following into {language}:"
prompt_template = ChatPromptTemplate.from_messages([
    ('system', system_template),
    ('user', '{text}')
])

# 2. Create the chat model (reads OPENAI_API_KEY from the environment).
model = ChatOpenAI()

# 3. Create the parser that extracts the reply's string content.
parser = StrOutputParser()

# 4. Compose the chain with LCEL: prompt -> model -> parser.
#    (Step numbering fixed — the original reused "4" twice.)
chain = prompt_template | model | parser

# 5. Define the FastAPI application.
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)

# 6. Mount the chain under /chainDemo; add_routes exposes /invoke, /batch,
#    /stream and the interactive /playground for this runnable.
add_routes(
    app,
    chain,
    path="/chainDemo",
)

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)
就这样!如果我们执行这个文件:
python serve.py
我们应该能在 http://localhost:8000 看到我们的链被服务。
3.2 程序的调用
4、LangChain构建聊天机器人
4.1、安装必要的依赖
#openai的安装
pip install -qU langchain-openai
#安装 langchain-community,因为我们将使用其中的集成来存储消息历史
pip install langchain_community
4.2、聊天机器人代码
import getpass
import os
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# Read the OpenAI API key interactively so it is never hard-coded in source.
os.environ["OPENAI_API_KEY"] = getpass.getpass("请输入OpenAI API密钥: ")

from langchain_openai import ChatOpenAI

# temperature=0.7 trades determinism for somewhat more varied replies.
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)

# Prompt template: a fixed system instruction, then the accumulated chat
# history, then the current user turn (filled in under the "input" key).
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个乐于助人的助手,请尽可能详细地回答所有问题。"),
    MessagesPlaceholder(variable_name="history"),
    ("human", "{input}"),
])
chain = prompt | model

# Session store: maps session_id -> in-memory message history,
# which is what lets the bot serve multiple concurrent users.
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*, creating one on first use."""
    history = store.get(session_id)
    if history is None:
        history = InMemoryChatMessageHistory()
        store[session_id] = history
    return history
# 创建带历史记录的链
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history,
input_messages_key="input",
history_messages_key="history"
)
def chat_with_bot(session_id: str):
    """Run an interactive console loop against the history-aware chain."""
    banner = "=" * 40
    print("\n" + banner)
    print(f"对话已开始 (会话ID: {session_id})")
    print("输入 'exit' 结束对话")
    print(banner + "\n")

    # Invariants hoisted out of the loop: accepted exit commands and the
    # per-session routing config passed to RunnableWithMessageHistory.
    exit_commands = ("exit", "退出", "q")
    session_config = {"configurable": {"session_id": session_id}}

    while True:
        user_input = input("你: ")

        # Leave the loop on any of the recognized exit commands.
        if user_input.lower() in exit_commands:
            print("\n对话结束!\n")
            break

        try:
            # Invoke the model; history for this session is handled automatically.
            response = with_message_history.invoke(
                {"input": user_input},
                config=session_config
            )
            print(f"\n机器人: {response.content}\n")
        except Exception as e:
            # Boundary handler: report the failure and keep the session alive.
            print(f"\n发生错误: {str(e)}\n")
if __name__ == "__main__":
    # Ask for a session id; an empty reply falls back to "default_session".
    session_id = input("请输入会话ID (回车使用默认值): ") or "default_session"
    chat_with_bot(session_id)
4.3、代码优化流式处理
# Streaming variant: iterate over response chunks as the model produces them.
config = {"configurable": {"session_id": "abc15"}}
# NOTE(review): this snippet (from the LangChain docs example) passes
# "messages"/"language" keys, but the chain defined above was built with
# input_messages_key="input" and a prompt expecting {input}/{history} —
# confirm which chain this call is meant to target before running it.
for r in with_message_history.stream(
    {
        "messages": [HumanMessage(content="hi! I'm todd. tell me a joke")],
        "language": "English",
    },
    config=config,
):
    # end="|" makes the chunk boundaries visible in the printed output.
    print(r.content, end="|")
输出的结果:
|Hi| Todd|!| Sure|,| here|'s| a| joke| for| you|:| Why| couldn|'t| the| bicycle| find| its| way| home|?| Because| it| lost| its| bearings|!| 😄||