from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# The document chain can be run directly by passing in documents ourselves
from langchain_core.documents import Document
text ="langsmith can let you visualize test results" document_chain.invoke({ "input": "Langsmith 如何帮助进行测试?", "context": [Document(page_content=text)] })
from langchain.chains import create_retrieval_chain
# Create a retriever from the vector store
retriever = vector.as_retriever()

# Create a chain that takes the user query, passes it to the retriever to fetch
# relevant documents, and then passes those documents (together with the
# original input) to the LLM to generate a response
retrieval_chain = create_retrieval_chain(retriever, document_chain)

# Run the retrieval chain; this returns a dict
response = retrieval_chain.invoke({"input": "how can langsmith help with testing?"})
print(response["answer"])
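Besides "answer", the dict returned by create_retrieval_chain also carries the retrieved documents under the "context" key, which is handy for checking what the LLM actually saw:

# Inspect the documents that were retrieved and passed to the LLM
for doc in response["context"]:
    print(doc.page_content[:100])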
The answer should now be more accurate.
LangSmith can help with testing in several ways.
1. LangSmith allows developers to create datasets, which are collections of inputs and reference outputs, and use these to run tests on their LLM applications. Test cases can be uploaded in bulk, created on the fly, or exported from application traces.
2. LangSmith provides a user-friendly comparison view for test runs. This allows developers to compare the results of different configurations on the same datapoints side-by-side, helping them identify any regressions or improvements.
3. LangSmith supports custom evaluations, both LLM-based and heuristic-based, to score test results.
Overall, LangSmith enables developers to perform test-driven development and evaluate the performance of their LLM applications during the prototyping and beta testing phases.
from langchain_community.tools.tavily_search import TavilySearchResults
search = TavilySearchResults()
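As a quick sanity check, the search tool can be invoked on its own (this assumes a valid TAVILY_API_KEY is set in the environment):

# Try the search tool directly before handing it to the agent
print(search.invoke("what is LangSmith?"))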
Creating the agent
Create the list of tools to use
tools = [retriever_tool, search]
Create an agent that uses the tools
from langchain_openai import ChatOpenAI
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain.agents import AgentExecutor
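Only the imports of this block survive above; the sketch below shows the usual wiring, assuming the public "hwchase17/openai-functions-agent" prompt from the LangChain hub:

# Pull a ready-made OpenAI-functions agent prompt from the hub
prompt = hub.pull("hwchase17/openai-functions-agent")
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# Bind the LLM, tools, and prompt into an agent, then wrap it in an executor
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# The agent decides whether to call the retriever tool or the Tavily search tool
agent_executor.invoke({"input": "how can langsmith help with testing?"})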
from fastapi import FastAPI
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents import create_openai_functions_agent
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.retriever import create_retriever_tool
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.vectorstores import FAISS
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langserve import add_routes
# 2. Create the tools
# Retriever tool
retriever_tool = create_retriever_tool(
    retriever,
    "langsmith_search",
    "Search for information about LangSmith. For any questions about LangSmith, you must use this tool!",
)
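The file above only shows the tool-creation step. To reach the server output below, the remaining pieces of a typical LangServe serve.py are sketched here; the Input/Output schemas and the "/agent" route path are assumptions chosen to match the log output:

from typing import List

# 3. Create the agent
prompt = hub.pull("hwchase17/openai-functions-agent")
llm = ChatOpenAI()
tools = [retriever_tool, TavilySearchResults()]
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# 4. App definition
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)

# 5. Adding the chain route: declare input/output schemas so the docs and
# playground know what the agent expects and returns
class Input(BaseModel):
    input: str
    chat_history: List[BaseMessage] = Field(default_factory=list)

class Output(BaseModel):
    output: str

add_routes(
    app,
    agent_executor.with_types(input_type=Input, output_type=Output),
    path="/agent",
)

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)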
INFO: Started server process [18352]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://localhost:8000 (Press CTRL+C to quit)
LANGSERVE: Playground for chain "/agent/" is live at:
LANGSERVE: │
LANGSERVE: └──> /agent/playground/
LANGSERVE:
LANGSERVE: See all available routes at /docs/
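With the server running, the route can also be called from another process; a small client sketch using langserve's RemoteRunnable (the question is just an example):

from langserve import RemoteRunnable

# Connect to the route exposed by add_routes above
remote_chain = RemoteRunnable("http://localhost:8000/agent/")
print(remote_chain.invoke({
    "input": "how can langsmith help with testing?",
    "chat_history": [],
}))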