LangChain Series Articles

Runnables make it easy to string multiple chains together. The example below wires up four chains: the first generates a color, the second and third each consume that color in parallel, and the fourth chain takes the outputs of the second and third as its inputs.
from dotenv import load_dotenv  # loads environment variables (e.g. OPENAI_API_KEY) from a .env file
from langchain.globals import set_debug
from langchain.prompts import ChatPromptTemplate
from langchain_community.chat_models import ChatOpenAI
# A string output parser makes every sub-chain emit plain text, so their outputs are the same type
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

load_dotenv()    # actually load the environment variables
set_debug(True)  # enable LangChain's debug mode to print a trace of every chain/LLM run
prompt1 = ChatPromptTemplate.from_template(
    "generate a {attribute} color. Return the name of the color and nothing else:"
)
prompt2 = ChatPromptTemplate.from_template(
    "what is a fruit of color: {color}. Return the name of the fruit and nothing else:"
)
prompt3 = ChatPromptTemplate.from_template(
    "what is a country with a flag that has the color: {color}. Return the name of the country and nothing else:"
)
prompt4 = ChatPromptTemplate.from_template(
    "What is the color of {fruit} and the flag of {country}?"
)
model = ChatOpenAI()
model_parser = model | StrOutputParser()  # chat model whose output is parsed down to a plain string

# Chain 1: attribute -> color. The dicts are coerced into RunnableParallel steps, so the raw
# input is wrapped as {"attribute": ...} and the model's answer as {"color": ...}.
color_generator = (
    {"attribute": RunnablePassthrough()} | prompt1 | {"color": model_parser}
)
# Chains 2 and 3: color -> fruit, color -> country
color_to_fruit = prompt2 | model_parser
color_to_country = prompt3 | model_parser
# Chain 4: run chains 2 and 3 in parallel on the generated color,
# then feed both results into prompt4
question_generator = (
    color_generator | {"fruit": color_to_fruit, "country": color_to_country} | prompt4
)
prompt = question_generator.invoke("warm")
print('prompt >> ', prompt)
response = model.invoke(prompt)
print('response >> ', response)
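Because question_generator produces a ChatPromptValue, the two-step invoke above can also be folded into a single runnable. A minimal sketch, reusing the objects defined above (full_chain is a name introduced here for illustration):

# Hypothetical one-shot composition: pipe the generated prompt straight into the model and parser
full_chain = question_generator | model_parser
print(full_chain.invoke("warm"))
# Like any runnable, the composed chain also supports batch/async execution, e.g.
# full_chain.batch(["warm", "cool"])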
Output
(.venv) ~/Workspace/LLM/langchain-llm-app git:(develop) % python LCEL/chains_mul2.py
[chain/start] [1:chain:RunnableSequence] Entering Chain run with input:
{
"input": "warm"
}
[chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel<attribute>] Entering Chain run with input:
{
"input": "warm"
}
[chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel<attribute> > 3:chain:RunnablePassthrough] Entering Chain run with input:
{
"input": "warm"
}
[chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel<attribute> > 3:chain:RunnablePassthrough] [3ms] Exiting Chain run with output:
{
"output": "warm"
}
[chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel<attribute>] [12ms] Exiting Chain run with output:
{
"attribute": "warm"
}
[chain/start] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] Entering Prompt run with input:
{
"attribute": "warm"
}
[chain/end] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] [2ms] Exiting Prompt run with output:
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptValue"
],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "generate a warm color. Return the name of the color and nothing else:",
"additional_kwargs": {}
}
}
]
}
}
[chain/start] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color>] Entering Chain run with input:
[inputs]
[chain/start] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color> > 6:chain:RunnableSequence] Entering Chain run with input:
[inputs]
[llm/start] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color> > 6:chain:RunnableSequence > 7:llm:ChatOpenAI] Entering LLM run with input:
{
"prompts": [
"Human: generate a warm color. Return the name of the color and nothing else:"
]
}
[llm/end] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color> > 6:chain:RunnableSequence > 7:llm:ChatOpenAI] [4.06s] Exiting LLM run with output:
{
"generations": [
[
{
"text": "Red",
"generation_info": {
"finish_reason": "stop",
"logprobs": null
},
"type": "ChatGeneration",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"AIMessage"
],
"kwargs": {
"content": "Red",
"additional_kwargs": {}
}
}
}
]
],
"llm_output": {
"token_usage": {
"completion_tokens": 1,
"prompt_tokens": 22,
"total_tokens": 23
},
"model_name": "gpt-3.5-turbo",
"system_fingerprint": null
},
"run": null
}
[chain/start] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color> > 6:chain:RunnableSequence > 8:parser:StrOutputParser] Entering Parser run with input:
[inputs]
[chain/end] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color> > 6:chain:RunnableSequence > 8:parser:StrOutputParser] [1ms] Exiting Parser run with output:
{
"output": "Red"
}
[chain/end] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color> > 6:chain:RunnableSequence] [4.07s] Exiting Chain run with output:
{
"output": "Red"
}
[chain/end] [1:chain:RunnableSequence > 5:chain:RunnableParallel<color>] [4.07s] Exiting Chain run with output:
{
"color": "Red"
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country>] Entering Chain run with input:
{
"color": "Red"
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence] Entering Chain run with input:
{
"color": "Red"
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence > 11:prompt:ChatPromptTemplate] Entering Prompt run with input:
{
"color": "Red"
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence] Entering Chain run with input:
{
"color": "Red"
}
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence > 11:prompt:ChatPromptTemplate] [8ms] Exiting Prompt run with output:
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptValue"
],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "what is a fruit of color: Red. Return the name of the fruit and nothing else:",
"additional_kwargs": {}
}
}
]
}
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence > 12:prompt:ChatPromptTemplate] Entering Prompt run with input:
{
"color": "Red"
}
[llm/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence > 12:llm:ChatOpenAI] Entering LLM run with input:
{
"prompts": [
"Human: what is a fruit of color: Red. Return the name of the fruit and nothing else:"
]
}
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence > 12:prompt:ChatPromptTemplate] [7ms] Exiting Prompt run with output:
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptValue"
],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "what is a country with a flag that has the color: Red. Return the name of the country and nothing else:",
"additional_kwargs": {}
}
}
]
}
}
[llm/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence > 13:llm:ChatOpenAI] Entering LLM run with input:
{
"prompts": [
"Human: what is a country with a flag that has the color: Red. Return the name of the country and nothing else:"
]
}
[llm/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence > 12:llm:ChatOpenAI] [772ms] Exiting LLM run with output:
{
"generations": [
[
{
"text": "Strawberry.",
"generation_info": {
"finish_reason": "stop",
"logprobs": null
},
"type": "ChatGeneration",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"AIMessage"
],
"kwargs": {
"content": "Strawberry.",
"additional_kwargs": {}
}
}
}
]
],
"llm_output": {
"token_usage": {
"completion_tokens": 4,
"prompt_tokens": 26,
"total_tokens": 30
},
"model_name": "gpt-3.5-turbo",
"system_fingerprint": null
},
"run": null
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence > 13:parser:StrOutputParser] Entering Parser run with input:
[inputs]
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence > 13:parser:StrOutputParser] [1ms] Exiting Parser run with output:
{
"output": "Strawberry."
}
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 10:chain:RunnableSequence] [789ms] Exiting Chain run with output:
{
"output": "Strawberry."
}
[llm/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence > 13:llm:ChatOpenAI] [2.42s] Exiting LLM run with output:
{
"generations": [
[
{
"text": "China",
"generation_info": {
"finish_reason": "stop",
"logprobs": null
},
"type": "ChatGeneration",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"AIMessage"
],
"kwargs": {
"content": "China",
"additional_kwargs": {}
}
}
}
]
],
"llm_output": {
"token_usage": {
"completion_tokens": 1,
"prompt_tokens": 31,
"total_tokens": 32
},
"model_name": "gpt-3.5-turbo",
"system_fingerprint": null
},
"run": null
}
[chain/start] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence > 14:parser:StrOutputParser] Entering Parser run with input:
[inputs]
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence > 14:parser:StrOutputParser] [1ms] Exiting Parser run with output:
{
"output": "China"
}
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country> > 11:chain:RunnableSequence] [2.45s] Exiting Chain run with output:
{
"output": "China"
}
[chain/end] [1:chain:RunnableSequence > 9:chain:RunnableParallel<fruit,country>] [2.46s] Exiting Chain run with output:
{
"fruit": "Strawberry.",
"country": "China"
}
[chain/start] [1:chain:RunnableSequence > 15:prompt:ChatPromptTemplate] Entering Prompt run with input:
{
"fruit": "Strawberry.",
"country": "China"
}
[chain/end] [1:chain:RunnableSequence > 15:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptValue"
],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "What is the color of Strawberry. and the flag of China?",
"additional_kwargs": {}
}
}
]
}
}
[chain/end] [1:chain:RunnableSequence] [6.56s] Exiting Chain run with output:
[outputs]
prompt >> messages=[HumanMessage(content='What is the color of Strawberry. and the flag of China?')]
[llm/start] [1:llm:ChatOpenAI] Entering LLM run with input:
{
"prompts": [
"Human: What is the color of Strawberry. and the flag of China?"
]
}
[llm/end] [1:llm:ChatOpenAI] [1.42s] Exiting LLM run with output:
{
"generations": [
[
{
"text": "The color of a strawberry is typically red. The flag of China is predominantly red with a large yellow star in the upper left corner and four smaller yellow stars surrounding it.",
"generation_info": {
"finish_reason": "stop",
"logprobs": null
},
"type": "ChatGeneration",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"AIMessage"
],
"kwargs": {
"content": "The color of a strawberry is typically red. The flag of China is predominantly red with a large yellow star in the upper left corner and four smaller yellow stars surrounding it.",
"additional_kwargs": {}
}
}
}
]
],
"llm_output": {
"token_usage": {
"completion_tokens": 34,
"prompt_tokens": 20,
"total_tokens": 54
},
"model_name": "gpt-3.5-turbo",
"system_fingerprint": null
},
"run": null
}
response >> content='The color of a strawberry is typically red. The flag of China is predominantly red with a large yellow star in the upper left corner and four smaller yellow stars surrounding it.'
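The object printed above is an AIMessage; if only the answer text is needed, read its content attribute (or append a StrOutputParser to the final model call):

# response.content holds the plain-text answer carried by the AIMessage
print(response.content)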
Code: https://github.com/zgpeace/pets-name-langchain/tree/develop
Reference: https://python.langchain.com/docs/expression_language/cookbook/multiple_chains