from langchain.chat_models import ChatOpenAI      # import the OpenAI chat model
from langchain.prompts import ChatPromptTemplate  # import the chat prompt template
from langchain.chains import LLMChain             # import the LLM chain
from langchain.chains import SimpleSequentialChain
api_key = "YOUR_OPENAI_API_KEY"  # replace with your own OpenAI API key
llm = ChatOpenAI(temperature=0.9, openai_api_key=api_key)
# Prompt template 1: takes a product and returns the best name to describe the company that makes it
first_prompt = ChatPromptTemplate.from_template(
"What is the best name to describe \
a company that makes {product}?"
)
# Chain 1
chain_one = LLMChain(llm=llm, prompt=first_prompt)
# Prompt template 2: takes the company name and outputs a 20-word description of that company
second_prompt = ChatPromptTemplate.from_template(
"Write a 20 words description for the following \
company:{company_name}"
)
# chain 2
chain_two = LLMChain(llm=llm, prompt=second_prompt)
# Combine the two chains, so that a single step goes from the product to a description of the company that uses the generated name
overall_simple_chain = SimpleSequentialChain(chains=[chain_one, chain_two],
verbose=True)
product = "Queen Size Sheet Set"
overall_simple_chain.run(product)
# Result: RegalRest Bedding
# RegalRest Bedding offers luxurious and comfortable mattresses and bedding accessories for a restful and rejuvenating sleep experience.
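For intuition, SimpleSequentialChain automates a single-input/single-output hand-off: the output of the first chain becomes the input of the second. A rough manual equivalent, using the two chains defined above:
# Manual equivalent of the SimpleSequentialChain hand-off (illustrative only)
company_name = chain_one.run(product)      # chain 1 proposes a company name
description = chain_two.run(company_name)  # chain 2 writes a 20-word description of it
print(description)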
from langchain.chains import SequentialChain  # sequential chain that supports multiple inputs/outputs
# Sub-chain 1
# Prompt template 1: translate the review below into English
first_prompt = ChatPromptTemplate.from_template(
"Translate the following review to english:"
"\n\n{Review}"
)
# Chain 1: input = Review, output = English_Review (the review translated into English)
chain_one = LLMChain(llm=llm, prompt=first_prompt,
output_key="English_Review"
)
# Sub-chain 2
# Prompt template 2: summarize the review below in one sentence
second_prompt = ChatPromptTemplate.from_template(
"Can you summarize the following review in 1 sentence:"
"\n\n{English_Review}"
)
# Chain 2: input = English_Review, output = summary
chain_two = LLMChain(llm=llm, prompt=second_prompt,
output_key="summary"
)
# Sub-chain 3
# Prompt template 3: what language is the review below written in?
third_prompt = ChatPromptTemplate.from_template(
"What language is the following review:\n\n{Review}"
)
# Chain 3: input = Review, output = language
chain_three = LLMChain(llm=llm, prompt=third_prompt,
output_key="language"
)
# Prompt template 4: write a follow-up response to the summary, in the specified language
fourth_prompt = ChatPromptTemplate.from_template(
"Write a follow up response to the following "
"summary in the specified language:"
"\n\nSummary: {summary}\n\nLanguage: {language}"
)
# Chain 4: inputs = summary, language; output = followup_message
chain_four = LLMChain(llm=llm, prompt=fourth_prompt,
output_key="followup_message"
)
# Combine the four sub-chains
# Input: Review; outputs: English_Review, summary, followup_message
overall_chain = SequentialChain(
chains=[chain_one, chain_two, chain_three, chain_four],
input_variables=["Review"],
output_variables=["English_Review", "summary","followup_message"],
verbose=True
)
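The call below assumes df is a pandas DataFrame of product reviews with a Review column; the file name Data.csv is only a placeholder for whatever review data you actually have.
import pandas as pd
df = pd.read_csv("Data.csv")  # hypothetical file: any CSV with a "Review" column will do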
review = df.Review[5]
overall_chain(review)
The result is shown below. Given the review text, sub-chain 1 translates it into English, sub-chain 2 summarizes the English text in one sentence, sub-chain 3 identifies the language of the original text, and sub-chain 4 writes a follow-up reply to the summary in that original language. Each later sub-chain can refer to the output_key variables produced by the chains before it.
{'Review': "Je trouve le goût médiocre. La mousse ne tient pas, c'est bizarre. J'achète les mêmes dans le commerce et le goût est bien meilleur...\nVieux lot ou contrefaçon !?",
'English_Review': "I find the taste mediocre. The foam doesn't hold, it's strange. I buy the same ones in stores and the taste is much better...\nOld batch or counterfeit!?",
'summary': 'The reviewer is disappointed with the taste and foam quality, suspecting that the product might be either an old batch or a counterfeit version.',
'followup_message': "Après avoir examiné vos commentaires, nous sommes désolés d'apprendre que vous êtes déçu par le goût et la qualité de la mousse de notre produit. Nous comprenons vos préoccupations et nous nous excusons pour tout inconvénient que cela a pu causer. Votre avis est précieux pour nous et nous aimerions enquêter davantage sur cette situation. Nous vous assurons que notre produit est authentique et fabriqué avec les normes les plus élevées de qualité. Cependant, nous examinerons attentivement votre spéculation selon laquelle il pourrait s'agir d'un lot ancien ou d'une contrefaçon. Veuillez nous fournir plus de détails sur le produit que vous avez acheté, y compris la date d'expiration et le code de lot, afin que nous puissions résoudre ce problème de manière appropriée. Nous vous remercions de nous avoir informés de cette situation et nous nous engageons à améliorer constamment notre produit pour répondre aux attentes de nos clients."}
A fairly common but basic operation is to route an input to a particular chain depending on what that input is: if you have several sub-chains, each specialized for a particular kind of input, you can compose a router chain that first decides which sub-chain the input should go to and then passes it along to that chain.
A router is made up of two components:
- the router chain itself, which inspects the input and selects the destination (falling back to a default chain when nothing fits);
- the destination chains, the specialized sub-chains the router can dispatch to.
The router chain asks the LLM to return its routing decision as a small JSON object:
{
    "destination": string \ name of the prompt to use or "DEFAULT"
    "next_inputs": string \ a potentially modified version of the original input
}
from langchain.chat_models import ChatOpenAI          # import the OpenAI chat model
from langchain.prompts import ChatPromptTemplate      # import the chat prompt template
from langchain.chains import LLMChain                 # import the LLM chain
from langchain.chains.router import MultiPromptChain  # import the multi-prompt chain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.prompts import PromptTemplate
# example4
# The first prompt is suited to answering physics questions
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise \
and easy to understand manner. \
When you don't know the answer to a question you admit \
that you don't know.
Here is a question:
{input}"""
# The second prompt is suited to answering math questions
math_template = """You are a very good mathematician. \
You are great at answering math questions. \
You are so good because you are able to break down \
hard problems into their component parts,
answer the component parts, and then put them together \
to answer the broader question.
Here is a question:
{input}"""
# The third prompt is suited to answering history questions
history_template = """You are a very good historian. \
You have an excellent knowledge of and understanding of people, \
events and contexts from a range of historical periods. \
You have the ability to think, reflect, debate, discuss and \
evaluate the past. You have a respect for historical evidence \
and the ability to make use of it to support your explanations \
and judgements.
Here is a question:
{input}"""
# The fourth prompt is suited to answering computer science questions
computerscience_template = """ You are a successful computer scientist.\
You have a passion for creativity, collaboration, \
forward-thinking, confidence, strong problem-solving capabilities, \
understanding of theories and algorithms, and excellent communication \
skills. You are great at answering coding questions. \
You are so good because you know how to solve a problem by \
describing the solution in imperative steps \
that a machine can easily interpret and you know how to \
choose a solution that has a good balance between \
time complexity and space complexity.
Here is a question:
{input}"""
# With these prompt templates in hand, give each one a name and a description.
# For example, the physics description says it is good for answering physics questions; this information is passed to the router chain, which decides when to use each sub-chain.
prompt_infos = [
{
"name": "physics",
"description": "Good for answering questions about physics",
"prompt_template": physics_template
},
{
"name": "math",
"description": "Good for answering math questions",
"prompt_template": math_template
},
{
"name": "History",
"description": "Good for answering history questions",
"prompt_template": history_template
},
{
"name": "computer science",
"description": "Good for answering computer science questions",
"prompt_template": computerscience_template
}
]
api_key = "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"
llm = ChatOpenAI(temperature=0, openai_api_key = api_key)
destination_chains = {}
for p_info in prompt_infos:
    name = p_info["name"]
    prompt_template = p_info["prompt_template"]
    prompt = ChatPromptTemplate.from_template(template=prompt_template)
    chain = LLMChain(llm=llm, prompt=prompt)
    destination_chains[name] = chain
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = "\n".join(destinations)
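# For reference, destinations_str is now one "name: description" pair per line:
# physics: Good for answering questions about physics
# math: Good for answering math questions
# History: Good for answering history questions
# computer science: Good for answering computer science questions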
# Create the default destination chain
default_prompt = ChatPromptTemplate.from_template("{input}")
default_chain = LLMChain(llm=llm, prompt=default_prompt)
# Create the template the LLM uses to route between the different chains
MULTI_PROMPT_ROUTER_TEMPLATE = """Given a raw text input to a \
language model select the model prompt best suited for the input. \
You will be given the names of the available prompts and a \
description of what the prompt is best suited for. \
You may also revise the original input if you think that revising\
it will ultimately lead to a better response from the language model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
"destination": string \ name of the prompt to use or "DEFAULT"
"next_inputs": string \ a potentially modified version of the original input
}}}}
```
REMEMBER: "destination" MUST be one of the candidate prompt \
names specified below OR it can be "DEFAULT" if the input is not\
well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input \
if you don't think any modifications are needed.
<< CANDIDATE PROMPTS >>
{destinations}
<< INPUT >>
{{input}}
<< OUTPUT (remember to include the ```json)>>
eg:
<< INPUT >>
"What is black body radiation?"
<< OUTPUT >>
```json
{{{{
    "destination": "physics",
    "next_inputs": "What is black body radiation?"
}}}}
```
"""
# Build the router chain
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
# Assemble the multi-prompt chain
chain = MultiPromptChain(router_chain=router_chain,  # the router chain
destination_chains=destination_chains,  # the destination chains
default_chain=default_chain,  # the default chain
verbose=True
)
# Question: what is black body radiation?
response = chain.run("What is black body radiation?")
print(response)
The output is:
> Entering new MultiPromptChain chain...
physics: {'input': 'What is black body radiation?'}
> Finished chain.
Black body radiation refers to the electromagnetic radiation emitted by an object that absorbs all incident radiation and reflects or transmits none. It is called "black body" because it absorbs all wavelengths of light, appearing black at room temperature.
According to Planck's law, black body radiation is characterized by a continuous spectrum of wavelengths and intensities, which depend on the temperature of the object. As the temperature increases, the peak intensity of the radiation shifts to shorter wavelengths, resulting in a change in color from red to orange, yellow, white, and eventually blue at very high temperatures.
Black body radiation is a fundamental concept in physics and has various applications, including understanding the behavior of stars, explaining the cosmic microwave background radiation, and developing technologies like incandescent light bulbs and thermal imaging devices.
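To exercise the other destination chains, you can push further questions through the same chain. The calls below are only illustrative; which destination the router picks is up to the LLM, but a math question should land on the math prompt, while a question that matches none of the candidates should fall through to the default chain.
# Expected to be routed to the "math" destination
print(chain.run("What is 2 + 2?"))
# Matches none of the candidate prompts, so the router should answer "DEFAULT"
# and default_chain handles the question directly
print(chain.run("Why does every cell in our body contain DNA?"))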