# To convert the Flask implementation above to the Pyramid framework, first make sure
# Pyramid and its related dependencies are installed.
# The Pyramid-based implementation follows:
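# This module exposes two OpenAI-compatible POST endpoints, /chat/completions and
# /embeddings, and proxies both to the aistudio SDK via the views defined below.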
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.view import view_config
import json
import random
import string
import time
# Import the backend modules/libraries you need here.
# For instance, replace 'aistudio' with your actual module/library.
import aistudio
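# If you do swap in a different backend, the call sites to update are
# aistudio.chat.create (chat completions) and aistudio.embed.embedding_v1 (embeddings).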
@view_config(route_name='chat_completions', renderer='json', request_method='POST')
def chat_completions(request):
    model = request.json.get("model", "ernie-bot")
    stream = request.json.get("stream", False)
    messages = request.json.get("messages")
    # The whole messages list is flattened into a single user prompt; keep only
    # the last 4700 characters to stay within the backend's input limit.
    messages_str = str(messages)
    if len(messages_str) > 4700:
        messages_str = messages_str[-4700:]
    chat_completion = aistudio.chat.create(
        messages=[
            {
                "role": "user",
                "content": messages_str
            }
        ]
    )
    response = chat_completion.result
    completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())
    if not stream:
        # renderer='json' serializes this dict into an OpenAI-style response body.
        return {
            "id": f"chatcmpl-{completion_id}",
            "object": "chat.completion",
            "created": completion_timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response,
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {
                "prompt_tokens": None,
                "completion_tokens": None,
                "total_tokens": None,
            },
        }
    def streaming():
        # Iterating over the result string yields it character by character,
        # which is used here to simulate token-by-token SSE streaming.
        for chunk in response:
            completion_data = {
                "id": f"chatcmpl-{completion_id}",
                "object": "chat.completion.chunk",
                "created": completion_timestamp,
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "delta": {
                            "content": chunk,
                        },
                        "finish_reason": None,
                    }
                ],
            }
            content = json.dumps(completion_data, separators=(",", ":"))
            # WSGI app_iter chunks must be bytes.
            yield f"data: {content}\n\n".encode("utf-8")
            time.sleep(0.01)
        # Final chunk signals the end of the stream.
        end_completion_data = {
            "id": f"chatcmpl-{completion_id}",
            "object": "chat.completion.chunk",
            "created": completion_timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(",", ":"))
        yield f"data: {content}\n\n".encode("utf-8")

    # Returning a Response directly bypasses the JSON renderer; the generator is
    # passed as app_iter so chunks are streamed to the client as they are produced.
    return Response(app_iter=streaming(), content_type="text/event-stream")
@view_config(route_name='embeddings', renderer='json', request_method='POST')
def embeddings(request):
    input_text_list = request.json.get("input")
    input_text = ' '.join(map(str, input_text_list))
    # The OpenAI-style "Authorization: Bearer <token>" header is not used here;
    # the aistudio SDK is assumed to be configured with credentials elsewhere.
    embeddings = aistudio.embed.embedding_v1(input=[input_text])
    time.sleep(0.9)  # brief pause, presumably to avoid backend rate limits
    embedding = embeddings["data"][0]["embedding"]
    return {
        "data": [
            {
                "embedding": embedding,
                "index": 0,
                "object": "embedding"
            }
        ],
        "model": "text-embedding-ada-002",
        "object": "list",
        "usage": {
            "prompt_tokens": None,
            "total_tokens": None
        }
    }
if __name__ == "__main__":
config = Configurator()
config.add_route('chat_completions', '/chat/completions')
config.add_route('embeddings', '/embeddings')
config.scan()
app = config.make_wsgi_app()
# server = app.serve('0.0.0.0', port=1337, debug=True)
server = make_server('0.0.0.0', 1338, app)
server.serve_forever()
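
# Example requests for manual testing (assuming the server is running locally
# on port 1338; adjust host, port, and payloads to your setup):
#
#   curl -X POST http://127.0.0.1:1338/chat/completions \
#        -H "Content-Type: application/json" \
#        -d '{"model": "ernie-bot", "stream": false, "messages": [{"role": "user", "content": "Hello"}]}'
#
#   curl -X POST http://127.0.0.1:1338/embeddings \
#        -H "Content-Type: application/json" \
#        -d '{"input": ["Hello world"]}'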