Spaces: Build error

Duplicate from gsaivinay/Llama-2-13B-GGML-server

Co-authored-by: SVG <[email protected]>
- .gitattributes +35 -0
- Dockerfile +23 -0
- README.md +11 -0
- dialogue.py +239 -0
- main.py +113 -0
- requirements.txt +11 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,23 @@
+FROM python:latest
+
+ENV PYTHONUNBUFFERED 1
+
+EXPOSE 8000
+
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY requirements.txt ./
+RUN pip install --upgrade pip && \
+    pip install -r requirements.txt
+
+
+COPY --chown=user . $HOME/app
+
+RUN ls -al
+
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
README.md
ADDED
@@ -0,0 +1,11 @@
+---
+title: Llama-2-13B GGML Server
+emoji: 🚀
+colorFrom: red
+colorTo: indigo
+sdk: docker
+app_port: 8000
+duplicated_from: gsaivinay/Llama-2-13B-GGML-server
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
dialogue.py
ADDED
@@ -0,0 +1,239 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Type, TypeVar, Union
+
+from huggingface_hub import ModelHubMixin, hf_hub_download
+
+# Generic variable that is either ModelHubMixin or a subclass thereof
+T = TypeVar("T", bound="ModelHubMixin")
+
+TEMPLATE_FILENAME = "dialogue_template.json"
+IGNORE_INDEX = -100
+
+
+@dataclass
+class DialogueTemplate(ModelHubMixin):
+    """Converts all turns of a dialogue between a user and assistant to a standardized format.
+    Adapted from OpenAI's ChatML (https://github.com/openai/openai-python/blob/main/chatml.md) and Vicuna (https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py)
+    """
+
+    system: str
+    messages: Optional[List[Dict[str, str]]] = None
+    system_token: str = "<|system|>"
+    user_token: str = "<|user|>"
+    assistant_token: str = "<|assistant|>"
+    end_token: str = "<|end|>"
+
+    def get_training_prompt(self) -> str:
+        prompt = self.system_token + "\n" + self.system + self.end_token + "\n"
+        if self.messages is None:
+            raise ValueError("Dialogue template must have at least one message.")
+        for message in self.messages:
+            if message["role"] == "user":
+                prompt += self.user_token + "\n" + message["content"] + self.end_token + "\n"
+            else:
+                prompt += self.assistant_token + "\n" + message["content"] + self.end_token + "\n"
+        return prompt
+
+    def get_inference_prompt(self) -> str:
+        prompt = self.system_token + "\n" + self.system + self.end_token + "\n"
+        if self.messages is None:
+            raise ValueError("Dialogue template must have at least one message.")
+        for message in self.messages:
+            if message["role"] == "user":
+                prompt += self.user_token + "\n" + message["content"] + self.end_token + "\n"
+            else:
+                prompt += self.assistant_token + "\n" + message["content"] + self.end_token + "\n"
+        prompt += self.assistant_token
+        return prompt
+
+    def get_dialogue(self):
+        """Helper function to format the messages as an easy-to-read dialogue."""
+        prompt = ""
+        if self.messages is None:
+            raise ValueError("Dialogue template must have at least one message.")
+        for message in self.messages:
+            if message["role"] == "user":
+                prompt += "\n\nHuman: " + message["content"]
+            else:
+                prompt += "\n\nAssistant: " + message["content"]
+        return prompt
+
+    def get_special_tokens(self) -> List[str]:
+        return [self.system_token, self.user_token, self.assistant_token, self.end_token]
+
+    def copy(self):
+        return DialogueTemplate(
+            system=self.system,
+            messages=self.messages,
+            system_token=self.system_token,
+            user_token=self.user_token,
+            assistant_token=self.assistant_token,
+            end_token=self.end_token,
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {k: v for k, v in asdict(self).items()}
+
+    @classmethod
+    def from_dict(cls, data):
+        return DialogueTemplate(
+            system=data["system"] if "system" in data else "",
+            messages=data["messages"] if "messages" in data else None,
+            system_token=data["system_token"] if "system_token" in data else "<|system|>",
+            user_token=data["user_token"] if "user_token" in data else "<|user|>",
+            assistant_token=data["assistant_token"] if "assistant_token" in data else "<|assistant|>",
+            end_token=data["end_token"] if "end_token" in data else "<|end|>",
+        )
+
+    def _save_pretrained(self, save_directory: Union[str, Path]) -> None:
+        save_directory = Path(save_directory)
+        save_directory.mkdir(exist_ok=True)
+        with open(save_directory / "dialogue_template.json", "w") as f:
+            json.dump(self.to_dict(), f, indent=2)
+
+    @classmethod
+    def _from_pretrained(
+        cls: Type[T],
+        *,
+        model_id: str,
+        revision: Optional[str],
+        cache_dir: Optional[Union[str, Path]],
+        force_download: bool,
+        proxies: Optional[Dict],
+        resume_download: bool,
+        local_files_only: bool,
+        token: Optional[Union[str, bool]],
+        **model_kwargs,
+    ) -> T:
+        """Loads the dialogue template from a local directory or the Huggingface Hub.
+        Args:
+            model_id (`str`):
+                ID of the model to load from the Huggingface Hub (e.g. `bigscience/bloom`).
+            revision (`str`, *optional*):
+                Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. Defaults to the
+                latest commit on `main` branch.
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding
+                the existing cache.
+            resume_download (`bool`, *optional*, defaults to `False`):
+                Whether to delete incompletely received files. Will attempt to resume the download if such a file exists.
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint (e.g., `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`).
+            token (`str` or `bool`, *optional*):
+                The token to use as HTTP bearer authorization for remote files. By default, it will use the token
+                cached when running `huggingface-cli login`.
+            cache_dir (`str`, `Path`, *optional*):
+                Path to the folder where cached files are stored.
+            local_files_only (`bool`, *optional*, defaults to `False`):
+                If `True`, avoid downloading the file and return the path to the local cached file if it exists.
+            model_kwargs:
+                Additional keyword arguments passed along to the [`~ModelHubMixin._from_pretrained`] method.
+        """
+        if os.path.isdir(model_id):  # Can either be a local directory
+            print("Loading dialogue template from local directory")
+            template_file = os.path.join(model_id, TEMPLATE_FILENAME)
+        else:  # Or a template on the Hub
+            template_file = hf_hub_download(  # Download from the hub, passing same input args
+                repo_id=model_id,
+                filename=TEMPLATE_FILENAME,
+                revision=revision,
+                cache_dir=cache_dir,
+                force_download=force_download,
+                proxies=proxies,
+                resume_download=resume_download,
+                token=token,
+                local_files_only=local_files_only,
+            )
+
+        # Load template
+        with open(template_file, "r") as f:
+            data = json.load(f)
+        return cls.from_dict(data=data)
+
+
+# A shortened version of the system message in Anthropic's HHH prompt: https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt
+default_template = DialogueTemplate(
+    system="Below is a dialogue between a human user and an AI assistant. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed.",
+)
+
+# OpenAI and OpenAssistant train on few to no system messages.
+# TODO: consider defining this as the `default` template
+no_system_template = DialogueTemplate(
+    system="",
+)
+
+alpaca_template = DialogueTemplate(
+    system="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
+    user_token="### Instruction:",
+    assistant_token="### Response:",
+)
+
+SUPPORTED_DIALOGUE_TEMPLATES = {
+    "default": default_template,
+    "no_system": no_system_template,
+    "alpaca": alpaca_template,
+}
+
+
+def get_dialogue_template(template: str) -> DialogueTemplate:
+    if template not in SUPPORTED_DIALOGUE_TEMPLATES.keys():
+        raise ValueError(f"Template {template} is not supported!")
+    return SUPPORTED_DIALOGUE_TEMPLATES[template].copy()
+
+
+def prepare_dialogue(example, dialogue_template, is_train=True):
+    """Format example to single- or multi-turn dialogue."""
+    # TODO: make this simpler by just ensuring every dataset has a messages column
+    if "messages" in example.keys() and example["messages"] is not None:
+        dialogue_template.messages = example["messages"]
+    elif all(k in example.keys() for k in ("prompt", "completion")):
+        # Construct single-turn dialogue from prompt and completion
+        dialogue_template.messages = [
+            {"role": "user", "content": example["prompt"]},
+            {"role": "assistant", "content": example["completion"]},
+        ]
+    elif "prompt" in example.keys():
+        # Construct single-turn dialogue from prompt (inference only)
+        dialogue_template.messages = [
+            {"role": "user", "content": example["prompt"]},
+        ]
+    else:
+        raise ValueError(
+            f"Could not format example as dialogue! Require either `messages` or `[prompt, completion]` or `[prompt]` keys but found {list(example.keys())}"
+        )
+    if is_train:
+        example["text"] = dialogue_template.get_training_prompt()
+    else:
+        example["text"] = dialogue_template.get_inference_prompt()
+    return example
+
+
+def mask_user_labels(tokenizer, dialogue_template, labels):
+    """Masks the user turns of a dialogue from the loss"""
+    user_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.user_token)
+    assistant_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.assistant_token)
+    for idx, label_id in enumerate(labels):
+        if label_id == user_token_id:
+            current_idx = idx
+            # Check bounds before indexing so a user turn that ends the
+            # sequence does not read past the end of `labels`
+            while current_idx < len(labels) and labels[current_idx] != assistant_token_id:
+                labels[current_idx] = IGNORE_INDEX
+                current_idx += 1
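
For orientation (not part of the diff): a minimal sketch of how DialogueTemplate is consumed by main.py below. The example messages are made up; the token layout follows get_inference_prompt above.

    from dialogue import DialogueTemplate

    # Hypothetical single-turn request, for illustration only
    template = DialogueTemplate(
        system="Below is a conversation between a human user and a helpful AI coding assistant.",
        messages=[{"role": "user", "content": "Write a haiku about servers."}],
    )

    # get_inference_prompt() ends with the assistant token so the model
    # continues the dialogue from the assistant's turn:
    print(template.get_inference_prompt())
    # <|system|>
    # Below is a conversation between a human user and a helpful AI coding assistant.<|end|>
    # <|user|>
    # Write a haiku about servers.<|end|>
    # <|assistant|>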
main.py
ADDED
@@ -0,0 +1,113 @@
+import json
+from typing import Any, Generator, List
+import fastapi
+import markdown
+import uvicorn
+from ctransformers import AutoModelForCausalLM
+from fastapi import HTTPException
+from fastapi.responses import HTMLResponse
+from fastapi.middleware.cors import CORSMiddleware
+from sse_starlette.sse import EventSourceResponse
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+from dialogue import DialogueTemplate
+
+# llm = AutoModelForCausalLM.from_pretrained("gsaivinay/airoboros-13B-gpt4-1.3-GGML",
+#                                            model_file="airoboros-13b-gpt4-1.3.ggmlv3.q4_1.bin",
+#                                            model_type="llama")
+
+llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-13B-chat-GGML",
+                                           model_file="llama-2-13b-chat.ggmlv3.q2_K.bin",
+                                           model_type="llama")
+
+app = fastapi.FastAPI(title="Starchat Beta")
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+@app.get("/")
+async def index():
+    with open("README.md", "r", encoding="utf-8") as readme_file:
+        md_template_string = readme_file.read()
+    html_content = markdown.markdown(md_template_string)
+    return HTMLResponse(content=html_content, status_code=200)
+
+
+@app.get("/stream")
+async def chat(prompt="<|user|> Write an express server with server sent events. <|assistant|>"):
+    tokens = llm.tokenize(prompt)
+    async def server_sent_events(chat_chunks, llm):
+        yield prompt
+        for chat_chunk in llm.generate(chat_chunks):
+            yield llm.detokenize(chat_chunk)
+        yield ""
+
+    return EventSourceResponse(server_sent_events(tokens, llm))
+
+
+class ChatCompletionRequestMessage(BaseModel):
+    role: Literal["system", "user", "assistant"] = Field(
+        default="user", description="The role of the message."
+    )
+    content: str = Field(default="", description="The content of the message.")
+
+class ChatCompletionRequest(BaseModel):
+    messages: List[ChatCompletionRequestMessage] = Field(
+        default=[], description="A list of messages to generate completions for."
+    )
+
+system_message = "Below is a conversation between a human user and a helpful AI coding assistant."
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: ChatCompletionRequest):
+    kwargs = request.dict()
+    dialogue_template = DialogueTemplate(
+        system=system_message, messages=kwargs['messages']
+    )
+    prompt = dialogue_template.get_inference_prompt()
+    tokens = llm.tokenize(prompt)
+
+    try:
+        chat_chunks = llm.generate(tokens)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+    async def format_response(chat_chunks: Generator) -> Any:
+        for chat_chunk in chat_chunks:
+            response = {
+                'choices': [
+                    {
+                        'message': {
+                            'role': 'system',
+                            'content': llm.detokenize(chat_chunk)
+                        },
+                        'finish_reason': 'stop' if llm.detokenize(chat_chunk) == "[DONE]" else 'unknown'
+                    }
+                ]
+            }
+            yield f"data: {json.dumps(response)}\n\n"
+        yield "event: done\ndata: {}\n\n"
+
+    return EventSourceResponse(format_response(chat_chunks), media_type="text/event-stream")
+
+@app.post("/v0/chat/completions")
+async def chatV0(request: ChatCompletionRequest, response_mode=None):
+    kwargs = request.dict()
+    dialogue_template = DialogueTemplate(
+        system=system_message, messages=kwargs['messages']
+    )
+    prompt = dialogue_template.get_inference_prompt()
+    tokens = llm.tokenize(prompt)
+    async def server_sent_events(chat_chunks, llm):
+        for token in llm.generate(chat_chunks):
+            yield dict(data=llm.detokenize(token))
+        yield dict(data="[DONE]")
+
+    return EventSourceResponse(server_sent_events(tokens, llm))
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=8000)
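
For reference (not part of the diff): a minimal client sketch for the /v0/chat/completions SSE endpoint, assuming the server is reachable on localhost:8000 (the port exposed by the Dockerfile). The `requests` dependency and the payload contents are illustrative, not part of this commit.

    import requests

    payload = {"messages": [{"role": "user", "content": "Hello!"}]}
    with requests.post(
        "http://localhost:8000/v0/chat/completions",
        json=payload,
        stream=True,
    ) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            # sse-starlette emits lines of the form "data: <token text>"
            if not line or not line.startswith("data: "):
                continue
            data = line[len("data: "):]
            if data == "[DONE]":  # sentinel yielded by chatV0 when generation ends
                break
            print(data, end="", flush=True)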
requirements.txt
ADDED
@@ -0,0 +1,11 @@
+uvicorn
+markdown
+fastapi
+loguru
+torch
+numpy
+transformers
+ctransformers
+accelerate
+langchain
+sse_starlette