OpenSourceRonin committed
Commit 4ebfe5c
1 Parent(s): d39b1cd
Files changed (2):
  1. app.py +14 -8
  2. requirements.txt +2 -1
app.py CHANGED
@@ -4,7 +4,11 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+#VPTQ-community/Qwen2.5-72B-Instruct-v8-k65536-0-woft
+
+from vptq.app_utils import get_chat_loop_generator
+
+chat_completion = get_chat_loop_generator("VPTQ-community/Meta-Llama-3.1-70B-Instruct-v8-k32768-0-woft")
 
 
 def respond(
@@ -27,18 +31,20 @@ def respond(
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
+    for message in chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
     ):
-        token = message.choices[0].delta.content
+        token = message
 
         response += token
         yield response
 
+
+
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
requirements.txt CHANGED
@@ -1 +1,2 @@
-huggingface_hub==0.22.2
+huggingface_hub==0.22.2
+https://github.com/microsoft/VPTQ/releases/download/v0.0.1/vptq-0.0.1-cp310-cp310-manylinux1_x86_64.whl
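Note on the added requirement: installing VPTQ from this prebuilt wheel pins it to the environment encoded in the filename, CPython 3.10 (cp310) on manylinux1 x86_64. pip will refuse the wheel on any other interpreter or platform, so the Space's runtime is presumably Python 3.10 on x86-64 Linux.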