Build error
Update app.py
app.py CHANGED
@@ -1,6 +1,4 @@
 import gradio as gr
-
-import git
 import os, gc, torch
 from datetime import datetime
 from huggingface_hub import hf_hub_download
@@ -10,14 +8,17 @@ gpu_h = nvmlDeviceGetHandleByIndex(0)
 ctx_limit = 1024
 title1 = "RWKV-4-Raven-7B-v9-Eng99%-Other1%-20230412-ctx8192"
 
+os.environ["RWKV_JIT_ON"] = '1'
+os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
+
 from rwkv.model import RWKV
-
-
+model_path1 = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title1}.pth")
+model1 = RWKV(model1=model_path1, strategy='cuda fp16i8 *8 -> cuda fp16')
 from rwkv.utils import PIPELINE, PIPELINE_ARGS
-pipeline = PIPELINE(
+pipeline = PIPELINE(model1, "20B_tokenizer.json")
+
+import git
 
-os.environ["RWKV_JIT_ON"] = '1'
-os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
 os.system('git clone https://github.com/Edresson/Coqui-TTS -b multilingual-torchaudio-SE TTS')
 os.system('pip install -q -e TTS/')
 os.system('pip install -q torchaudio==0.9.0')
@@ -238,7 +239,7 @@ def evaluate(
     occurrence = {}
     state = None
     for i in range(int(200)):
-        out, state =
+        out, state = model1.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
         for n in occurrence:
             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
 
@@ -266,7 +267,7 @@ block = gr.Blocks()
 with block:
     with gr.Group():
         gr.Markdown(
-            """
+            """ # <center>🥳💬💕 - TalktoAI,随时随地,谈天说地!</center>
             ## <center>🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!</center>
             ### <center>注意❗:请不要输入或生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及娱乐使用。用户输入或生成的内容与程序开发者无关,请自觉合法合规使用,违反者一切后果自负。</center>
 
|
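A likely cause of the Space's "Build error" status: the rwkv package's RWKV constructor takes the checkpoint path as model=, so the added line model1 = RWKV(model1=model_path1, ...) would raise a TypeError as soon as app.py runs. The sketch below is not the author's code; it reuses the repo id, strategy string, and tokenizer file from the diff and only corrects the keyword. It also keeps the ordering the commit establishes, since RWKV_JIT_ON / RWKV_CUDA_ON are read when rwkv.model is imported.

import os

# These must be set before rwkv.model is imported.
os.environ["RWKV_JIT_ON"] = "1"
os.environ["RWKV_CUDA_ON"] = "1"  # '1' = use the CUDA kernel for seq mode (much faster)

from huggingface_hub import hf_hub_download
from rwkv.model import RWKV
from rwkv.utils import PIPELINE

title1 = "RWKV-4-Raven-7B-v9-Eng99%-Other1%-20230412-ctx8192"
model_path1 = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title1}.pth")

# The constructor keyword is `model=`, not `model1=` as written in the diff.
model1 = RWKV(model=model_path1, strategy="cuda fp16i8 *8 -> cuda fp16")
pipeline = PIPELINE(model1, "20B_tokenizer.json")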
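The hunk at @@ -238,7 +239,7 @@ only swaps in the model1.forward(...) call, but that line sits inside a token-by-token sampling loop: the first step feeds the encoded prompt (clipped to ctx_limit), later steps feed only the previously sampled token, and the occurrence dict applies presence/frequency penalties before sampling. A sketch of how such a loop is typically completed with the rwkv PIPELINE helpers, assuming the model1 / pipeline / ctx_limit objects from the sketch above and treating the temperature, top_p, and end-of-text id as placeholders not taken from the diff:

def generate_sketch(ctx, args, token_count=200):
    all_tokens = []
    occurrence = {}   # token id -> how many times it has been emitted
    state = None      # RWKV recurrent state, threaded through forward()
    out_str = ""
    for i in range(int(token_count)):
        # First iteration: the whole prompt (truncated to ctx_limit tokens);
        # afterwards: just the last sampled token.
        out, state = model1.forward(
            pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        # Presence/frequency penalties, as in the diff.
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token == 0:  # assumed end-of-text id
            break
        all_tokens.append(token)
        occurrence[token] = occurrence.get(token, 0) + 1
        out_str += pipeline.decode([token])
    return out_str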