app.py CHANGED
@@ -42,17 +42,17 @@ def generate(
         current_input += user
         current_input += assistant
 
+    history = current_input
     current_input += message
 
     device = "cuda"
     input_ids = tokenizer(current_input, return_tensors="pt").input_ids.to(device)
-    print(input_ids)
 
     if len(input_ids) > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[-MAX_INPUT_TOKEN_LENGTH:]
         gr.Warning("Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
 
-    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_special_tokens=
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_special_tokens=True)
     generate_kwargs = dict(
         {"input_ids": input_ids},
         streamer=streamer,
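
For context, the TextIteratorStreamer line completed by this change is typically driven by running model.generate in a background thread and consuming the streamer as an iterator. The sketch below is a minimal, self-contained illustration of that pattern, not code from this Space: the "gpt2" model id, max_new_tokens value, and the print loop are placeholder assumptions; app.py defines its own model, tokenizer, and Gradio streaming loop.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Placeholder model/tokenizer for illustration only; the Space loads its own.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Hello, my name is", return_tensors="pt").input_ids

# skip_special_tokens=True is forwarded to tokenizer.decode for each streamed chunk.
# Note: without skip_prompt=True the prompt text is also emitted by the streamer.
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_special_tokens=True)
generate_kwargs = dict(input_ids=input_ids, streamer=streamer, max_new_tokens=32)

# generate() blocks until completion, so it runs in a background thread while
# the main thread consumes the streamer and can yield partial text to the UI.
thread = Thread(target=model.generate, kwargs=generate_kwargs)
thread.start()

partial_text = ""
for new_text in streamer:
    partial_text += new_text  # in a Gradio app this running total would be yielded
print(partial_text)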