lukestanley committed
Commit e4b918c • Parent: 811e485

Linting with Black
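This commit is an automated formatting pass; no behavior changes. The exact invocation is not recorded in the commit, but a pass like this is typically produced by running Black over the touched files:

    pip install black
    black chill.py utils.py

Black normalizes quotes to double quotes, spaces out comments (`#print` becomes `# print`), enforces two blank lines around top-level definitions, and wraps or collapses lines around its 88-character limit, which accounts for every hunk below.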
chill.py CHANGED

@@ -29,17 +29,24 @@ suggestions = []
 
 start_time = time.time()
 
+
 class ImprovedText(BaseModel):
     text: str = Field(str, description="The improved text.")
 
+
 class SpicyScore(BaseModel):
     spicy_score: float = Field(float, description="The spiciness score of the text.")
 
+
 class Critique(BaseModel):
     critique: str = Field(str, description="The critique of the text.")
 
+
 class FaithfulnessScore(BaseModel):
-    faithfulness_score: float = Field(float, description="The faithfulness score of the text.")
+    faithfulness_score: float = Field(
+        float, description="The faithfulness score of the text."
+    )
+
 
 improve_prompt = """
 Your task is to rephrase inflammatory text, so it is more calm and constructive, without changing the intended meaning.
@@ -135,7 +142,6 @@ Please score the text.
 """
 
 
-
 def improve_text():
     global suggestions
     replacements = {
@@ -143,7 +149,7 @@ def improve_text():
         "previous_suggestions": json.dumps(suggestions, indent=2),
     }
     resp_json = query_ai_prompt(improve_prompt, replacements, ImprovedText)
-    #print('resp_json', resp_json)
+    # print('resp_json', resp_json)
     return resp_json["text"]
 
 
@@ -152,15 +158,11 @@ def critique_text(last_edit):
 
     # Query the AI for each of the new prompts separately
 
-    critique_resp = query_ai_prompt(
-        critique_prompt, replacements, Critique
-    )
+    critique_resp = query_ai_prompt(critique_prompt, replacements, Critique)
     faithfulness_resp = query_ai_prompt(
         faith_scorer_prompt, replacements, FaithfulnessScore
     )
-    spiciness_resp = query_ai_prompt(
-        spicy_scorer_prompt, replacements, SpicyScore
-    )
+    spiciness_resp = query_ai_prompt(spicy_scorer_prompt, replacements, SpicyScore)
 
     # Combine the results from the three queries into a single dictionary
     combined_resp = {
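An aside on the models reformatted above (an observation, not part of this commit): `Field(str, ...)` and `Field(float, ...)` pass the type object as Field's first positional argument, which Pydantic treats as the field's default value rather than a type annotation. Black only reformats and would not flag this. A minimal sketch of the conventional spelling, which marks the field as required:

    from pydantic import BaseModel, Field

    class ImprovedText(BaseModel):
        # Ellipsis (...) marks the field required; the description still
        # flows into the JSON schema used for grammar generation in utils.py.
        text: str = Field(..., description="The improved text.")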
utils.py CHANGED

@@ -2,17 +2,22 @@ import json
 from typing import Any, Dict, Union
 import requests
 
-from llama_cpp import json_schema_to_gbnf  # Only used directly to convert the JSON schema to GBNF,
+from llama_cpp import (
+    json_schema_to_gbnf,
+)  # Only used directly to convert the JSON schema to GBNF,
+
 # The main interface is the HTTP server, not the library directly.
 
 
-def llm_streaming(prompt:str, pydantic_model_class, return_pydantic_object=False) -> Union[str, Dict[str, Any]]:
+def llm_streaming(
+    prompt: str, pydantic_model_class, return_pydantic_object=False
+) -> Union[str, Dict[str, Any]]:
     schema = pydantic_model_class.model_json_schema()
-
+
     # Optional example field from schema, is not needed for the grammar generation
     if "example" in schema:
         del schema["example"]
-
+
     json_schema = json.dumps(schema)
     grammar = json_schema_to_gbnf(json_schema)
 
@@ -21,19 +26,18 @@ def llm_streaming(prompt:str, pydantic_model_class, return_pydantic_object=False
         "max_tokens": 1000,
         "grammar": grammar,
         "temperature": 1.0,
-        "messages": [
-            {
-                "role": "user",
-                "content": prompt
-            }
-        ],
+        "messages": [{"role": "user", "content": prompt}],
     }
     headers = {
         "Content-Type": "application/json",
     }
 
-    response = requests.post(
-        "http://localhost:5834/v1/chat/completions", headers=headers, json=payload, stream=True)
+    response = requests.post(
+        "http://localhost:5834/v1/chat/completions",
+        headers=headers,
+        json=payload,
+        stream=True,
+    )
     output_text = ""
     for chunk in response.iter_lines():
         if chunk:
@@ -43,10 +47,10 @@ def llm_streaming(prompt:str, pydantic_model_class, return_pydantic_object=False
             if chunk.strip() == "[DONE]":
                 break
             chunk = json.loads(chunk)
-            new_token = chunk.get('choices')[0].get('delta').get('content')
+            new_token = chunk.get("choices")[0].get("delta").get("content")
             if new_token:
                 output_text = output_text + new_token
-                print(new_token,sep='', end='', flush=True)
+                print(new_token, sep="", end="", flush=True)
 
     if return_pydantic_object:
         model_object = pydantic_model_class.model_validate_json(output_text)
@@ -55,14 +59,15 @@ def llm_streaming(prompt:str, pydantic_model_class, return_pydantic_object=False
         json_output = json.loads(output_text)
         return json_output
 
+
 def replace_text(template: str, replacements: dict) -> str:
     for key, value in replacements.items():
         template = template.replace(f"{{{key}}}", value)
     return template
 
+
 def query_ai_prompt(prompt, replacements, model_class):
     prompt = replace_text(prompt, replacements)
-    #print('prompt')
-    #print(prompt)
+    # print('prompt')
+    # print(prompt)
     return llm_streaming(prompt, model_class)
-
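For orientation, a minimal usage sketch of the helpers reformatted above. It is not part of the commit; it assumes a llama.cpp-compatible chat server is already listening on http://localhost:5834 (the URL hardcoded in llm_streaming), and the template and placeholder names here are illustrative:

    from pydantic import BaseModel, Field

    from utils import query_ai_prompt

    class Critique(BaseModel):
        critique: str = Field(..., description="The critique of the text.")

    # replace_text() fills {original_text} in the template, then llm_streaming()
    # constrains decoding to Critique's JSON schema via a GBNF grammar and
    # returns the parsed JSON as a plain dict.
    template = "Critique the following text: {original_text}"
    result = query_ai_prompt(template, {"original_text": "This take is nonsense."}, Critique)
    print(result["critique"])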