Spaces:
Running
remove remaining st.secrets['secrets']
app.py CHANGED
@@ -144,7 +144,16 @@ data = load_data(data_path)
cache = None # load_content_cache(cache_path)

try:
-
+    s['secrets']['LLAMA2_ENDPOINT_UPLIMIT']
+    # # st.write("Secrets loaded from secrets.toml")
+    # # st.write("HF_TOKEN", hf_token)
+    # # st.write("Loading secrets from secrets.toml")
+    # Wapi_key = st.secrets['secrets']['WEAVIATE_API_KEY']
+    # url = st.secrets['secrets']['WEAVIATE_ENDPOINT']
+    # openai_api_key = st.secrets['secrets']['OPENAI_API_KEY']
+
+    # hf_token = st.secrets['secrets']['LLAMA2_ENDPOINT_HF_TOKEN_chris']
+    # hf_endpoint = st.secret
    # for Huggingface (no [secrets] section)
    Wapi_key = st.secrets['WEAVIATE_API_KEY']
    url = st.secrets['WEAVIATE_ENDPOINT']
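For context, st.secrets behaves like a nested dictionary: on Hugging Face Spaces the values sit at the top level, while a local secrets.toml with a [secrets] table nests them one level deeper, which is the mismatch this commit cleans up. The get_secret helper below is a minimal sketch of that distinction and is not part of app.py:

import streamlit as st

def get_secret(key: str):
    # Illustrative helper (not part of app.py): prefer the flat layout used on
    # Hugging Face Spaces, fall back to a nested [secrets] table from a local
    # secrets.toml if the flat key is absent.
    if key in st.secrets:
        return st.secrets[key]
    return st.secrets["secrets"][key]

Wapi_key = get_secret("WEAVIATE_API_KEY")
url = get_secret("WEAVIATE_ENDPOINT")
openai_api_key = get_secret("OPENAI_API_KEY")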
@@ -503,7 +512,7 @@ def main():


GPTllm = GPT_Turbo(model=model_nameGPT,
-                   api_key=
+                   api_key=openai_api_key
try:
    # inserts chat stream from LLM
    for resp in GPTllm.get_chat_completion(prompt=prompt,
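The edit above (and the matching one in the last hunk) passes the key that was already loaded from st.secrets into GPT_Turbo instead of re-reading st.secrets['secrets'] at the call site. A purely illustrative stand-in for that pattern, assuming only the model and api_key keyword arguments visible in the diff:

class GPT_Turbo:
    # Purely illustrative stand-in for the app's wrapper class; only the
    # constructor arguments shown in the diff (model, api_key) are assumed.
    def __init__(self, model: str, api_key: str):
        self.model = model
        self.api_key = api_key  # injected by the caller, not read from st.secrets here

openai_api_key = "sk-..."  # in app.py this value comes out of st.secrets
GPTllm = GPT_Turbo(model="gpt-3.5-turbo-0613", api_key=openai_api_key)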
@@ -676,11 +685,6 @@ def reword_query(query, guest, model_name='llama2-13b-chat', response_processing
"""
prompt = llama_prompt.format(**prompt_fields2)

-hf_token = st.secrets['secrets']['LLAMA2_ENDPOINT_HF_TOKEN_chris']
-# hf_token = st.secrets['secrets']['LLAMA2_ENDPOINT_HF_TOKEN']
-
-hf_endpoint = st.secrets['secrets']['LLAMA2_ENDPOINT_UPLIMIT']
-
headers = {"Authorization": f"Bearer {hf_token}",
           "Content-Type": "application/json",}

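The headers dict that survives this hunk still references hf_token, so that value presumably comes from a lookup elsewhere in the file after this change. A hedged sketch of how such an endpoint is typically called with these headers; the requests usage, payload shape, and placeholder URL/token are assumptions, not code from app.py:

import requests

hf_token = "hf_..."                            # placeholder; the real token lives in st.secrets
hf_endpoint = "https://your-endpoint.example"  # placeholder, not the app's real endpoint

headers = {"Authorization": f"Bearer {hf_token}",
           "Content-Type": "application/json",}

# Illustrative request shape for a hosted text-generation endpoint.
payload = {"inputs": "reworded query goes here",
           "parameters": {"max_new_tokens": 64}}
response = requests.post(hf_endpoint, headers=headers, json=payload)
print(response.status_code, response.text)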
@@ -733,7 +737,7 @@ def reword_query(query, guest, model_name='llama2-13b-chat', response_processing
model_ids = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613']
model_name = model_ids[1]
GPTllm = GPT_Turbo(model=model_name,
-                   api_key=
+                   api_key=openai_api_key)

openai_prompt = """
{your_task}\n