Adjusted for Hebrew

Files changed:
- app.py +3 -6
- scripts/fix_harness_import.py +0 -11
- src/about.py +1 -1
- src/envs.py +1 -1
app.py
CHANGED

```diff
@@ -30,14 +30,11 @@ from src.envs import API, DEVICE, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_R
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
 
-
-subprocess.run(["python", "scripts/fix_harness_import.py"])
-
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
 
-def launch_backend():
-    _ = subprocess.run(["python", "main_backend.py"])
+# def launch_backend():
+#     _ = subprocess.run(["python", "main_backend.py"])
 
 try:
     print(EVAL_REQUESTS_PATH)
@@ -348,6 +345,6 @@ with demo:
 
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
-scheduler.add_job(launch_backend, "interval", seconds=100) # will only allow one job to be run at the same time
+# scheduler.add_job(launch_backend, "interval", seconds=100) # will only allow one job to be run at the same time
 scheduler.start()
 demo.queue(default_concurrency_limit=40).launch()
```
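Both jobs here run on APScheduler's BackgroundScheduler, and the comment on the now-disabled line reflects an APScheduler default: add_job uses max_instances=1, so a new run of a job is skipped while the previous run is still in flight. A minimal runnable sketch of the pattern (the job body is a stand-in for the real restart_space, and the intervals are shortened so the demo actually fires):

```python
# Minimal sketch of the scheduling pattern in app.py (pip install apscheduler).
# The job body is a stand-in; the interval is shortened for demonstration.
import time

from apscheduler.schedulers.background import BackgroundScheduler

def restart_space():
    # In app.py this calls API.restart_space(repo_id=REPO_ID).
    print("restarting space...")

scheduler = BackgroundScheduler()
# add_job defaults to max_instances=1: if a run is still going when the
# next interval hits, the new run is skipped rather than overlapped.
scheduler.add_job(restart_space, "interval", seconds=2)
scheduler.start()

time.sleep(5)          # in app.py, Gradio's launch() keeps the process alive
scheduler.shutdown()
```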
scripts/fix_harness_import.py
DELETED

```diff
@@ -1,11 +0,0 @@
-"""This file should be used after pip install -r requirements.
-It creates a folder not ported during harness package creation (as they don't use a Manifest file atm and it ignore `.json` files).
-It will need to be updated if we want to use the harness' version of big bench to actually copy the json files.
-"""
-import os
-
-import lm_eval
-
-if __name__ == "__main__":
-    lm_eval_path = lm_eval.__path__[0]
-    os.makedirs(os.path.join(lm_eval_path, "datasets", "bigbench_resources"), exist_ok=True)
```
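The deleted helper worked by patching the installed lm-eval package in place: lm_eval.__path__[0] resolves to the package's installation directory, and the script created the datasets/bigbench_resources folder there because the harness's packaging skips .json files. A self-contained sketch of that locate-and-patch pattern, with an import guard added so it runs even when the harness is not installed:

```python
# Sketch of the locate-and-patch pattern from the deleted script:
# find where a package is installed, then create a folder inside it.
import os

try:
    import lm_eval
    lm_eval_path = lm_eval.__path__[0]  # installation directory of the package
    target = os.path.join(lm_eval_path, "datasets", "bigbench_resources")
    os.makedirs(target, exist_ok=True)
    print(f"created {target}")
except ImportError:
    print("lm_eval is not installed; nothing to patch")
```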
src/about.py
CHANGED

```diff
@@ -14,7 +14,7 @@ class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
     task0 = Task("anli_r1", "acc", "ANLI")
     task1 = Task("logiqa", "acc_norm", "LogiQA")
-
+
 NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------
 
```
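For context, each task0/task1 entry wraps a benchmark key, a metric key, and a display column in a small Task structure collected in the Tasks enum. A minimal sketch, assuming the field names from the standard Hugging Face leaderboard template rather than this repo's exact code:

```python
# Minimal sketch of the Task/Tasks structure used by src/about.py
# (field names follow the leaderboard template; treat them as an assumption).
from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str   # task_key in the results json
    metric: str      # metric_key in the results json
    col_name: str    # column name shown on the leaderboard

class Tasks(Enum):
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")

# Example: derive the leaderboard columns from the enum.
print([task.value.col_name for task in Tasks])  # ['ANLI', 'LogiQA']
```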
src/envs.py
CHANGED

```diff
@@ -6,7 +6,7 @@ from huggingface_hub import HfApi
 # ----------------------------------
 TOKEN = os.environ.get("TOKEN") # A read/write token for your org
 
-OWNER = "
+OWNER = "dicta-hebrew-llm-leaderboard" # Change to your org - don't forget to create a results and request file
 DEVICE = "cpu" # "cuda:0" if you add compute
 LIMIT = 20 # !!!! Should be None for actual evaluations!!!
 # ----------------------------------
```
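The OWNER constant feeds the repo ids the app reads and writes (REPO_ID for restart_space, plus the requests and results datasets). A minimal sketch of how such an envs file typically composes them; the repo names here follow the leaderboard template and are assumptions, not verified against this repo:

```python
# Sketch of how src/envs.py typically uses OWNER (repo names follow the
# leaderboard template and are assumptions for this particular Space).
import os

from huggingface_hub import HfApi

TOKEN = os.environ.get("TOKEN")  # read/write token, set in the Space secrets
OWNER = "dicta-hebrew-llm-leaderboard"

REPO_ID = f"{OWNER}/leaderboard"   # the Space itself, used by restart_space
QUEUE_REPO = f"{OWNER}/requests"   # dataset holding pending eval requests
RESULTS_REPO = f"{OWNER}/results"  # dataset holding finished eval results

API = HfApi(token=TOKEN)
```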