meg-huggingface committed
Commit a49910a
1 Parent(s): 7689092

Updating to my own version

Files changed (3):
  1. app.py +2 -2
  2. main_backend_harness.py +1 -1
  3. src/envs.py +1 -1
app.py CHANGED
@@ -8,8 +8,8 @@ configure_root_logger()
 from functools import partial
 
 import gradio as gr
-from main_backend_lighteval import run_auto_eval
-# from main_backend_harness import run_auto_eval
+#from main_backend_lighteval import run_auto_eval
+from main_backend_harness import run_auto_eval
 from src.display.log_visualizer import log_file_to_html_string
 from src.display.css_html_js import dark_mode_gradio_js
 from src.envs import REFRESH_RATE, REPO_ID, QUEUE_REPO, RESULTS_REPO
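This hunk flips the Space from the lighteval backend to the harness backend. The swap stays a one-line change only because both modules are assumed to expose a run_auto_eval entry point with the same call signature; a minimal sketch of that shared contract (illustrative, not the repository's actual implementation):

# Sketch only: both main_backend_lighteval and main_backend_harness are
# assumed to expose an identically-callable run_auto_eval, so app.py only
# needs to change which module it imports from.
def run_auto_eval():
    """Evaluate the next pending request from the queue (illustrative)."""
    # 1. download the request queue from QUEUE_REPO
    # 2. pick the highest-priority pending request
    # 3. run the chosen evaluation suite (harness here, lighteval before)
    # 4. upload scores to RESULTS_REPO and mark the request finished
    raise NotImplementedError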
main_backend_harness.py CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import snapshot_download
 
 logging.getLogger("openai").setLevel(logging.WARNING)
 
-from backend.run_eval_suite_harness import run_evaluation
+from src.backend.run_eval_suite_harness import run_evaluation
 from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
 from src.backend.sort_queue import sort_models_by_priority
 
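The fix here is the missing src. package prefix: run_eval_suite_harness lives next to manage_requests and sort_queue under src/backend/, so the old import fails with ModuleNotFoundError when the script runs from the repository root. The layout implied by the imports (inferred from this diff, not verified against the repo):

.
├── app.py
├── main_backend_harness.py             # run from the repo root
└── src/
    └── backend/
        ├── run_eval_suite_harness.py   # defines run_evaluation
        ├── manage_requests.py
        └── sort_queue.py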
src/envs.py CHANGED
@@ -6,7 +6,7 @@ from huggingface_hub import HfApi
 # ----------------------------------
 TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
 
-OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset
+OWNER = "meg" # Change to your org - don't forget to create a results and request dataset
 
 # For harness evaluations
 DEVICE = "cpu" # "cuda:0" if you add compute, for harness evaluations
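OWNER names the Hub org or user that owns the requests and results datasets, and the repo IDs app.py imports (REPO_ID, QUEUE_REPO, RESULTS_REPO) are typically derived from it further down in envs.py. A hedged sketch of that wiring, assuming the usual demo-leaderboard naming (the repo names below are illustrative, not taken from this commit):

import os
from huggingface_hub import HfApi

TOKEN = os.environ.get("HF_TOKEN")  # read/write token for your org
OWNER = "meg"                       # org/user that owns the datasets

# Assumed naming convention; the real envs.py may differ.
REPO_ID = f"{OWNER}/leaderboard-backend"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

API = HfApi(token=TOKEN)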