Create evaluation_config.py
Browse files
my_model/config/evaluation_config.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Configuration constants for the model evaluation pipeline."""

import os  # required for os.getenv below; was missing and caused a NameError on import

# Minimum fuzzy-match score (0-100 scale) for an answer to count as a match.
FUZZY_SCORE = 80
USE_FUZZY = False  # this was used at the initial stage of the evaluation only, then manually reviewed as detailed in the report.

# Path to the evaluation results spreadsheet.
EVALUATION_DATA_PATH = 'my_model/results/evaluation_results.xlsx'

MODEL_NAMES = ['13b', '7b']  # LLaMA-2 variants used.
MODEL_CONFIGURATIONS = ['caption+detic', 'caption+yolov5', 'only_caption', 'only_detic', 'only_yolov5']  # Ablation study on multiple model configurations

# API key read from the environment; None if the variable is unset.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# GPT-4 request parameters (seed fixed for reproducibility — TODO confirm
# the serving API honors it deterministically).
GPT4_MAX_TOKENS = 100
GPT4_TEMPERATURE = 0.1
GPT4_SEED = 123
|