Ikala-allen committed on
Commit 8a998bc
Parent: 58f50d0

Update relation_extraction.py

Files changed (1)
  1. relation_extraction.py +26 -17
relation_extraction.py CHANGED
@@ -13,21 +13,19 @@ year={2020}
 
 # TODO: Add description of the module here
 _DESCRIPTION = """\
-This new module is designed to solve this great ML task and is crafted with a lot of care.
+This metric evaluates the quality of relation extraction output by computing micro and macro precision, recall, and F1 scores over the predicted relations.
 """
 
 
-# TODO: Add description of the arguments of the module here
 _KWARGS_DESCRIPTION = """
-Calculates how good are predictions given some references, using certain scores
+Calculates how good the predictions are, given some references, using precision, recall, and F1 score.
 Args:
-    predictions (list of list of dictionary): relation and its type of prediction
-
-    references (list of list of dictionary): references for each relation and its type
+    predictions (list of list of dictionary): A list of predicted relations from the model.
+    references (list of list of dictionary): A list of ground-truth or reference relations to compare the predictions against.
 
 Returns:
-    **output** (`dictionary` of `dictionary`s) with multiple key-value pairs
-    - **entity type** (`dictionary`): score of all of the type
+    **output** (`dictionary` of `dictionary`s): A dictionary mapping each relation type to its scoring metrics, such as precision, recall, and F1 score.
+    - **entity type** (`dictionary`): scores for the selected relation type
     - **tp** : true positive count
     - **fp** : false positive count
     - **fn** : false negative count
@@ -38,12 +36,24 @@ Returns:
     - **Macro_p** : macro precision
     - **Macro_r** : macro recall
 Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-    my_new_module = evaluate.load("my_new_module")
-    results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    print(results)
-    {'accuracy': 1.0}
+    metric_path = "Ikala-allen/relation_extraction"
+    module = evaluate.load(metric_path)
+    references = [
+        [
+            {"head": "phipigments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
+            {"head": "tinadaviespigments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
+            {"head": "A醛賦活緊緻精華", "tail": "Serum", "head_type": "product", "tail_type": "category", "type": "belongs_to"},
+        ]
+    ]
+    predictions = [
+        [
+            {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
+            {"head": "tinadaviespigments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
+        ]
+    ]
+    evaluation_scores = module.compute(predictions=predictions, references=references, mode="strict", detailed_scores=False, relation_types=[])
+    print(evaluation_scores)
+    {'tp': 1, 'fp': 1, 'fn': 2, 'p': 50.0, 'r': 33.333333333333336, 'f1': 40.0, 'Macro_f1': 25.0, 'Macro_p': 25.0, 'Macro_r': 25.0}
 """
 
 
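The micro scores (p, r, f1) pool the tp/fp/fn counts over all relation types, while the Macro_* scores average the per-type scores. A minimal sketch of that arithmetic, reproducing the example output above; the per-type (tp, fp, fn) counts are my reading of the example, not values returned by the module:

    # Sketch: reproduce the micro/macro scores from per-type counts.
    # Assumed counts from the example: for "sell", one prediction matches a
    # reference exactly (tp=1), one has the wrong head_type (fp=1) and its
    # missed reference counts as fn=1; the lone "belongs_to" reference is
    # never predicted (fn=1).
    def prf(tp, fp, fn):
        """Precision, recall, F1 as percentages, guarding zero division."""
        p = 100.0 * tp / (tp + fp) if tp + fp else 0.0
        r = 100.0 * tp / (tp + fn) if tp + fn else 0.0
        f1 = 2 * p * r / (p + r) if p + r else 0.0
        return p, r, f1

    counts = {"sell": (1, 1, 1), "belongs_to": (0, 0, 1)}

    tp, fp, fn = (sum(c[i] for c in counts.values()) for i in range(3))
    print(prf(tp, fp, fn))  # (50.0, 33.333..., 40.0) -> micro p, r, f1

    per_type = [prf(*c) for c in counts.values()]
    print([sum(s[i] for s in per_type) / len(per_type) for i in range(3)])
    # [25.0, 25.0, 25.0] -> Macro_p, Macro_r, Macro_f1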
@@ -66,6 +76,7 @@ def convert_format(data:list):
          'tail_type': ['product', 'product'...]}
         ...
     ]
+
     """
     predictions = []
     for item in data:
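From the docstring fragment above, convert_format turns each sample's list of relation dictionaries into a single dictionary of parallel lists. A minimal sketch of that conversion, assuming the keys shown in the example; the committed function may differ in details:

    # Hypothetical list-of-dicts -> dict-of-lists conversion suggested by
    # the docstring; not the module's actual code.
    def convert_format(data: list) -> list:
        keys = ("head", "head_type", "type", "tail", "tail_type")
        return [{k: [rel[k] for rel in sample] for k in keys} for sample in data]

    sample = [[
        {"head": "phipigments", "head_type": "brand", "type": "sell",
         "tail": "國際認證之色乳", "tail_type": "product"},
    ]]
    print(convert_format(sample))
    # [{'head': ['phipigments'], 'head_type': ['brand'], 'type': ['sell'],
    #   'tail': ['國際認證之色乳'], 'tail_type': ['product']}]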
@@ -84,7 +95,7 @@ def convert_format(data:list):
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class relation_extraction(evaluate.Metric):
-    """TODO: Short description of my evaluation module."""
+    """Evaluates the quality of relation extraction output."""
 
     def _info(self):
         # TODO: Specifies the evaluate.EvaluationModuleInfo object
@@ -119,8 +130,6 @@ class relation_extraction(evaluate.Metric):
         )
 
     def _download_and_prepare(self, dl_manager):
-        """Optional: download external resources useful to compute the scores"""
-        # TODO: Download external resources if needed
         pass
 
     def _compute(self, predictions, references, mode, detailed_scores=False, relation_types=[]):
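_compute takes a mode argument, and the docstring example passes mode="strict". In the usual relation-extraction evaluation convention, strict matching requires every field of a predicted relation, including the entity types, to match a reference, whereas a looser boundary-style mode would compare only the surface strings and relation type. A hedged sketch of strict matching under that assumed convention:

    # Assumed "strict" matching: a prediction is a true positive only if
    # all five fields equal those of some reference. The module's actual
    # matching logic may differ.
    STRICT_FIELDS = ("head", "head_type", "type", "tail", "tail_type")

    def is_strict_match(pred: dict, ref: dict) -> bool:
        return all(pred[f] == ref[f] for f in STRICT_FIELDS)

    pred = {"head": "phipigments", "head_type": "product", "type": "sell",
            "tail": "國際認證之色乳", "tail_type": "product"}
    ref = {"head": "phipigments", "head_type": "brand", "type": "sell",
           "tail": "國際認證之色乳", "tail_type": "product"}
    print(is_strict_match(pred, ref))  # False: head_type differs, so this
    # prediction is the false positive in the docstring example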
 