Ikala-allen committed on
Commit
82009ff
1 Parent(s): f6db68b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +28 -18
README.md CHANGED
@@ -22,14 +22,16 @@ This metric can be used in relation extraction evaluation.
22
  ## How to Use
23
  This metric takes 2 inputs, predictions and references (ground truth). Both of them are a list of lists of dictionaries, each dictionary holding an entity's name and the entity's type:
24
  ```
25
- import evaluate
 
 
26
 
27
- #### load metric
28
  >>> metric_path = "Ikala-allen/relation_extraction"
29
  >>> module = evaluate.load(metric_path)
30
 
31
- #### Define your predictions and references
32
- #### Example references (ground truth)
 
33
  >>> references = [
34
  ... [
35
  ... {"head": "phip igments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
@@ -37,7 +39,8 @@ import evaluate
37
  ... ]
38
  ... ]
39
 
40
- #### Example predictions
 
41
  >>> predictions = [
42
  ... [
43
  ... {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
@@ -45,9 +48,10 @@ import evaluate
45
  ... ]
46
  ... ]
47
 
48
- #### Calculate evaluation scores using the loaded metric
49
- >>> evaluation_scores = module.compute(predictions=predictions, references=references)
50
- >>> print(evaluation_scores)
 
51
  {'sell': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0}, 'ALL': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0, 'Macro_f1': 50.0, 'Macro_p': 50.0, 'Macro_r': 50.0}}
52
  ```
53
 
@@ -89,8 +93,9 @@ Example of only one prediction and reference:
89
  >>> metric_path = "Ikala-allen/relation_extraction"
90
  >>> module = evaluate.load(metric_path)
91
 
92
- #### Define your predictions and references
93
- #### Example references (ground truth)
 
94
  >>> references = [
95
  ... [
96
  ... {"head": "phip igments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
@@ -98,7 +103,8 @@ Example of only one prediction and reference:
98
  ... ]
99
  ... ]
100
 
101
- #### Example predictions
 
102
  >>> predictions = [
103
  ... [
104
  ... {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
@@ -106,7 +112,8 @@ Example of only one prediction and reference:
106
  ... ]
107
  ... ]
108
 
109
- #### Calculate evaluation scores using the loaded metric
 
110
  >>> evaluation_scores = module.compute(predictions=predictions, references=references)
111
  >>> print(evaluation_scores)
112
  {'sell': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0}, 'ALL': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0, 'Macro_f1': 50.0, 'Macro_p': 50.0, 'Macro_r': 50.0}}
@@ -117,8 +124,9 @@ Example with two or more prediction and reference:
117
  >>> metric_path = "Ikala-allen/relation_extraction"
118
  >>> module = evaluate.load(metric_path)
119
 
120
- #### Define your predictions and references
121
- #### Example references (ground truth)
 
122
  >>> references = [
123
  ... [
124
  ... {"head": "phip igments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
@@ -129,7 +137,8 @@ Example with two or more prediction and reference:
129
  ... ]
130
  ... ]
131
 
132
- #### Example predictions
 
133
  >>> predictions = [
134
  ... [
135
  ... {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
@@ -140,9 +149,10 @@ Example with two or more prediction and reference:
140
  ... ]
141
  ... ]
142
 
143
- #### Calculate evaluation scores using the loaded metric
144
- >>> evaluation_scores = module.compute(predictions=predictions, references=references)
145
- >>> print(evaluation_scores)
 
146
  {'sell': {'tp': 2, 'fp': 2, 'fn': 1, 'p': 50.0, 'r': 66.66666666666667, 'f1': 57.142857142857146}, 'ALL': {'tp': 2, 'fp': 2, 'fn': 1, 'p': 50.0, 'r': 66.66666666666667, 'f1': 57.142857142857146, 'Macro_f1': 57.142857142857146, 'Macro_p': 50.0, 'Macro_r': 66.66666666666667}}
147
  ```
148
 
 
22
  ## How to Use
23
  This metric takes 2 inputs, predictions and references (ground truth). Both of them are a list of lists of dictionaries, each dictionary holding an entity's name and the entity's type:
24
  ```
25
+ >>> import evaluate
26
+
27
+ load metric
28
 
 
29
  >>> metric_path = "Ikala-allen/relation_extraction"
30
  >>> module = evaluate.load(metric_path)
31
 
32
+ Define your predictions and references
33
+ Example references (ground truth)
34
+
35
  >>> references = [
36
  ... [
37
  ... {"head": "phip igments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
 
39
  ... ]
40
  ... ]
41
 
42
+ Example predictions
43
+
44
  >>> predictions = [
45
  ... [
46
  ... {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
 
48
  ... ]
49
  ... ]
50
 
51
+ Calculate evaluation scores using the loaded metric
52
+
53
+ >>> evaluation_scores = module.compute(predictions=predictions, references=references)
54
+ >>> print(evaluation_scores)
55
  {'sell': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0}, 'ALL': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0, 'Macro_f1': 50.0, 'Macro_p': 50.0, 'Macro_r': 50.0}}
56
  ```
57
 
 
93
  >>> metric_path = "Ikala-allen/relation_extraction"
94
  >>> module = evaluate.load(metric_path)
95
 
96
+ Define your predictions and references
97
+ Example references (ground truth)
98
+
99
  >>> references = [
100
  ... [
101
  ... {"head": "phip igments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
 
103
  ... ]
104
  ... ]
105
 
106
+ Example predictions
107
+
108
  >>> predictions = [
109
  ... [
110
  ... {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
 
112
  ... ]
113
  ... ]
114
 
115
+ Calculate evaluation scores using the loaded metric
116
+
117
  >>> evaluation_scores = module.compute(predictions=predictions, references=references)
118
  >>> print(evaluation_scores)
119
  {'sell': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0}, 'ALL': {'tp': 1, 'fp': 1, 'fn': 1, 'p': 50.0, 'r': 50.0, 'f1': 50.0, 'Macro_f1': 50.0, 'Macro_p': 50.0, 'Macro_r': 50.0}}
 
124
  >>> metric_path = "Ikala-allen/relation_extraction"
125
  >>> module = evaluate.load(metric_path)
126
 
127
+ Define your predictions and references
128
+ Example references (ground truth)
129
+
130
  >>> references = [
131
  ... [
132
  ... {"head": "phip igments", "head_type": "brand", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
 
137
  ... ]
138
  ... ]
139
 
140
+ Example predictions
141
+
142
  >>> predictions = [
143
  ... [
144
  ... {"head": "phipigments", "head_type": "product", "type": "sell", "tail": "國際認證之色乳", "tail_type": "product"},
 
149
  ... ]
150
  ... ]
151
 
152
+ Calculate evaluation scores using the loaded metric
153
+
154
+ >>> evaluation_scores = module.compute(predictions=predictions, references=references)
155
+ >>> print(evaluation_scores)
156
  {'sell': {'tp': 2, 'fp': 2, 'fn': 1, 'p': 50.0, 'r': 66.66666666666667, 'f1': 57.142857142857146}, 'ALL': {'tp': 2, 'fp': 2, 'fn': 1, 'p': 50.0, 'r': 66.66666666666667, 'f1': 57.142857142857146, 'Macro_f1': 57.142857142857146, 'Macro_p': 50.0, 'Macro_r': 66.66666666666667}}
157
  ```
158