Update negbleurt.py
negbleurt.py CHANGED (+3 -15)
@@ -16,15 +16,15 @@ _KWARGS_DESCRIPTION = """
 Calculates the NegBLEURT scores between references and predictions
 Args:
     predictions: list of predictions to score. Each prediction should be a string.
-    references:
+    references: list of references, one for each prediction. Each reference should be a string
     batch_size: batch_size for model inference. Default is 16
 Returns:
     negBLEURT: List of NegBLEURT scores for all predictions
 Examples:
     >>> negBLEURT = evaluate.load('MiriUll/negbleurt')
     >>> predictions = ["Ray Charles is a legend.", "Ray Charles isn’t legendary."]
-    >>>
-    >>> results = negBLEURT.compute(predictions=predictions, references=
+    >>> references = ["Ray Charles is legendary.", "Ray Charles is legendary."]
+    >>> results = negBLEURT.compute(predictions=predictions, references=references)
     >>> print(results)
     {'negBLERUT': [0.8409, 0.2601]}
 """
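The completed example now reads end to end. For context, a caller-side sketch of the same pattern, assuming the metric is published on the Hub as MiriUll/negbleurt and that evaluate plus the model dependencies are installed:

    import evaluate

    # Load the NegBLEURT metric from the Hub; assumes the MiriUll/negbleurt
    # repository is reachable and its model weights can be downloaded.
    negbleurt = evaluate.load("MiriUll/negbleurt")

    predictions = ["Ray Charles is a legend.", "Ray Charles isn't legendary."]
    # One reference per prediction; the two lists must have the same length.
    references = ["Ray Charles is legendary.", "Ray Charles is legendary."]

    # batch_size is forwarded to _compute; 16 is the documented default.
    results = negbleurt.compute(predictions=predictions, references=references, batch_size=16)
    print(results)  # a dict with one score per prediction/reference pair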
@@ -37,12 +37,6 @@ class NegBLEURT(evaluate.Metric):
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
             features=[
-                datasets.Features(
-                    {
-                        "references": datasets.Value("string", id=None),
-                        "predictions": datasets.Value("string", id="sequence"),
-                    }
-                ),
                 datasets.Features(
                     {
                         "predictions": datasets.Value("string", id="sequence"),
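The hunk context stops at the "predictions" entry of the surviving schema, so the full remaining Features declaration is not visible here; presumably it pairs each prediction string with a reference string, roughly as in this sketch (the "references" line is an assumption, not shown in the diff):

    import datasets

    # Sketch of the single input schema left after the duplicate Features
    # entry is removed. Only the "predictions" line appears in the hunk;
    # the "references" line below is assumed, not confirmed by the diff.
    features = datasets.Features(
        {
            "predictions": datasets.Value("string", id="sequence"),
            "references": datasets.Value("string", id="sequence"),
        }
    )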
@@ -61,12 +55,6 @@ class NegBLEURT(evaluate.Metric):
     def _compute(
         self, predictions, references, batch_size=16
     ):
-        single_ref = isinstance(references, str)
-        print(single_ref, references)
-        if single_ref:
-            print("single reference")
-            references = [references] * len(predictions)
-
         scores_negbleurt = []
         for i in range(0, len(references), batch_size):
             tokenized = self.tokenizer(references[i:i+batch_size], predictions[i:i+batch_size], return_tensors='pt', padding=True, max_length=512, truncation=True)
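Dropping the isinstance branch also removes the implicit broadcasting of a single reference string across all predictions, along with the debug prints. A caller that relied on that behaviour would now expand the reference list itself; a minimal caller-side sketch, reusing the hypothetical load call from above:

    import evaluate

    negbleurt = evaluate.load("MiriUll/negbleurt")

    predictions = ["Ray Charles is a legend.", "Ray Charles isn't legendary."]

    # The metric no longer turns a single string into a list internally,
    # so repeat the one reference once per prediction before calling compute.
    single_reference = "Ray Charles is legendary."
    references = [single_reference] * len(predictions)

    results = negbleurt.compute(predictions=predictions, references=references)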