from collections import defaultdict
from statistics import mean

from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline


class Regard:
    """Scores texts for regard (positive, negative, neutral, other) with the
    `sasha/regardv3` classifier."""

    def __init__(self, config_name):
        self.config_name = config_name
        # Build a text-classification pipeline that returns scores for all
        # four regard labels per input (top_k=4) and truncates long inputs.
        regard_tokenizer = AutoTokenizer.from_pretrained("sasha/regardv3")
        regard_model = AutoModelForSequenceClassification.from_pretrained("sasha/regardv3")
        self.regard_classifier = pipeline(
            "text-classification",
            model=regard_model,
            top_k=4,
            tokenizer=regard_tokenizer,
            truncation=True,
        )

    def regard(self, group):
        """Classify a list of texts and group the scores by regard label."""
        group_scores = defaultdict(list)
        group_regard = self.regard_classifier(group)
        for pred in group_regard:
            for pred_score in pred:
                group_scores[pred_score["label"]].append(pred_score["score"])
        return group_regard, dict(group_scores)
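
    # Shape note (illustrative, not output produced by this file): with
    # top_k=4 the pipeline yields one list of four {"label", "score"} dicts
    # per input, e.g. [[{"label": "positive", "score": 0.93}, ...], ...], so
    # the dict returned above maps each label to one score per input text.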

    def compute(
        self,
        data,
        references=None,
        aggregation=None,
    ):
        if self.config_name == "compare":
            # Score the generated texts and the references, then aggregate
            # each label's scores across the whole group.
            _, pred_regard = self.regard(data)
            _, ref_regard = self.regard(references)
            pred_mean = {k: mean(v) for k, v in pred_regard.items()}
            pred_max = {k: max(v) for k, v in pred_regard.items()}
            ref_mean = {k: mean(v) for k, v in ref_regard.items()}
            ref_max = {k: max(v) for k, v in ref_regard.items()}
            if aggregation == "maximum":
                return {
                    "max_data_regard": pred_max,
                    "max_references_regard": ref_max,
                }
            elif aggregation == "average":
                return {"average_data_regard": pred_mean, "average_references_regard": ref_mean}
            else:
                # Default: per-label difference of mean regard between data and references.
                return {"regard_difference": {key: pred_mean[key] - ref_mean.get(key, 0) for key in pred_mean}}

        elif self.config_name == "inner_compare":
            _, pred_regard = self.regard(data)
            _, ref_regard = self.regard(references)

            # Element-wise difference in positive and negative regard between
            # each data text and its corresponding reference text.
            positive_diff_regard = [
                p - r for p, r in zip(pred_regard["positive"], ref_regard["positive"])
            ]
            negative_diff_regard = [
                p - r for p, r in zip(pred_regard["negative"], ref_regard["negative"])
            ]

            ref_diff_regard = {"positive": positive_diff_regard, "negative": negative_diff_regard}
            ref_diff_mean = {k: mean(v) for k, v in ref_diff_regard.items()}
            no_ref_diff_regard = {"positive": pred_regard["positive"], "negative": pred_regard["negative"]}
            no_ref_diff_mean = {k: mean(v) for k, v in no_ref_diff_regard.items()}

            return {"ref_diff_mean": ref_diff_mean, "no_ref_diff_mean": no_ref_diff_mean}
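

# A minimal usage sketch (not part of the original module): the sentences are
# made-up examples and the printed scores depend on the sasha/regardv3 model.
if __name__ == "__main__":
    regard_metric = Regard("compare")
    results = regard_metric.compute(
        data=["She was known as a brilliant engineer."],
        references=["He was known as a brilliant engineer."],
        aggregation="average",
    )
    # Expected shape: {"average_data_regard": {label: score, ...},
    #                  "average_references_regard": {label: score, ...}}
    print(results)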