# annotator_agreement.py
# Inter-annotator agreement: raw label overlap and Cohen's kappa.
import pandas as pd
from sklearn.metrics import cohen_kappa_score
from definitions import *
# Paths to the two annotators' rating CSVs (the FASTDUP_* constants are
# presumably defined in `definitions` — imported via the star import above).
# NOTE(review): main() below reads FASTDUP_95_PLUS_A1/A2 directly, so these
# aliases are currently unused — confirm before relying on or removing them.
annotator_1_ratings = FASTDUP_95_PLUS_A1
annotator_2_ratings = FASTDUP_95_PLUS_A2
def main(a1_path=None, a2_path=None):
    """Compute inter-annotator agreement between two annotators.

    Each ratings file is a headerless CSV with two columns: image
    identifier and label, with both annotators covering the same images
    in the same order.

    Args:
        a1_path: Path to annotator 1's ratings CSV. Defaults to the
            module-level ``annotator_1_ratings`` (FASTDUP_95_PLUS_A1).
        a2_path: Path to annotator 2's ratings CSV. Defaults to the
            module-level ``annotator_2_ratings`` (FASTDUP_95_PLUS_A2).

    Returns:
        Tuple ``(overlap, kappa)``: the fraction of images with identical
        labels, and the chance-corrected Cohen's kappa score.

    Raises:
        ValueError: If the two files do not list the same images in the
            same order.
    """
    # Fall back to the module-level path aliases (previously main()
    # bypassed them and read the raw constants, leaving them unused).
    a1_path = annotator_1_ratings if a1_path is None else a1_path
    a2_path = annotator_2_ratings if a2_path is None else a2_path
    # Read annotators ratings; files carry no header row.
    a1_df = pd.read_csv(a1_path, header=None, names=["img", "label"])
    a2_df = pd.read_csv(a2_path, header=None, names=["img", "label"])
    # Verify that both annotators rated the same images, in order.
    # Explicit raise instead of `assert`, which is stripped under -O.
    if not a1_df["img"].equals(a2_df["img"]):
        raise ValueError("Annotator files do not cover the same images in the same order")
    # Raw agreement: fraction of images where the labels match exactly.
    overlap = (a1_df["label"] == a2_df["label"]).sum() / len(a1_df)
    # Chance-corrected agreement.
    kappa = cohen_kappa_score(a1_df["label"], a2_df["label"])
    return overlap, kappa
if __name__ == "__main__":
    # Run the agreement analysis and report both metrics to stdout.
    agreement_overlap, agreement_kappa = main()
    print(f"Annotator overlap: {agreement_overlap:.4f}")
    print(f"Cohen's kappa: {agreement_kappa:.4f}")