-
Notifications
You must be signed in to change notification settings - Fork 3
/
val.py
70 lines (57 loc) · 2.39 KB
/
val.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
#!/usr/bin/env python3
"""
Running validation testing.

Loads a fine-tuned DistilBERT sequence classifier plus its tokenizer,
reads the held-out test set, and sets up the globals (model, tokenizer,
device, test) used by the inference loop further down in this script.
"""
import torch
from transformers import DistilBertForSequenceClassification, DistilBertTokenizerFast
import pandas as pd
import numpy as np

# Pick the compute device first so the checkpoint can be mapped onto it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
# map_location: without it, weights saved on a CUDA machine fail to load
# on a CPU-only host with "Attempting to deserialize object on a CUDA device".
model.load_state_dict(torch.load("./results/model_weights.pth", map_location=device))
model.eval()  # disable dropout etc. for deterministic inference
model.to(device)

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")

# Test set: expected to contain at least "id", "contents" and "label" columns
# (see the loop and metrics below) — TODO confirm against the data pipeline.
test = pd.read_csv("./data/testing.csv")
test.drop("id", axis=1, inplace=True)  # id plays no role in evaluation
def to_check_results(test_encoding):
    """Run one tokenized example through the model and return the predicted class.

    Args:
        test_encoding: tokenizer output for a single un-batched text — a
            mapping with "input_ids" and "attention_mask" lists.

    Returns:
        int: index of the highest-scoring class logit.
    """
    input_ids = torch.tensor(test_encoding["input_ids"]).to(device)
    attention_mask = torch.tensor(test_encoding["attention_mask"]).to(device)
    with torch.no_grad():
        # unsqueeze(0) adds the batch dimension the model expects.
        outputs = model(input_ids.unsqueeze(0), attention_mask.unsqueeze(0))
    # argmax directly on the logits tensor; .item() yields a plain Python int
    # and avoids the original GPU -> CPU -> numpy round-trip.
    return int(outputs[0].argmax().item())
# Predicted label for every row of the test set, in order.
l2 = []
for text in test['contents']:
    # Tokenize one document at a time; truncation guards against inputs
    # longer than the model's maximum sequence length.
    encoding = tokenizer(text, truncation=True, padding=True)
    # NOTE: the original also built input_ids/attention_mask tensors here,
    # but they were never used — the conversion happens inside
    # to_check_results, so that dead work is removed.
    l2.append(to_check_results(encoding))
def _safe_div(numerator, denominator):
    """Return numerator / denominator, or 0.0 when the denominator is zero.

    Guards the ratio metrics below against ZeroDivisionError when a
    confusion-matrix quadrant is empty (e.g. the model predicts no
    positives, making precision's denominator zero).
    """
    return numerator / denominator if denominator else 0.0

true_positive = 0
false_positive = 0
true_negative = 0
false_negative = 0
# Generates count of inference quadrants
for real, predicted in zip(test["label"], l2):
    if real == 1 and predicted == 1:
        true_positive += 1
    elif real == 0 and predicted == 0:
        true_negative += 1
    elif real == 1 and predicted == 0:
        false_negative += 1
    elif real == 0 and predicted == 1:
        false_positive += 1
print(l2)
print("True Positive:", true_positive)
print("True Negative:", true_negative)
print("False Positive:", false_positive)
print("False Negative:", false_negative)
# Machine Learning statistics and visuals. https://towardsdatascience.com/accuracy-recall-precision-f-score-specificity-which-to-optimize-on-867d3f11124
total = true_positive + true_negative + false_positive + false_negative
print("Accuracy:", _safe_div(true_positive + true_negative, total))
precision = _safe_div(true_positive, true_positive + false_positive)
print("Precision:", precision)
recall = _safe_div(true_positive, true_positive + false_negative)
print("Recall:", recall)
print("F1-score", _safe_div(2 * recall * precision, recall + precision))
print("Specificity:", _safe_div(true_negative, true_negative + false_positive))