#!/usr/bin/env python
# coding: utf-8
"""
Tencent is pleased to support the open source community by making NeuralClassifier available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and limitations under
the License.
"""
import codecs
import math
import numpy as np
import os
import sys
import json
import torch
from torch.utils.data import DataLoader
from config import Config
from dataset.classification_dataset import ClassificationDataset
from dataset.collator import ClassificationCollator
from dataset.collator import ClassificationType
from dataset.collator import FastTextCollator
from model.classification.drnn import DRNN
from model.classification.fasttext import FastText
from model.classification.textcnn import TextCNN
from model.classification.textvdcnn import TextVDCNN
from model.classification.textrnn import TextRNN
from model.classification.textrcnn import TextRCNN
from model.classification.transformer import Transformer
from model.classification.dpcnn import DPCNN
from model.classification.attentive_convolution import AttentiveConvNet
from model.classification.region_embedding import RegionEmbedding
from model.model_util import get_optimizer, get_hierar_relations
# No-op reference to the imported classes so they are not flagged as unused;
# they are instantiated dynamically via globals() lookups below.
(ClassificationDataset, ClassificationCollator, FastTextCollator, FastText,
 TextCNN, TextRNN, TextRCNN, DRNN, TextVDCNN, Transformer, DPCNN,
 AttentiveConvNet, RegionEmbedding)


class Predictor(object):
def __init__(self, config):
self.config = config
self.model_name = config.model_name
self.use_cuda = config.device.startswith("cuda")
self.dataset_name = "ClassificationDataset"
self.collate_name = "FastTextCollator" if self.model_name == "FastText" \
else "ClassificationCollator"
self.dataset = globals()[self.dataset_name](config, [], mode="infer")
self.collate_fn = globals()[self.collate_name](config, len(self.dataset.label_map))
self.model = Predictor._get_classification_model(self.model_name, self.dataset, config)
Predictor._load_checkpoint(config.eval.model_dir, self.model, self.use_cuda)
        self.model.eval()

    @staticmethod
def _get_classification_model(model_name, dataset, conf):
model = globals()[model_name](dataset, conf)
model = model.cuda(conf.device) if conf.device.startswith("cuda") else model
        return model

    @staticmethod
def _load_checkpoint(file_name, model, use_cuda):
if use_cuda:
checkpoint = torch.load(file_name)
else:
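            # Remap tensors saved on GPU to CPU storage so CPU-only
            # inference works.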
checkpoint = torch.load(file_name, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint["state_dict"])

    def predict(self, texts):
"""
        Each element of `texts` must be a JSON-encoded document (it is parsed
        with json.loads before vocabulary lookup).
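
        Example input, per the NeuralClassifier JSON format ("doc_label" may
        be left empty at inference time):
            {"doc_label": [], "doc_token": ["hello", "world"],
             "doc_keyword": [], "doc_topic": []}

        Returns an ndarray of shape (len(texts), num_labels) holding
        per-label probabilities.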
"""
with torch.no_grad():
batch_texts = [self.dataset._get_vocab_id_list(json.loads(text)) for text in texts]
batch_texts = self.collate_fn(batch_texts)
logits = self.model(batch_texts)
if self.config.task_info.label_type != ClassificationType.MULTI_LABEL:
probs = torch.softmax(logits, dim=1)
else:
probs = torch.sigmoid(logits)
probs = probs.cpu().tolist()
        return np.array(probs)


if __name__ == "__main__":
config = Config(config_file=sys.argv[1])
predictor = Predictor(config)
batch_size = config.eval.batch_size
input_texts = []
predict_probs = []
is_multi = config.task_info.label_type == ClassificationType.MULTI_LABEL
    with codecs.open(sys.argv[2], "r", predictor.dataset.CHARSET) as fin:
        for line in fin:
            input_texts.append(line.strip("\n"))
    num_batches = math.ceil(len(input_texts) / batch_size)
if config.task_info.synaptic_pipeline:
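        # parent_child.json is assumed to map each parent label name to the
        # list of its child label names, e.g.
        #     {"Root": ["cs", "math"], "cs": ["cs.AI", "cs.CL"]}
        # (hypothetical labels; the real taxonomy comes from your dataset).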
with open("parent_child.json", "r") as read_file:
parent_child_dict = json.load(read_file)
        label2id = dict()
        for idx, label in predictor.dataset.id_to_label_map.items():
            label2id[label] = idx
        parent_child_id = dict()
        for parent, children in parent_child_dict.items():
            if parent == 'Root':
                parent_child_id['Root'] = [label2id[x] for x in children]
                continue
            try:
                parent_child_id[label2id[parent]] = [label2id[x] for x in children]
            except KeyError as e:
                # Skip taxonomy entries whose labels are absent from the
                # model's label map.
                print("unknown label in parent_child.json:", e)
        for i in range(num_batches):
batch_texts = input_texts[i*batch_size:(i+1)*batch_size]
predict_prob = predictor.predict(batch_texts)
            predict_probs.extend(predict_prob)
with codecs.open("predict.txt", "w", predictor.dataset.CHARSET) as of:
for predict_prob in predict_probs:
if not is_multi:
predict_label_ids = [predict_prob.argmax()]
else:
predict_label_ids = []
predict_label_idx = np.argsort(-predict_prob)
j = 0
prev = 'Root'
forget = []
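                    # Greedy top-down walk of the label hierarchy: start at
                    # 'Root', accept a candidate only if it is a child of the
                    # most recently accepted label, then restart the scan so
                    # higher-ranked candidates can match the new parent.
                    # 'forget' records ranks already accepted, so restarts
                    # skip them.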
                    while j < config.eval.top_k:
                        if j in forget:
                            j += 1
                            continue
                        if predict_prob[predict_label_idx[j]] > config.eval.threshold:
                            try:
                                if predict_label_idx[j] in parent_child_id[prev]:
                                    predict_label_ids.append(predict_label_idx[j])
                                    prev = predict_label_idx[j]
                                    forget.append(j)
                                    # Restart the scan (the trailing j += 1
                                    # brings -1 back to rank 0).
                                    j = -1
                            except KeyError:
                                # prev has no children in the taxonomy (it is
                                # a leaf); accept the candidate and restart.
                                predict_label_ids.append(predict_label_idx[j])
                                prev = predict_label_idx[j]
                                forget.append(j)
                                j = -1
                        j += 1
predict_label_name = [predictor.dataset.id_to_label_map[predict_label_id] \
for predict_label_id in predict_label_ids]
of.write(";".join(predict_label_name) + "\n")
else:
        for i in range(num_batches):
batch_texts = input_texts[i*batch_size:(i+1)*batch_size]
predict_prob = predictor.predict(batch_texts)
            predict_probs.extend(predict_prob)
with codecs.open("predict.txt", "w", predictor.dataset.CHARSET) as of:
for predict_prob in predict_probs:
if not is_multi:
predict_label_ids = [predict_prob.argmax()]
else:
predict_label_ids = []
predict_label_idx = np.argsort(-predict_prob)
                    for j in range(config.eval.top_k):
if predict_prob[predict_label_idx[j]] > config.eval.threshold:
predict_label_ids.append(predict_label_idx[j])
predict_label_name = [predictor.dataset.id_to_label_map[predict_label_id] \
for predict_label_id in predict_label_ids]
of.write(";".join(predict_label_name) + "\n")
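
# Usage sketch (hypothetical paths; adjust to your checkout):
#     python predict.py conf/train.json data/predict_input.json
# sys.argv[1] is the config file; sys.argv[2] holds one JSON document per
# line. Predicted label names are written to predict.txt, ";"-joined, one
# line per input document.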