# data_preparation.py (forked from hellohaptik/multi-task-NLP)
import argparse
import os
import json
import multiprocessing as mp
from keras.preprocessing.sequence import pad_sequences
from utils.data_utils import TaskType, ModelType, NLP_MODELS
from utils.task_utils import TasksParam
from tqdm import tqdm
from ast import literal_eval
def load_data(dataPath, taskType, hasLabels):
    '''
    This function loads data from a tsv file according to the format required by taskType.
    dataPath  - path/name of the file to read
    taskType  - type of task for which the format is set. Can be
                Single Sentence Classification
                Sentence Pair Classification
                Span Prediction (MRC)
    hasLabels - whether or not the file has labels. When hasLabels is not True, dummy
                labels are created.
    Function taken from MT-DNN with modification.
    '''
allData = []
for line in open(dataPath):
cols = line.strip("\n").split("\t")
if taskType == TaskType.SingleSenClassification:
if hasLabels is True:
assert len(cols) == 3, "Data is not in Single Sentence Classification format"
row = {"uid": cols[0], "label": cols[1], "sentenceA": cols[2]}
else:
row = {"uid": cols[0], "label": '0', "sentenceA": cols[1]}
elif taskType == TaskType.SentencePairClassification:
if hasLabels is True:
assert len(cols) == 4, "Data is not in Sentence Pair Classification format"
row = {"uid": cols[0], "label": cols[1],"sentenceA": cols[2], "sentenceB": cols[3]}
else:
row = {"uid": cols[0], "label": '0', "sentenceA": cols[1], "sentenceB": cols[2]}
        elif taskType == TaskType.NER:
            if hasLabels is True:
                assert len(cols) == 3, "Data is not in NER format"
                row = {"uid": cols[0], "label": literal_eval(cols[1]), "sentence": literal_eval(cols[2])}
                assert type(row['label']) == list, "Label should be a list of token labels in the data"
            else:
                row = {"uid": cols[0], "label": ["O"]*len(literal_eval(cols[1])), "sentence": literal_eval(cols[1])}
                assert type(row['sentence']) == list, "Sentence should be a list of tokens in the data"
elif taskType == TaskType.Span:
assert len(cols) == 4, "Data is not in Span format"
row = {
"uid": cols[0],
"label": cols[1],
"sentenceA": cols[2],
"sentenceB": cols[3]}
else:
raise ValueError(taskType)
allData.append(row)
return allData
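# Example of the tsv rows accepted by load_data above (tab-separated; values are hypothetical):
#   SingleSenClassification (with labels):    uid1<TAB>positive<TAB>the movie was great
#   SentencePairClassification (with labels): uid2<TAB>1<TAB>sentence A text<TAB>sentence B text
#   NER (with labels):                        uid3<TAB>['O', 'B-PER']<TAB>['hello', 'john']
# For instance, load_data('train.tsv', TaskType.SingleSenClassification, hasLabels=True)
# would return rows like {'uid': 'uid1', 'label': 'positive', 'sentenceA': 'the movie was great'}.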
def standard_data_converter(maxSeqLen, tokenizer, senA, senB = None):
    '''
    If the data is a sentence pair   -> [CLS]senA[SEP]senB[SEP]
    If the data is a single sentence -> [CLS]senA[SEP]
    The truncation strategy truncates only the 2nd sentence (that is, the passage).
    This is helpful because we don't want to truncate the query. The strategy can
    be changed to 'longest_first' or another option if required.
    Different model encoders require different inputs. Some encoders don't support type_ids and
    some don't support attention_mask. Hence, to support multiple encoders, typeIds and mask are
    initially kept None.
    '''
typeIds = None
mask = None
if senB:
out = tokenizer.encode_plus(senA, senB, add_special_tokens = True,
truncation_strategy = 'only_second', max_length = maxSeqLen,
pad_to_max_length = True)
else:
out = tokenizer.encode_plus(senA, add_special_tokens=True,
truncation_strategy ='only_first',
max_length = maxSeqLen, pad_to_max_length=True)
tokenIds = out['input_ids']
if 'token_type_ids' in out.keys():
typeIds = out['token_type_ids']
if 'attention_mask' in out.keys():
mask = out['attention_mask']
return tokenIds, typeIds, mask
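# Usage sketch for standard_data_converter above (sentences are hypothetical; the actual ids
# depend on the tokenizer loaded in main):
#   tokenIds, typeIds, mask = standard_data_converter(
#       maxSeqLen=384, tokenizer=tokenizer,
#       senA="what is the capital of France?", senB="Paris is the capital of France.")
# The pair is packed as [CLS] senA [SEP] senB [SEP] and padded/truncated to maxSeqLen, so
# len(tokenIds) == 384; typeIds and mask (when the tokenizer returns them) have the same
# length, and otherwise stay None.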
def create_data_single_sen_classification(data, chunkNumber, tempList, maxSeqLen, tokenizer, labelMap):
name = 'single_sen_{}.json'.format(str(chunkNumber))
with open(name, 'w') as wf:
with tqdm(total = len(data), position = chunkNumber) as progress:
for idx, sample in enumerate(data):
ids = sample['uid']
senA = sample['sentenceA']
label = sample['label']
assert label.isnumeric() or labelMap is not None, "In Sen Classification, either labels \
should be integers or label map should be given in task file"
if label.isnumeric():
label = int(label)
else:
#make index label according to the map
label = labelMap[sample['label']]
inputIds, typeIds, inputMask = standard_data_converter(maxSeqLen, tokenizer, senA)
features = {
'uid': ids,
'label': label,
'token_id': inputIds,
'type_id': typeIds,
'mask': inputMask}
wf.write('{}\n'.format(json.dumps(features)))
progress.update(1)
tempList.append(name)
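# Each line written by create_data_single_sen_classification above is one json-encoded sample.
# With a hypothetical labelMap of {'negative': 0, 'positive': 1} and label 'positive', a line
# would look like:
#   {"uid": "uid1", "label": 1, "token_id": [...], "type_id": [...], "mask": [...]}
# where token_id (and type_id/mask, when present) have length maxSeqLen.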
def create_data_sentence_pair_classification(data, chunkNumber, tempList, maxSeqLen, tokenizer):
name = 'sentence_pair_{}.json'.format(str(chunkNumber))
with open(name, 'w') as wf:
with tqdm(total = len(data), position = chunkNumber) as progress:
for idx, sample in enumerate(data):
ids = sample['uid']
senA = sample['sentenceA']
senB = sample['sentenceB']
label = int(sample['label'])
inputIds, typeIds, inputMask = standard_data_converter(maxSeqLen, tokenizer, senA, senB)
features = {
'uid': ids,
'label': label,
'token_id': inputIds,
'type_id': typeIds,
'mask': inputMask}
wf.write('{}\n'.format(json.dumps(features)))
progress.update(1)
tempList.append(name)
def create_data_ner(data, chunkNumber, tempList, maxSeqLen, tokenizer, labelMap):
    '''
    Function to create data in NER/sequence labelling format.
    The format expected by this function is:
    sample['uid']      :- unique sample/sentence id
    sample['sentence'] :- list of the tokens in the sentence, e.g. ['My', 'name', 'is', 'hello']
    sample['label']    :- list of the corresponding tag for each token, e.g. ['O', 'O', 'O', 'B-PER']
    Here we don't use the standard data converter, as the format for NER data
    is slightly different and requires different steps to prepare.
    '[CLS]' and '[SEP]' also have to be added at the front and end of the labels, as they will
    be present at the start and end of the sentence.
    The word-piece tokenizer breaks a single word into multiple pieces if it is
    unknown, so we add 'X' to the labels for the extra pieces.
    '''
name = 'ner_{}.json'.format(str(chunkNumber))
with open(name, 'w') as wf:
with tqdm(total = len(data), position = chunkNumber) as progress:
for idx, sample in enumerate(data):
ids = sample['uid']
tempTokens = ['[CLS]']
tempLabels = ['[CLS]']
for word, label in zip(sample['sentence'], sample['label']):
tokens = tokenizer.tokenize(word)
for m, token in enumerate(tokens):
tempTokens.append(token)
#only first piece would be marked with label
if m==0:
tempLabels.append(label)
else:
tempLabels.append('X')
# adding [SEP] at end
tempTokens.append('[SEP]')
tempLabels.append('[SEP]')
out = tokenizer.encode_plus(text = tempTokens, add_special_tokens=False,
truncation_strategy ='only_first',
max_length = maxSeqLen, pad_to_max_length=True)
typeIds = None
inputMask = None
tokenIds = out['input_ids']
if 'token_type_ids' in out.keys():
typeIds = out['token_type_ids']
if 'attention_mask' in out.keys():
inputMask = out['attention_mask']
tempLabelsEnc = pad_sequences([ [labelMap[l] for l in tempLabels] ],
maxlen=maxSeqLen, value=labelMap["O"], padding="post",
dtype="long", truncating="post").tolist()[0]
assert len(tempLabelsEnc) == len(tokenIds), "mismatch between processed tokens and labels"
features = {
'uid': ids,
'label': tempLabelsEnc,
'token_id': tokenIds,
'type_id': typeIds,
'mask': inputMask}
wf.write('{}\n'.format(json.dumps(features)))
progress.update(1)
tempList.append(name)
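# Word-piece labelling sketch for create_data_ner above (the actual splits depend on the
# tokenizer's vocabulary; this example is illustrative):
#   sentence ['my', 'name', 'is', 'johanson'], labels ['O', 'O', 'O', 'B-PER']
#   if 'johanson' tokenizes to ['johan', '##son'], then
#   tempTokens = ['[CLS]', 'my', 'name', 'is', 'johan', '##son', '[SEP]']
#   tempLabels = ['[CLS]', 'O', 'O', 'O', 'B-PER', 'X', '[SEP]']
# Only the first piece keeps the real tag; this assumes the task's labelMap contains
# entries for '[CLS]', '[SEP]', 'X' and 'O'.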
def create_data_span_prediction(data, chunkNumber, tempList, maxSeqLen, tokenizer):
    '''
    Placeholder for span prediction (MRC) data creation. The body below is kept
    disabled (as a string literal) because it depends on squad_utils helpers and on
    MAX_QUERY_LEN / DOC_STRIDE constants that are not defined or imported in this file.
    '''
    name = 'span_prediction_{}.json'.format(str(chunkNumber))
    '''
with open(name, 'w') as wf:
unique_id = 1000000000
for example_index, sample in enumerate(data):
ids = sample['uid']
doc = sample['sentenceA']
query = sample['sentenceB']
label = sample['label']
doc_tokens, cw_map = squad_utils.token_doc(doc)
answer_start, answer_end, answer, is_impossible = squad_utils.parse_squad_label(label)
answer_start_adjusted, answer_end_adjusted = squad_utils.recompute_span(answer, answer_start, cw_map)
is_valid = squad_utils.is_valid_answer(doc_tokens, answer_start_adjusted, answer_end_adjusted, answer)
if not is_valid: continue
feature_list = squad_utils.mrc_feature(tokenizer,
unique_id,
example_index,
query,
doc_tokens,
answer_start_adjusted,
answer_end_adjusted,
is_impossible,
                                                   maxSeqLen,
MAX_QUERY_LEN,
DOC_STRIDE,
answer_text=answer,
is_training=True)
unique_id += len(feature_list)
for feature in feature_list:
so = json.dumps({'uid': ids,
'token_id' : feature.input_ids,
'mask': feature.input_mask,
'type_id': feature.segment_ids,
'example_index': feature.example_index,
'doc_span_index':feature.doc_span_index,
'tokens': feature.tokens,
'token_to_orig_map': feature.token_to_orig_map,
'token_is_max_context': feature.token_is_max_context,
'start_position': feature.start_position,
'end_position': feature.end_position,
'label': feature.is_impossible,
'doc': doc,
'doc_offset': feature.doc_offset,
'answer': [answer]})
                wf.write('{}\n'.format(so))
tempList.append(name)
'''
def create_data_multithreaded(data, wrtPath, tokenizer, taskObj, taskName, maxSeqLen, multithreaded):
    '''
    This function uses multi-processing to create the data in the format required by the
    base models as per the task. Utilizing multiple cores helps in processing large
    amounts of data quickly.
    '''
man = mp.Manager()
# shared list to store all temp files written by processes
tempFilesList = man.list()
    numProcess = 1
    if multithreaded:
        # use all but one core, but never fewer than one process
        numProcess = max(1, mp.cpu_count() - 1)
'''
Dividing the entire data into chunks which can be sent to different processes.
Each process will write its chunk into a file.
After all processes are done writing, we will combine all the files into one
'''
taskType = taskObj.taskTypeMap[taskName]
labelMap = taskObj.labelMap[taskName]
    # ceiling division so the chunks together cover every sample
    # (plain floor division could drop the last few samples)
    chunkSize = max(1, (len(data) + numProcess - 1) // numProcess)
    print('Data Size: ', len(data))
    print('Number of processes: ', numProcess)
processes = []
    for i in range(numProcess):
        dataChunk = data[chunkSize*i : chunkSize*(i+1)]
        if taskType == TaskType.SingleSenClassification:
            p = mp.Process(target = create_data_single_sen_classification, args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer, labelMap))
        elif taskType == TaskType.SentencePairClassification:
            p = mp.Process(target = create_data_sentence_pair_classification, args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer))
        elif taskType == TaskType.NER:
            p = mp.Process(target = create_data_ner, args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer, labelMap))
        elif taskType == TaskType.Span:
            p = mp.Process(target = create_data_span_prediction, args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer))
        else:
            raise ValueError(taskType)
p.start()
processes.append(p)
for pr in processes:
pr.join()
# combining the files written by multiple processes into a single final file
with open(wrtPath, 'w') as f:
for file in tempFilesList:
with open(file, 'r') as r:
for line in r:
sample = json.loads(line)
f.write('{}\n'.format(json.dumps(sample)))
os.remove(file)
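# Chunking sketch for create_data_multithreaded above: with, say, 10 samples and 3 worker
# processes, the ceiling division gives chunkSize = 4, so the chunks are data[0:4], data[4:8]
# and data[8:12] (the last slice simply stops at the end of the list) and no sample is dropped.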
def main():
    # taking in arguments
    parser = argparse.ArgumentParser()
    # argparse's type=bool treats any non-empty string (including "False") as True,
    # so boolean flags are parsed explicitly
    parse_bool = lambda x: str(x).lower() in ('true', '1', 'yes')
    parser.add_argument('--task_file', type=str, default="tasks_file.yml")
    parser.add_argument('--data_dir', type=str, default='data')
    parser.add_argument('--do_lower_case', type=parse_bool, default=True)
    parser.add_argument('--max_seq_len', type=int, default=384,
                        help="max sequence length for making data for the model")
    parser.add_argument('--multithreaded', type=parse_bool, default=True,
                        help="use multiple processes for preparing data faster")
    parser.add_argument('--has_labels', type=parse_bool, default=True,
                        help="set to False if labels are not present in the file. \
                        To be used when preparing data for inference")
    args = parser.parse_args()
tasks = TasksParam(args.task_file)
print('task object created from task file...')
    assert os.path.exists(args.data_dir), "data dir doesn't exist"
modelName = tasks.modelType.name.lower()
configClass, modelClass, tokenizerClass, defaultName = NLP_MODELS[modelName]
configName = tasks.modelConfig
if configName is None:
configName = defaultName
#making tokenizer for model
tokenizer = tokenizerClass.from_pretrained(configName)
print('{} model tokenizer loaded for config {}'.format(modelName, configName))
dataPath = os.path.join(args.data_dir, '{}_prepared_data'.format(configName))
if not os.path.exists(dataPath):
os.makedirs(dataPath)
for taskId, taskName in tasks.taskIdNameMap.items():
for file in tasks.fileNamesMap[taskName]:
print('Loading raw data for task {} from {}'.format(taskName, os.path.join(args.data_dir, file)))
rows = load_data(os.path.join(args.data_dir, file), tasks.taskTypeMap[taskName],
hasLabels = args.has_labels)
wrtFile = os.path.join(dataPath, '{}.json'.format(file.split('.')[0]))
print('Processing Started...')
create_data_multithreaded(rows, wrtFile, tokenizer, tasks, taskName,
args.max_seq_len, args.multithreaded)
print('Data Processing done for {}. File saved at {}'.format(taskName, wrtFile))
if __name__ == "__main__":
main()
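# Example invocation (file names and paths are illustrative):
#   python data_preparation.py --task_file tasks_file.yml --data_dir data \
#       --max_seq_len 384 --multithreaded True --has_labels True
# The prepared json files are written under <data_dir>/<configName>_prepared_data/.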