# SaxDataLoaderTool.py
# (Removed web-scrape artifacts: GitHub page chrome and line-number gutter
# that were not part of the source file.)
from Params import *
from AllenTool import *
from transformers import AutoTokenizer
import torch
from torch.utils.data import DataLoader
import pickle
import os
import nltk
from copy import deepcopy
from SaxDataset import *
from MInput import *
from Params import *
class SaxDataLoaderTool:
    """
    The main purpose of this class is to create torch DataLoaders for ttt
    in ["train", "tune", "test"], and for extracting.

    Dataset and DataLoader are located in torch.utils.data. A Dataset
    stores a huge number of samples, and a DataLoader wraps an iterable
    around the Dataset to enable access to batches of samples in a for
    loop.

    SaxDataset is a child of torch Dataset. SaxDataLoaderTool is not a
    child of DataLoader; instead, it creates multiple instances of
    DataLoader. That is why we call it SaxDataLoaderTool rather than just
    SaxDataLoader.

    data processing chain:
    (optional allen_fp->)tags_in_fp->MInput->PaddedMInput->SaxDataset
    ->SaxDataLoaderTool

    Note from this chain that SaxDataLoaderTool has an instance of
    SaxDataset as input for each DataLoader instance it creates.

    Attributes
    ----------
    auto_tokenizer: AutoTokenizer
    extract_dloader: DataLoader|None
        DataLoader for extracting.
    pad_icode: int
        integer code for padding. This equals 0 for BERT.
    params: Params
        parameters
    test_dloader: DataLoader|None
        DataLoader for ttt="test"
    test_tags_fp: str
        file path for extags or cctags file used when ttt="test"
    train_dloader: DataLoader|None
        DataLoader for ttt="train"
    train_tags_fp: str
        file path for extags or cctags file used when ttt="train"
    tune_dloader: DataLoader|None
        DataLoader for ttt="tune". (tune=validation)
    tune_tags_fp: str
        file path for extags or cctags file used when ttt="tune".
        (tune=validation)
    """

    def __init__(self,
                 params,
                 auto_tokenizer,
                 train_tags_fp, tune_tags_fp, test_tags_fp):
        """
        Constructor.

        Parameters
        ----------
        params: Params
        auto_tokenizer: AutoTokenizer
        train_tags_fp: str
        tune_tags_fp: str
        test_tags_fp: str
        """
        self.params = params
        self.auto_tokenizer = auto_tokenizer
        # encode() surrounds the pad token with special tokens (e.g.
        # [CLS]/[SEP] for BERT), so the pad token's icode sits at index 1.
        self.pad_icode = \
            auto_tokenizer.encode(auto_tokenizer.pad_token)[1]

        self.train_tags_fp = train_tags_fp
        self.tune_tags_fp = tune_tags_fp
        self.test_tags_fp = test_tags_fp

        # DataLoaders are created lazily by the set_*() methods.
        self.train_dloader = None
        self.tune_dloader = None
        self.test_dloader = None
        self.extract_dloader = None

    def _cached_m_in_fp(self, ttt, tags_fp):
        """
        Returns the path of the cache (pickle) file that stores the MInput
        built from the tags file `tags_fp` for the given `ttt`.

        Parameters
        ----------
        ttt: str
            one of "train", "tune", "test"
        tags_fp: str

        Returns
        -------
        str
        """
        # flatten the tags file path into a single file-name stem
        stem = tags_fp.replace("/", "_").split(".")[0]
        return CACHE_DIR + "/" + self.params.task + \
            "_" + ttt + "_m_in_" + stem + ".pkl"

    def _find_m_in(self, cached_fp, tags_fp, ttt):
        """
        Returns the MInput for (`tags_fp`, `ttt`), loading it from the
        cache file `cached_fp` when possible, otherwise building it from
        `tags_fp` and storing it in the cache for future use.

        Parameters
        ----------
        cached_fp: str
        tags_fp: str
        ttt: str

        Returns
        -------
        MInput
        """
        if not os.path.exists(cached_fp) or \
                self.params.d["refresh_cache"]:
            m_in = MInput(
                self.params,
                tags_fp,
                self.auto_tokenizer,
                omit_exless=get_omit_exless_flag(self.params.task, ttt))
            # use `with` so the file handle is always closed (the previous
            # version passed bare open() calls to pickle and leaked them)
            with open(cached_fp, 'wb') as f:
                pickle.dump(m_in, f)
        else:
            with open(cached_fp, 'rb') as f:
                m_in = pickle.load(f)
        return m_in

    def get_all_ttt_datasets(self):
        """
        similar to Openie6.data.process_data()

        This method returns a triple of 3 SaxDatasets, one each for ttt in
        ["train", "tune", "test"].

        Take ttt="train" as an example. If self.params.d["refresh_cache"]
        is False and there is a file with the appropriate info previously
        stored in the `cache` folder, this method constructs the train
        dataset from that. Otherwise, this method reads the
        self.train_tags_fp file, constructs the dataset from that, and
        stores the result, for future use, as a pickle file in the `cache`
        folder.

        Returns
        -------
        SaxDataset, SaxDataset, SaxDataset
        """
        # if 'extract' not in params.action, use caching
        assert self.train_tags_fp, self.train_tags_fp
        assert self.tune_tags_fp, self.tune_tags_fp
        assert self.test_tags_fp, self.test_tags_fp

        train_m_in = self._find_m_in(
            self._cached_m_in_fp("train", self.train_tags_fp),
            self.train_tags_fp,
            "train")
        tune_m_in = self._find_m_in(
            self._cached_m_in_fp("tune", self.tune_tags_fp),
            self.tune_tags_fp,
            "tune")
        test_m_in = self._find_m_in(
            self._cached_m_in_fp("test", self.test_tags_fp),
            self.test_tags_fp,
            "test")

        train_dataset = SaxDataset(train_m_in)
        tune_dataset = SaxDataset(tune_m_in)
        test_dataset = SaxDataset(test_m_in)

        return train_dataset, tune_dataset, test_dataset

    def get_extract_dataset(self, pred_tags_in_fp):
        """
        similar to Openie6.data.process_data()

        This method returns a dataset for extracting. It creates that
        dataset from the info it gleans by reading the file
        `pred_tags_in_fp`.

        Parameters
        ----------
        pred_tags_in_fp: str

        Returns
        -------
        SaxDataset
        """
        # no caching is used when extracting
        assert pred_tags_in_fp
        extract_m_in = MInput(self.params,
                              pred_tags_in_fp,
                              self.auto_tokenizer,
                              omit_exless=False)
        extract_dataset = SaxDataset(extract_m_in)
        return extract_dataset

    def set_all_ttt_dataloaders(self):
        """
        This method sets class attributes for 3 DataLoaders, one for each
        ttt in ["train", "tune", "test"].

        The method does this by first calling get_all_ttt_datasets() to
        get 3 Datasets. It then constructs the 3 DataLoaders from those 3
        Datasets.

        Returns
        -------
        None
        """
        train_dataset, tune_dataset, test_dataset = \
            self.get_all_ttt_datasets()
        # only the training loader shuffles; tune/test keep file order
        self.train_dloader = \
            DataLoader(train_dataset,
                       batch_size=self.params.d["batch_size"],
                       shuffle=True,
                       num_workers=1)
        self.tune_dloader = \
            DataLoader(tune_dataset,
                       batch_size=self.params.d["batch_size"],
                       num_workers=1)
        self.test_dloader = \
            DataLoader(test_dataset,
                       batch_size=self.params.d["batch_size"],
                       num_workers=1)

    def set_extract_dataloader(self, pred_tags_in_fp):
        """
        This method sets the class attribute for the DataLoader for
        extracting.

        The method does this by first calling get_extract_dataset() to get
        an extract Dataset. It then constructs the DataLoader from that
        Dataset.

        Parameters
        ----------
        pred_tags_in_fp: str

        Returns
        -------
        None
        """
        # NOTE(review): shuffle=True reorders extraction batches relative
        # to the input file — confirm this is intended for extraction.
        self.extract_dloader = \
            DataLoader(self.get_extract_dataset(pred_tags_in_fp),
                       batch_size=self.params.d["batch_size"],
                       shuffle=True,
                       num_workers=1)
if __name__ == "__main__":
    def main(pid):
        """Build a SaxDataLoaderTool for params id `pid` and print the
        lengths of its three ttt datasets."""
        params = Params(pid)
        tokenizer = AutoTokenizer.from_pretrained(
            params.d["model_str"],
            do_lower_case='uncased' in params.d["model_str"],
            use_fast=True,
            data_dir=CACHE_DIR,
            add_special_tokens=False,
            additional_special_tokens=UNUSED_TOKENS)
        train_fp = "tests/train_extags.txt"
        tune_fp = "tests/tune_extags.txt"
        test_fp = "tests/test_extags.txt"
        loader_tool = SaxDataLoaderTool(params,
                                        tokenizer,
                                        train_fp,
                                        tune_fp,
                                        test_fp)
        train_ds, tune_ds, test_ds = loader_tool.get_all_ttt_datasets()
        print("len(train_dataset)=", len(train_ds))
        print("len(tune_dataset)=", len(tune_ds))
        print("len(test_dataset)=", len(test_ds))

    # try with params ids 2 and 3:
    # 2: "ex", "test"
    # 3: "ex", "extract"
    main(2)
    main(3)