Load_Data.py
import os
import re
import numpy as np
import pandas as pd


def int_to_label(number, base):
    '''
    Convert a 1-based integer class label into a one-hot vector of
    length `base`.
    '''
    vector = np.zeros(base)
    vector[number - 1] = 1
    return vector
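# Example: int_to_label(3, 5) -> array([0., 0., 1., 0., 0.])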


def text_flourish(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?'`]", " ", string)
    string = re.sub(r"'s", " 's", string)
    string = re.sub(r"'ve", " 've", string)
    string = re.sub(r"n't", " n't", string)
    string = re.sub(r"'re", " 're", string)
    string = re.sub(r"'d", " 'd", string)
    string = re.sub(r"'ll", " 'll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # Plain replacement strings here: the original " \( " style replacements
    # left a literal backslash in the cleaned text, because re.sub keeps
    # unknown non-letter escapes in the replacement template untouched.
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
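# Example: text_flourish("I haven't tried it (yet)!")
# -> "i have n't tried it ( yet ) !"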


def navigate(dataname):
    '''
    Walk the current working directory tree and return the path of the
    first file named `dataname`, or None if no such file is found.
    '''
    current_dir = os.getcwd()
    for dirname, _, files in os.walk(current_dir):
        for file in files:
            if file == dataname:
                return os.path.join(dirname, file)
    return None


def pad_sentences(sentences, lengths):
    '''
    Pad each tokenized sentence with 'PAD' tokens up to the length of the
    longest sentence. Mutates `sentences` in place and also returns it.
    (Placeholder for a better implementation later.)
    '''
    max_len = max(lengths)
    for i, sent in enumerate(sentences):
        diff = max_len - len(sent)
        sentences[i] = sent + ['PAD'] * diff
    return sentences
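# Example: pad_sentences([['a'], ['b', 'c']], [1, 2])
# -> [['a', 'PAD'], ['b', 'c']]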


def load_data(dataname):
    '''
    Load the review data and vectorize the labels.
    Returns a list of cleaned review strings and a parallel list of
    one-hot label vectors.
    '''
    data_dir = navigate(dataname)
    #dataset = pd.read_csv(data_dir, usecols=['Stars', 'Review'])
    data = pd.read_csv(data_dir, usecols=['Score', 'Text'], nrows=50000)
    # To reduce class imbalance: keep every row for scores 1-3 and cap the
    # two majority scores (4 and 5) at 4047 rows each, a hard-coded limit
    # chosen to roughly match the minority class sizes.
    subset0 = data.loc[data['Score'] == 1]
    subset1 = data.loc[data['Score'] == 2]
    subset2 = data.loc[data['Score'] == 3]
    subset3 = data.loc[data['Score'] == 4][:4047]
    subset4 = data.loc[data['Score'] == 5][:4047]
    frames = [subset0, subset3, subset2, subset1, subset4]
    concat = pd.concat(frames, ignore_index=True)
    # Shuffle the rebalanced rows
    dataset = concat.sample(frac=1)
    print(dataset.head())
    # Free memory
    del subset4, subset3, subset2, subset1, subset0, frames, concat
    data_x = []
    data_y = []
    # Note: iterating score by score regroups the examples by class, which
    # undoes the shuffle above; reshuffle data_x and data_y together before
    # training.
    for i in dataset['Score'].unique():
        current = dataset['Text'].loc[dataset['Score'] == i]
        label = int_to_label(i, 5)
        for current_x in current:
            text = text_flourish(current_x)
            data_x.append(text)
            data_y.append(label)
    #data_x = pad_sentences(data_x)
    return data_x, data_y
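

if __name__ == '__main__':
    # Minimal usage sketch: 'Reviews.csv' is a hypothetical file name for a
    # CSV with 'Score' and 'Text' columns (e.g. the Amazon Fine Food Reviews
    # dump); substitute the actual dataset file here.
    data_x, data_y = load_data('Reviews.csv')
    print('Loaded', len(data_x), 'reviews')
    print(data_x[0][:80], '->', data_y[0])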