-
Notifications
You must be signed in to change notification settings - Fork 17
/
fetch_data.py
103 lines (83 loc) · 3.81 KB
/
fetch_data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import os
import tarfile
import urllib
import urllib.request
import zipfile
# 20 newsgroups dataset: archive URL, local archive/cache file names, and the
# names of the train/test folders produced when the tarball is extracted.
TWENTY_URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
TWENTY_ARCHIVE_NAME = "20news-bydate.tar.gz"
TWENTY_CACHE_NAME = "20news-bydate.pkz"
TWENTY_TRAIN_FOLDER = "20news-bydate-train"
TWENTY_TEST_FOLDER = "20news-bydate-test"
# Sentiment140 Twitter sentiment dataset (zip of two CSV files).
SENTIMENT140_URL = ("http://cs.stanford.edu/people/alecmgo/"
"trainingandtestdata.zip")
SENTIMENT140_ARCHIVE_NAME = "trainingandtestdata.zip"
def get_datasets_folder():
    """Return the path of the ``notebooks/datasets`` folder, creating it.

    Looks next to this file for a ``notebooks`` directory. If
    ``notebooks/datasets`` is missing, it is either extracted from a
    ``notebooks/datasets.zip`` archive (when present) or created empty.

    Returns
    -------
    str
        Absolute path to the datasets folder.
    """
    here = os.path.dirname(__file__)
    notebooks = os.path.join(here, 'notebooks')
    datasets_folder = os.path.abspath(os.path.join(notebooks, 'datasets'))
    datasets_archive = os.path.abspath(os.path.join(notebooks, 'datasets.zip'))
    if not os.path.exists(datasets_folder):
        if os.path.exists(datasets_archive):
            print("Extracting " + datasets_archive)
            # Extract into the notebooks folder (not the CWD) so that the
            # assert below holds no matter where the script is run from;
            # the original extractall('.') broke when CWD != notebooks' parent.
            with zipfile.ZipFile(datasets_archive) as zf:
                zf.extractall(notebooks)
            assert os.path.exists(datasets_folder)
        else:
            print("Creating datasets folder: " + datasets_folder)
            os.makedirs(datasets_folder)
    else:
        print("Using existing dataset folder:" + datasets_folder)
    return datasets_folder
def check_twenty_newsgroups(datasets_folder):
    """Download and extract the 20 newsgroups dataset if not already cached.

    Parameters
    ----------
    datasets_folder : str
        Folder where the archive is stored and extracted.

    Raises
    ------
    AssertionError
        If the train/test folders are still missing after extraction.
    """
    print("Checking availability of the 20 newsgroups dataset")
    archive_path = os.path.join(datasets_folder, TWENTY_ARCHIVE_NAME)
    train_path = os.path.join(datasets_folder, TWENTY_TRAIN_FOLDER)
    test_path = os.path.join(datasets_folder, TWENTY_TEST_FOLDER)
    if not os.path.exists(archive_path):
        print("Downloading dataset from %s (14 MB)" % TWENTY_URL)
        # urllib.request is the Python 3 home of urlopen; the original
        # urllib.urlopen only exists on Python 2.
        opener = urllib.request.urlopen(TWENTY_URL)
        # Context manager guarantees the file handle is closed even if
        # the read fails part-way through the 14 MB download.
        with open(archive_path, 'wb') as f:
            f.write(opener.read())
    else:
        print("Found archive: " + archive_path)
    if not os.path.exists(train_path) or not os.path.exists(test_path):
        print("Decompressing %s" % archive_path)
        with tarfile.open(archive_path, "r:gz") as tar:
            tar.extractall(path=datasets_folder)
    print("Checking that the 20 newsgroups files exist...")
    assert os.path.exists(train_path)
    assert os.path.exists(test_path)
    print("=> Success!")
def check_sentiment140(datasets_folder):
    """Download and extract the sentiment 140 dataset if not already cached.

    Parameters
    ----------
    datasets_folder : str
        Folder where the archive is stored; CSVs are extracted into a
        ``sentiment140`` sub-folder.

    Raises
    ------
    AssertionError
        If the expected train/test CSV files are missing after extraction.
    """
    print("Checking availability of the sentiment 140 dataset")
    archive_path = os.path.join(datasets_folder, SENTIMENT140_ARCHIVE_NAME)
    sentiment140_path = os.path.join(datasets_folder, 'sentiment140')
    train_path = os.path.join(sentiment140_path,
                              'training.1600000.processed.noemoticon.csv')
    test_path = os.path.join(sentiment140_path,
                             'testdata.manual.2009.06.14.csv')
    if not os.path.exists(archive_path):
        print("Downloading dataset from %s (77MB)" % SENTIMENT140_URL)
        # urllib.request is the Python 3 home of urlopen; the original
        # urllib.urlopen only exists on Python 2.
        opener = urllib.request.urlopen(SENTIMENT140_URL)
        # Context manager guarantees the file handle is closed even if
        # the read fails part-way through the download.
        with open(archive_path, 'wb') as f:
            f.write(opener.read())
    else:
        print("Found archive: " + archive_path)
    if not os.path.exists(sentiment140_path):
        print("Extracting %s to %s" % (archive_path, sentiment140_path))
        with zipfile.ZipFile(archive_path) as zf:
            zf.extractall(sentiment140_path)
    print("Checking that the sentiment 140 CSV files exist...")
    assert os.path.exists(train_path)
    assert os.path.exists(test_path)
    print("=> Success!")
if __name__ == "__main__":
    # Fetch every dataset needed by the notebooks (except sentiment140,
    # which is deliberately skipped below).
    datasets_folder = get_datasets_folder()
    check_twenty_newsgroups(datasets_folder)

    # Olivetti faces are small; scikit-learn caches them in its own data home.
    from sklearn.datasets import fetch_olivetti_faces
    fetch_olivetti_faces()

    # Converted from Python 2 print statements so the script runs on Python 3.
    print("Loading Labeled Faces Data (~200MB)")
    from sklearn.datasets import fetch_lfw_people
    fetch_lfw_people(min_faces_per_person=70, resize=0.4,
                     data_home=datasets_folder)

    print('Not downloading the sentiment140 data as we will not cover ')
    print('notebook 10')
    # check_sentiment140(datasets_folder)
    print("=> Success!")