#encoding=utf-8
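"""Douban Top 250 spider.

Crawls the Douban Top 250 list, follows each film's detail page for its
synopsis and review links, collects the cast list, and fetches the full
text of two reviews per film, appending everything to CSV files under
the data/ directory. Requires the third-party fake_useragent package.
"""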
from urllib import request
from urllib import parse
from fake_useragent import UserAgent
import os
import random
import time
import re
import csv


class DouBanSpider:
    def __init__(self, url):
        self.baseurl = url

    def get_ua(self):
        # Build request headers with a randomized Chrome User-Agent
        ua = UserAgent()
        headers = {
            'User-Agent': ua.chrome
        }
        return headers

    def get_html(self, url):
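        """Fetch one Top 250 index page and follow each film on it.

        Returns four lists of tuples: Top 250 entries, per-film details,
        cast information, and review texts.
        """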
        info_list_top = []
        info_list_films = []
        info_list_actors = []
        info_list_comments = []
        # Send the request
        req = request.Request(url=url, headers=self.get_ua())
        response = request.urlopen(req)
        html = response.read().decode('utf-8')
        # Scrape the Top 250 entries: detail URL, title, year / region / genre
        re_exp = '<div class="info".*?href="(.*?)"' + \
                 '.*?<span class="title">(.*?)</span>' + \
                 r'.*?<div class="bd".*?<p class="">.*?(\d*) / (.*?) / (.*?)\n.*?</p>'
        info_list_top = self.parse_html(text=html, re_exp=re_exp)
        for info in info_list_top:
            film_title = info[1]
            req = request.Request(url=info[0], headers=self.get_ua())
            response = request.urlopen(req)
            html = response.read().decode('utf-8')
            # Scrape the synopsis, review links, and cast-list link.
            # The alternation (|) covers the two synopsis markups Douban
            # uses: a hidden "all" span for long texts, or a plain
            # property span.
            comment_num = 2  # number of reviews to collect per film
            re_exp = '<div class="indent".*?<span class="all hidden">\n(.*?)</span>' + \
                     '.*?<div id="celebrities".*?<a href="(.*?)"' + \
                     '.*?<div class="main-bd".*?<h2><a href="(.*?)"' * comment_num + \
                     '|<div class="indent".*?<span property.*?>\n(.*?)</span>' + \
                     '.*?<div id="celebrities".*?<a href="(.*?)"' + \
                     '.*?<div class="main-bd".*?<h2><a href="(.*?)"' * comment_num
            info_tuple_film = self.parse_html(text=html, re_exp=re_exp)[0]
            # Each alternative yields comment_num + 2 groups; keep the
            # half of the match that actually fired
            if info_tuple_film[0] == '':
                info_tuple_film = info_tuple_film[comment_num + 2:]
            else:
                info_tuple_film = info_tuple_film[:comment_num + 2]
            # Strip extraneous whitespace and markup from the synopsis
            info_list_tmp = list(info_tuple_film)
            info_list_tmp[0] = (info_list_tmp[0].replace('\n', '').replace(' ', '')
                                .replace('\u3000', '').replace('<br />', '')
                                .replace('<br/>', ''))
            info_list_tmp.insert(0, film_title)
            info_tuple_film = tuple(info_list_tmp)
            info_list_films.append(info_tuple_film)
            # Scrape the cast-list page
            actor_ref = info_tuple_film[2]
            req = request.Request(url='https://movie.douban.com' + actor_ref,
                                  headers=self.get_ua())
            response = request.urlopen(req)
            html = response.read().decode('utf-8')
            # Parse in three passes:
            # 1) narrow down to the cast section
            re_exp = 'Cast</h2>.*?<ul class="cele.*?line">(.*?)</ul>'
            actor_list_range_str = self.parse_html(text=html, re_exp=re_exp)[0]
            # 2) pull out each actor's name, role, and works block
            re_exp = '<li class="cele.*?<div class="info".*?title="(.*?)"' + \
                     '.*?title="(.*?)"' + \
                     '.*?<span class="works">(.*?)</span>'
            actors_list = self.parse_html(text=actor_list_range_str, re_exp=re_exp)
            # 3) expand each actor's works block into individual titles
            for i in range(len(actors_list)):
                works_range_str = actors_list[i][2]
                re_exp = 'title=(.*?)>'
                works_list = self.parse_html(text=works_range_str, re_exp=re_exp)
                actor_list = list(actors_list[i])
                actor_list.pop()
                actor_list = actor_list + works_list
                actors_list[i] = tuple(actor_list)
            # Wrap the actor rows in <title> ... </title> marker rows
            info_list_actors.append(('<' + film_title + '>',))
            for actor in actors_list:
                info_list_actors.append(actor)
            info_list_actors.append(('</' + film_title + '>',))
            # Scrape the review texts
            comments_list = [film_title]
            for comment_ref in info_tuple_film[3:]:
                req = request.Request(url=comment_ref, headers=self.get_ua())
                response = request.urlopen(req)
                html = response.read().decode('utf-8')
                re_exp = '<div class="review-content.*?>(.*?)</div>'
                info_str_comment = self.parse_html(text=html, re_exp=re_exp)[0]
                info_str_comment = (info_str_comment.replace(' ', '').replace('\n', '')
                                    .replace('<p>', '').replace('</p>', '')
                                    .replace('<br>', ''))
                comments_list.append(info_str_comment)
            info_list_comments.append(tuple(comments_list))
        return [info_list_top, info_list_films, info_list_actors, info_list_comments]

    def parse_html(self, text, re_exp):
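        """Run re_exp over text in DOTALL mode and return all matches."""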
        pattern = re.compile(re_exp, re.S)
        info_list = pattern.findall(text)
        return info_list

    # For inspecting the HTML structure locally
    def save_html(self, filename, content, mod='w'):
        filename = filename + '.html'
        with open(filename, mod, encoding='utf-8') as f:
            f.write(content)

    def save_csv(self, filename, content, mod='w'):
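        """Write (or append, with mod='a') rows to <filename>.csv."""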
        filename = filename + '.csv'
        with open(filename, mod, newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerows(content)

    def run(self, start, end):
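        """Crawl index pages start through end (25 films per page)."""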
        start_time = time.time()
        # Make sure the output directory exists before writing
        os.makedirs('data', exist_ok=True)
        for page in range(start, end + 1):
            start_time_epoch = time.time()
            # Encode the query-string parameter for this page
            index = (page - 1) * 25
            params = parse.urlencode({'start': str(index)})
            url = self.baseurl.format(params)
            # Fetch and parse the page
            info = self.get_html(url=url)
            print('Page %d scraped successfully' % page)
            try:
                # Save the Top 250 list
                if len(info[0]) != 0:
                    self.save_csv(filename='data/top250', content=info[0], mod='a')
                # Save the film synopses and review URLs
                if len(info[1]) != 0:
                    self.save_csv(filename='data/films', content=info[1], mod='a')
                # Save the cast information
                if len(info[2]) != 0:
                    self.save_csv(filename='data/actors', content=info[2], mod='a')
                # Save the reviews
                if len(info[3]) != 0:
                    self.save_csv(filename='data/comments', content=info[3], mod='a')
                print('Page %d saved successfully' % page)
            except Exception as e:
                print('Save failed: %s' % e)
            # Pause briefly between pages to be polite to the server
            time.sleep(random.randint(1, 2))
            end_time_epoch = time.time()
            print('Time used every epoch is %fs' % (end_time_epoch - start_time_epoch))
        end_time = time.time()
        print('All time used is %fs' % (end_time - start_time))


if __name__ == '__main__':
    baseurl = 'https://movie.douban.com/top250?{}'
    spider = DouBanSpider(url=baseurl)
    start_index = 1
    end_index = 10
    spider.run(start=start_index, end=end_index)