-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathTrollTweets.rb
More file actions
319 lines (281 loc) · 10.6 KB
/
TrollTweets.rb
File metadata and controls
319 lines (281 loc) · 10.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
require 'csv'
require 'open-uri'
# Force UTF-8 for all file I/O: the troll-tweet CSVs contain non-ASCII
# text (Cyrillic, emoji, URLs), and the default external encoding is
# locale-dependent on some systems.
Encoding.default_external = Encoding::UTF_8
Encoding.default_internal = Encoding::UTF_8
# Prints how many tweets fall into each account_category, one line per
# category, followed by a blank line.
#
# filename - a local CSV path, or an http(s) URL to download.
def count_troll_types(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open instead of bare open(): Kernel#open on an attacker-influenced
    # string can spawn a subprocess ("|cmd"), and the URL form of open()
    # was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  # category name (String) => tweet count (Integer).
  # Hash.new(0) defaults missing keys to 0, so we can increment directly
  # without first checking whether the key exists.
  categories = Hash.new(0)
  csv.each do |row|
    categories[row['account_category']] += 1
  end
  # print the key/value pairs
  categories.each do |key, value|
    puts "#{key} occurs #{value} times"
  end
  puts
end
#################################################################
##############################################################
# Prints the 10 dates with the most tweets (or fewer, if the file has
# fewer distinct dates), most frequent first, followed by a blank line.
#
# filename - a local CSV path, or an http(s) URL to download.
def count_common_date(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open avoids Kernel#open's "|cmd" subprocess footgun; the URL form
    # of bare open() was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  # date string => tweet count; Hash.new(0) defaults missing keys to 0.
  count = Hash.new(0)
  csv.each do |row|
    # publish_date looks like "M/D/YYYY H:MM" — keep only the date part.
    count[row['publish_date'].split.first] += 1
  end
  # Sort once descending instead of the original's repeated max_by + delete,
  # which re-scanned the hash 10 times and crashed (nil[0]) whenever the
  # file had fewer than 10 distinct dates.
  count.sort_by { |_date, n| -n }.first(10).each do |date, n|
    puts "Date: #{date} had #{n} tweets."
  end
  puts
end
##################################################################
##############################################################
# Prints the hour (as the string that appears in publish_date) with the
# most tweets, followed by a blank line. Prints nothing extra for an
# empty file instead of crashing.
#
# filename - a local CSV path, or an http(s) URL to download.
def count_common_hour(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open avoids Kernel#open's "|cmd" subprocess footgun; the URL form
    # of bare open() was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  # hour string => tweet count; Hash.new(0) defaults missing keys to 0.
  count = Hash.new(0)
  csv.each do |row|
    # publish_date is "date time" with time "H:MM" — take just the hour.
    hour = row['publish_date'].split[1].split(':').first
    count[hour] += 1
  end
  top = count.max_by { |_hour, n| n }
  # Guard: the original crashed on temp[0] when the file had no rows.
  puts "hour #{top[0]} had the most tweets at #{top[1]}." if top
  puts
end
##################################################################
##############################################################
# Prints every word tied for the highest occurrence count across all
# tweet contents, followed by a blank line.
#
# filename - a local CSV path, or an http(s) URL to download.
def count_common_word(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open avoids Kernel#open's "|cmd" subprocess footgun; the URL form
    # of bare open() was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  # word => occurrence count; Hash.new(0) defaults missing keys to 0.
  count = Hash.new(0)
  csv.each do |row|
    # Guard against a missing content cell (the original crashed on nil).
    (row['content'] || '').split.each { |word| count[word] += 1 }
  end
  # Compute the max ONCE. The original evaluated count.values.max inside
  # the print loop, which made the report pass O(n^2) in distinct words.
  max = count.values.max
  count.each do |word, n|
    puts " '#{word}' is most common which occurs #{n} times." if n == max
  end
  puts
end
##################################################################
##############################################################
# Prints the 3 most common tweet languages (or fewer, if the file has
# fewer distinct languages), ranked, followed by a blank line.
#
# filename - a local CSV path, or an http(s) URL to download.
def find_most_common_langs(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open avoids Kernel#open's "|cmd" subprocess footgun; the URL form
    # of bare open() was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  # language => tweet count; Hash.new(0) defaults missing keys to 0.
  count = Hash.new(0)
  csv.each do |row|
    count[row['language']] += 1
  end
  # Sort once descending instead of the original's 3x max_by + delete,
  # which crashed (nil[0]) when fewer than 3 languages were present.
  count.sort_by { |_lang, n| -n }.first(3).each_with_index do |(lang, n), i|
    puts "#{lang} is the #{i + 1} most common language used #{n} times."
  end
  puts
end
##################################################################
##############################################################
# For each account category (largest category first), prints the most
# common non-stop-word across that category's tweet contents.
#
# filename - a local CSV path, or an http(s) URL to download.
def common_word_among_account_type(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open avoids Kernel#open's "|cmd" subprocess footgun; the URL form
    # of bare open() was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  # Stop words to skip (https://gist.github.com/gravitymonkey/2406023),
  # plus hand-tuned junk tokens found by running repeatedly.
  common = ["the","of","and","a","to","in","is","you","that","it","he","was","for","on","are","as","with","his","they","I","at","be","this","have","from","or","one","had","by","word","but","not","what","all","were","we","when","your","can","said","there","use","an","each","which","she","do","how","their","if","will","up","other","about","out","many","then","them","these","so","some","her","would","make","like","him","into","time","has","look","two","more","write","go","see","number","no","way","could","people","my","than","first","water","been","call","who","oil","its","now","find","long","down","day","did","get","come","made","may","part","The","I'm","me","\\|","-","me","RT","That","https://t.co/1KPXto2HfW"]
  # Hash lookup is O(1) vs Array#include?'s O(n) — this runs once per word.
  stopword = common.each_with_object({}) { |w, h| h[w] = true }
  # ONE pass over the rows (the original re-scanned the entire parsed CSV
  # once per category, O(rows * categories)): count tweets per category and
  # per-category word frequencies simultaneously.
  tweets_per_category = Hash.new(0)
  # category => (word => count); block form gives each key its OWN inner hash.
  word_counts = Hash.new { |h, k| h[k] = Hash.new(0) }
  csv.each do |row|
    category = row['account_category']
    tweets_per_category[category] += 1
    (row['content'] || '').split.each do |word|
      word_counts[category][word] += 1 unless stopword[word]
    end
  end
  # Report biggest categories first, matching the original's ordering.
  tweets_per_category.sort_by { |_cat, n| -n }.each do |category, _n|
    word, times = word_counts[category].max_by { |_w, c| c }
    # Skip a category whose every word was filtered (original crashed here).
    next if word.nil?
    # "commmon" typo in the original message fixed.
    puts "#{word} is the most common word said by #{category} #{times} times"
  end
end
##################################################################
#################################################################
# Prints the 5 most-followed distinct authors (or fewer, if the file has
# fewer distinct authors) with the language each tweets in. The follower
# count and language come from each author's FIRST row in the file.
#
# filename - a local CSV path, or an http(s) URL to download.
def most_followed_accounts_language(filename)
  if filename.start_with? 'http'
    # read a whole 90 MB file; this will be SLOW!!!!
    # URI.open avoids Kernel#open's "|cmd" subprocess footgun; the URL form
    # of bare open() was removed in Ruby 3.0.
    csv_text = URI.open(filename).read
  else
    # read the text of the whole file
    csv_text = File.read(filename)
  end
  # parse the file, assuming it has a header row at the top
  csv = CSV.parse(csv_text, headers: true)
  followers = {} # author => follower count (first occurrence wins)
  language = {}  # author => language     (first occurrence wins)
  csv.each do |row|
    author = row['author']
    next if followers.key?(author) # only the author's first row counts
    followers[author] = row['followers'].to_i
    language[author] = row['language']
  end
  # Sort once descending for the top 5. (The original's comment said "3
  # greatest" but its loop printed 5; it also crashed on nil when the file
  # had fewer than 5 distinct authors.)
  followers.sort_by { |_author, n| -n }.first(5).each_with_index do |(author, n), i|
    puts "#{author} is the #{i + 1} most followed author using #{language[author]} with #{n} followers."
  end
end
##################################################################
# Driver section: uncomment the call(s) you want to run.
# The 'test-tweets.csv' runs expect a small local file; the URL runs
# download a large CSV from GitHub on every invocation and are slow.
#these are for only 1000 tweets
#count_troll_types('test-tweets.csv')
#count_common_date('test-tweets.csv')
#count_common_hour('test-tweets.csv')
#count_common_word('test-tweets.csv')
########################################
#these are for about 250k tweets
#find_most_common_langs('https://raw.githubusercontent.com/fivethirtyeight/russian-troll-tweets/master/IRAhandle_tweets_1.csv')
common_word_among_account_type('https://raw.githubusercontent.com/fivethirtyeight/russian-troll-tweets/master/IRAhandle_tweets_1.csv')
#most_followed_accounts_language('https://raw.githubusercontent.com/fivethirtyeight/russian-troll-tweets/master/IRAhandle_tweets_1.csv')