#!/usr/bin/env python3
"""
Python 3.8

This script extracts uncommon words from a provided source. Intended users are
The source can be a text file, a PDF file or a URL. Two output files are generated:
an HTML file and a plain text file. The HTML file contains each word with its
meaning and a link to a more detailed explanation.

Note:
    excluded_word.txt contains the list of words to exclude (whitespace-separated).
    Run 'pip install -r requirements.txt' to install dependencies.

Usage: python word_extractor.py -s <file/url> -t [minimum word length] -o [output file name]
    -s  Input source: text file, PDF file or URL
    -t  Minimum word length to consider (optional, default 3)
    -o  Output filename base (optional, default 'result')

Examples:
    python word_extractor.py -s Meet_Joe_Black.srt

author: <arshadm78 @ yahoo.com>
"""
import argparse
import os
import re
from urllib.parse import urlparse

import nltk
import PyPDF2
import requests
from bs4 import BeautifulSoup

# Fetch the NLTK corpora needed below (no-ops if already downloaded)
nltk.download("stopwords", quiet=True, raise_on_error=True)
nltk.download("wordnet", quiet=True, raise_on_error=True)
nltk.download("words", quiet=True, raise_on_error=True)

from nltk.corpus import stopwords, words
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer

wnl = WordNetLemmatizer()
def uri_validator(x):
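    """Return True if x looks like a valid URL (it has both a scheme and a network location)."""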
    try:
        result = urlparse(x)
        return all([result.scheme, result.netloc])
    except (ValueError, AttributeError):
        return False
def gettextfromurl(url):
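    """Download the page at url and return a list of lower-cased words from its visible text."""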
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    # Collect lower-cased words from all visible text nodes
    word_list = []
    for text in soup.stripped_strings:
        word_list.extend(text.lower().split())
    return word_list
def gettextfrompdf(file):
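    """Extract lower-cased words from every page of a PDF file."""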
    word_list = []
    # Open the PDF file and walk through it page by page
    with open(file, "rb") as pdf_file:
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        for pdf_page in pdf_reader.pages:
            # Extract the text from the page and split it into words
            page_text = pdf_page.extract_text().lower()
            word_list += page_text.split()
    return word_list
def gettext(file):
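    """Read a plain-text file and return a list of lower-cased word-like tokens."""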
    # Read the file, strip periods and double dashes, then pull out word-like tokens
    with open(file, "r") as text_file:
        text = text_file.read().lower().replace(".", "").replace("--", "")
    pattern = r"[a-zA-Z\-.'/]+"
    word_list = re.findall(pattern, text)
    return word_list
def getFinalList(text_list, min_length=3):
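    """Lemmatize the input words and return the unique, uncommon English words longer than min_length."""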
    wordnet_tag = ["n", "s", "a", "r", "v"]
    unique_list = []
    # Words listed in excluded_word.txt are always skipped
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "excluded_word.txt"), "r") as f_object:
        common_words = set(f_object.read().split())
    stop_words = set(stopwords.words("english"))
    full_list = set(words.words())
    # Traverse all elements
    for x in text_list:
        lem = x
        for t in wordnet_tag:
            lem1 = wnl.lemmatize(x, t)
            # Prefer a lemmatized form if any part-of-speech tag changes the word
            if lem1 != x:
                lem = lem1
        # Keep the lemma only if it is long enough, alphabetic, not excluded,
        # not a stop word, and a recognized English word
        if len(lem) > min_length and lem.isalpha():
            if lem not in common_words and lem not in stop_words and lem in full_list:
                if lem not in unique_list:
                    unique_list.append(lem)
    return unique_list
def main(args=None):
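    """Parse command-line arguments (or the given args list), extract words from the source, and write the HTML and text outputs."""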
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--source", help="Input source: text file, PDF file or URL", required=True, type=str
    )
    parser.add_argument(
        "-t", "--threshold", help="Minimum word length to consider", default=3, type=int
    )
    parser.add_argument(
        "-o",
        "--output",
        help="Output files base name",
        default="result",
        required=False,
        type=str,
    )
    arg_list = parser.parse_args(args)
    print("Processing...")
    # Pick the extraction routine based on the kind of source
    word_lst = []
    if uri_validator(arg_list.source):
        word_lst = gettextfromurl(arg_list.source)
    elif arg_list.source.endswith(".pdf"):
        word_lst = gettextfrompdf(arg_list.source)
    else:
        word_lst = gettext(arg_list.source)
    final_list = getFinalList(word_lst, arg_list.threshold)
    # Define the initial HTML content
    html_content = "<html><head></head><body>"
    html_content += f"<h1>Source: {arg_list.source}</h1>"
    count = 1
    for word in final_list:
        synsets = wn.synsets(word)
        # Skip words WordNet knows nothing about
        if len(synsets) == 0:
            continue
        html_content += f"<h2>{count} - <a href='https://www.merriam-webster.com/dictionary/{word}'>{word.capitalize()}</a></h2>"
        html_content += "<ul>"
        for synset in synsets:
            # One list item per sense: the definition followed by its usage examples
            html_content += f"<li>{synset.definition().capitalize()}"
            for example in synset.examples():
                html_content += f"<br>'{example.capitalize()}'"
            html_content += "</li>"
        html_content += f"<li><a href='https://www.google.com/search?tbm=isch&q={word}'>Image search</a></li>"
        html_content += "</ul>"
        count += 1
    html_content += "</body></html>"
    # Write the HTML content to a new file
    with open(arg_list.output + ".html", "w", encoding="utf-8") as f:
        f.write(html_content)
    # Write the plain word list to a text file
    with open(arg_list.output + ".txt", "w", encoding="utf-8") as f:
        f.write("\n".join(final_list))
    print(f"HTML written to {arg_list.output}.html")
    print(f"Words written to {arg_list.output}.txt")
if __name__ == "__main__":
    try:
        main()
    except Exception:
        import traceback

        traceback.print_exc()