diff --git a/Common/TaskController.py b/Common/TaskController.py index 91dfac0..e40e47d 100644 --- a/Common/TaskController.py +++ b/Common/TaskController.py @@ -60,7 +60,7 @@ def __init__(self): self.TimeDate = str(t.strftime("%Y%m%d-%H%M")) self.logger.info("SimplyEmail started at: " + self.TimeDate) except Exception as e: - print e + print(e) # def TestModule(self, module, domain): # ModuleName = module @@ -95,13 +95,13 @@ def _execute_api_module(self,Module): if Module.apikeyv: e = " [*] API module key loaded for: " + \ Module.name - print helpers.color(e, status=True) + print(helpers.color(e, status=True)) self.logger.info("_execute_api_module: API key present") return True else: e = " [*] No API module key loaded for: " + \ Module.name - print helpers.color(e, firewall=True) + print(helpers.color(e, firewall=True)) # Exit a API module with out a key self.logger.info("_execute_api_module: no API key present") return False @@ -131,7 +131,7 @@ def ExecuteModule(self, Task_queue, Results_queue, Html_queue, Json_queue, domai Task = self.modules[Task] Module = Task.ClassName(domain, verbose=verbose) name = " [*] Starting: " + Module.name - print helpers.color(name, status=True) + print(helpers.color(name, status=True)) # Try to start the module try: # Check for API key to ensure its in .ini @@ -149,14 +149,14 @@ def ExecuteModule(self, Task_queue, Results_queue, Html_queue, Json_queue, domai else: Message = " [*] " + Module.name + \ " has completed with no Email(s)" - print helpers.color(Message, status=True) + print(helpers.color(Message, status=True)) except Exception as e: error = " [!] Error During Runtime in Module " + \ Module.name + ": " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) except Exception as e: error = " [!] 
Error Loading Module: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) def printer(self, FinalEmailList, Domain, VerifyEmail=False, NameEmails=False): # Building out the Text file that will be outputted @@ -178,14 +178,14 @@ def printer(self, FinalEmailList, Domain, VerifyEmail=False, NameEmails=False): with open(NamePath, "a") as myfile: myfile.write(PrintTitle) except Exception as e: - print e + print(e) try: with open(NamePath, "a") as myfile: myfile.write(item) x += 1 except Exception as e: - print e - print helpers.color(" [*] Completed output!", status=True) + print(e) + print(helpers.color(" [*] Completed output!", status=True)) self.logger.info("Version / Update request started") return x elif VerifyEmail: @@ -198,14 +198,14 @@ def printer(self, FinalEmailList, Domain, VerifyEmail=False, NameEmails=False): with open(VerPath, "a") as myfile: myfile.write(PrintTitle) except Exception as e: - print e + print(e) try: with open(VerPath, "a") as myfile: myfile.write(item) x += 1 except Exception as e: - print e - print helpers.color(" [*] Completed output!", status=True) + print(e) + print(helpers.color(" [*] Completed output!", status=True)) return x else: x = 0 @@ -217,14 +217,14 @@ def printer(self, FinalEmailList, Domain, VerifyEmail=False, NameEmails=False): with open(ListPath, "a") as myfile: myfile.write(PrintTitle) except Exception as e: - print e + print(e) try: with open(ListPath, "a") as myfile: myfile.write(item) x += 1 except Exception as e: - print e - print helpers.color(" [*] Completed output!", status=True) + print(e) + print(helpers.color(" [*] Completed output!", status=True)) return x def HtmlPrinter(self, HtmlFinalEmailList, Domain): @@ -290,7 +290,7 @@ def CleanResults(self, domain, scope=False): for item in HtmlSecondList: if item not in HtmlFinalList: HtmlFinalList.append(item) - print helpers.color(" [*] Completed cleaning results", status=True) + print(helpers.color(" [*] Completed cleaning results", status=True)) self.logger.info("Completed cleaning results") return FinalList, HtmlFinalList @@ -321,7 +321,7 @@ def Consumer(self, Results_queue, verbose): self.ConsumerList.append(item) except Exception as e: if verbose: - print e + print(e) def HtmlConsumer(self, Html_queue, verbose): while True: @@ -332,7 +332,7 @@ def HtmlConsumer(self, Html_queue, verbose): self.HtmlList.append(item) except Exception as e: if verbose: - print e + print(e) def JsonConsumer(self, Json_queue, verbose): while True: @@ -343,7 +343,7 @@ def JsonConsumer(self, Json_queue, verbose): self.JsonList.append(item) except Exception as e: if verbose: - print e + print(e) def _task_queue_start(self): """ @@ -417,7 +417,7 @@ def TaskSelector(self, domain, verbose=False, scope=False, Names=False, json="", # Make sure we aren't starting up Procs that aren't needed. if total_proc > len(self.modules): total_proc = len(self.modules) - for i in xrange(total_proc): + for i in range(total_proc): Task_queue.put(None) i = i procs = [] @@ -470,7 +470,7 @@ def TaskSelector(self, domain, verbose=False, scope=False, Names=False, json="", except Exception as e: error = " [!] Something went wrong with parsing results:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.logger.critical("Something went wrong with parsing results: " + str(e)) try: if not json: @@ -478,7 +478,7 @@ def TaskSelector(self, domain, verbose=False, scope=False, Names=False, json="", except Exception as e: error = " [!] 
Something went wrong with outputixng results:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.logger.critical("Something went wrong with outputixng results: " + str(e)) try: if json: @@ -488,7 +488,7 @@ def TaskSelector(self, domain, verbose=False, scope=False, Names=False, json="", except Exception as e: error = " [!] Something went wrong with HTML results:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.logger.critical("Something went wrong with HTML results:: " + str(e)) break for p in procs: @@ -518,7 +518,7 @@ def TaskSelector(self, domain, verbose=False, scope=False, Names=False, json="", FinalEmailList, domain, VerifyEmail=Verify) # save seperate file for verified emails except Exception as e: - print e + print(e) try: if Names: if BuiltNames: @@ -526,7 +526,7 @@ def TaskSelector(self, domain, verbose=False, scope=False, Names=False, json="", except Exception as e: error = " [!] Something went wrong with outputting results of Built Names:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) if not json: self.CompletedScreen(FinalCount, BuiltNameCount, domain) @@ -548,7 +548,7 @@ def TestModule(self, domain, module, verbose=False, scope=False, Names=False, js if module in Task: Task_queue.put(Task) # Only use one proc since this is a test module - for i in xrange(total_proc): + for i in range(total_proc): Task_queue.put(None) procs = [] for thread in range(total_proc): @@ -598,7 +598,7 @@ def TestModule(self, domain, module, verbose=False, scope=False, Names=False, js except Exception as e: error = " [!] Something went wrong with parsing results:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.logger.critical("Something went wrong with parsing results: " + str(e)) try: if not json: @@ -606,7 +606,7 @@ def TestModule(self, domain, module, verbose=False, scope=False, Names=False, js except Exception as e: error = " [!] Something went wrong with outputting results:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.logger.critical("Something went wrong with outputting results: " + str(e)) try: if json: @@ -616,7 +616,7 @@ def TestModule(self, domain, module, verbose=False, scope=False, Names=False, js except Exception as e: error = " [!] Something went wrong with HTML results:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.logger.critical("Something went wrong with HTML results: " + str(e)) # Check for valid emails if user wants break @@ -649,7 +649,7 @@ def TestModule(self, domain, module, verbose=False, scope=False, Names=False, js FinalEmailList, domain, VerifyEmail=Verify) # save Seprate file for verified emails except Exception as e: - print e + print(e) try: if Names: if BuiltNames: @@ -657,7 +657,7 @@ def TestModule(self, domain, module, verbose=False, scope=False, Names=False, js except Exception as e: error = " [!] 
Something went wrong with outputting results of Built Names:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) if not json: self.CompletedScreen(FinalCount, BuiltNameCount, domain) @@ -673,7 +673,7 @@ def NameBuilder(self, domain, emaillist, Verbose=False): ValidFormat = ['{first}.{last}', '{first}{last}', '{f}{last}', '{f}.{last}', '{first}{l}', '{first}_{last}', '{first}'] line = " [*] Now attempting to build Names:\n" - print line + print(line) CleanNames = [] # Query for Linkedin Names - Adapted from # https://github.com/pan0pt1c0n/PhishBait @@ -683,7 +683,7 @@ def NameBuilder(self, domain, emaillist, Verbose=False): if LNames: e = ' [*] LinkedinScraper has Gathered: ' + \ str(len(LNames)) + ' Names' - print helpers.color(e, status=True) + print(helpers.color(e, status=True)) self.logger.info("LinkedInScraper has Gathered: " + str(len(LNames))) for raw in LNames: try: @@ -691,27 +691,27 @@ def NameBuilder(self, domain, emaillist, Verbose=False): if name: CleanNames.append(name) except Exception as e: - print e + print(e) self.logger.error("Issue cleaning LinkedInNames: " + str(e)) # Query for Connect6 Names c6 = Connect6.Connect6Scraper(domain, Verbose=Verbose) urllist = c6.Connect6AutoUrl() self.title() - print helpers.color(" [*] Now Starting Connect6 Scrape:") + print(helpers.color(" [*] Now Starting Connect6 Scrape:")) self.logger.info("Now starting Connect6 scrape") if urllist: line = " [*] SimplyEmail has attempted to find correct URL for Connect6:\n" line += " URL detected: " + \ helpers.color(urllist[0], status=True) - print line + print(line) Question = " [>] Is this URL correct?: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) if Answer.upper() in "YES": Names = c6.Connect6Download(urllist[0]) if Names: e = ' [*] Connect6 has Gathered: ' + \ str(len(Names)) + ' Names' - print helpers.color(e, status=True) + print(helpers.color(e, status=True)) for raw in Names: name = c6.Connect6ParseName(raw) if name: @@ -719,16 +719,16 @@ def NameBuilder(self, domain, emaillist, Verbose=False): else: while True: for item in urllist: - print " Potential URL: " + item + print(" Potential URL: " + item) e = ' [!] GoogleDork This: site:connect6.com "' + \ str(domain)+'"' - print helpers.color(e, bold=False) - print " [-] Commands Supported: (B) ack - (R) etry" + print(helpers.color(e, bold=False)) + print(" [-] Commands Supported: (B) ack - (R) etry") Question = " [>] Please Provide a URL: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) if Answer.upper() in "BACK": e = " [!] Skipping Connect6 Scrape!" - print helpers.color(e, firewall=True) + print(helpers.color(e, firewall=True)) break if Answer: break @@ -737,7 +737,7 @@ def NameBuilder(self, domain, emaillist, Verbose=False): if Names: e = ' [*] Connect6 has Gathered: ' + \ str(len(Names)) + ' Names' - print helpers.color(e, status=True) + print(helpers.color(e, status=True)) for raw in Names: name = c6.Connect6ParseName(raw) if name: @@ -745,40 +745,40 @@ def NameBuilder(self, domain, emaillist, Verbose=False): else: line = " [*] SimplyEmail has attempted to find correct URL for Connect6:\n" line += " URL was not detected!" - print line + print(line) e = ' [!] 
GoogleDork This: site:connect6.com "'+str(domain)+'"' - print helpers.color(e, bold=False) + print(helpers.color(e, bold=False)) while True: - print " [-] Commands Supported: (B) ack - (R) etry" + print(" [-] Commands Supported: (B) ack - (R) etry") Question = " [>] Please Provide a URL: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) if Answer.upper() in "BACK": e = " [!] Skipping Connect6 Scrape!" - print helpers.color(e, firewall=True) + print(helpers.color(e, firewall=True)) break if Answer: break if Answer.upper() != "B": Names = c6.Connect6Download(Answer) - print Names + print(Names) if Names: e = ' [*] Connect6 has Gathered: ' + \ str(len(Names)) + ' Names' - print helpers.color(e, status=True) + print(helpers.color(e, status=True)) for raw in Names: name = c6.Connect6ParseName(raw) if name: CleanNames.append(name) self.title() - print helpers.color(' [*] Names have been built:', status=True) - print helpers.color(' [*] Attempting to resolve email format', status=True) + print(helpers.color(' [*] Names have been built:', status=True)) + print(helpers.color(' [*] Attempting to resolve email format', status=True)) Em = EmailFormat.EmailFormat(domain, Verbose=Verbose) Format = Em.EmailHunterDetect() if Format: e = ' [!] Auto detected the format: ' + str(Format) - print helpers.color(e, status=True) + print(helpers.color(e, status=True)) if not Format: - print helpers.color(" [*] Now attempting to manually detect format (slow)!") + print(helpers.color(" [*] Now attempting to manually detect format (slow)!")) Format = Em.EmailDetect(CleanNames, domain, emaillist) # Now check if we have more than one result in the list # This due to how I perform checks, in rare cases I had more than @@ -789,21 +789,21 @@ def NameBuilder(self, domain, emaillist, Verbose=False): try: for item in Format: line += ' * Format: ' + item + '\n' - print line + print(line) except: p = " [*] No email samples gathered to show." - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) line = ' [*] Here are a few samples of the emails obtained:\n' for i in range(1, 6, 1): try: line += ' %s) %s \n' % (i, emaillist[i]) except: pass - print line + print(line) while True: s = False Question = " [>] Please provide a valid format: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) try: for item in ValidFormat: if str(Answer) == str(item): @@ -818,7 +818,7 @@ def NameBuilder(self, domain, emaillist, Verbose=False): else: Format = str(Format[0]) if not Format: - print helpers.color(' [!] Failed to resolve format of email', firewall=True) + print(helpers.color(' [!] 
Failed to resolve format of email', firewall=True)) line = helpers.color( ' [*] Available formats supported:\n', status=True) line += ' {first}.{last} = alex.alex@domain.com\n' @@ -829,7 +829,7 @@ def NameBuilder(self, domain, emaillist, Verbose=False): line += ' {first}.{l} = j.amesh@domain.com\n' line += ' {first}_{last} = james_amesh@domain.com\n' line += ' {first} = james@domain.com\n\n' - print line + print(line) if len(emaillist) > 0: line = ' [*] Here are a few samples of the emails obtained:\n' line += ' 1)' + emaillist[0] + '\n' @@ -840,14 +840,14 @@ def NameBuilder(self, domain, emaillist, Verbose=False): line += ' 3)' + emaillist[2] except: pass - print line + print(line) else: line = ' [*] No unique emails discovered to display (May have to go manual)!\n' - print helpers.color(line, firewall=True) + print(helpers.color(line, firewall=True)) while True: s = False Question = " [>] Please provide a valid format: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) try: for item in ValidFormat: if str(Answer) == str(item): @@ -880,7 +880,7 @@ def load_modules(self): return self.modules def ListModules(self): - print helpers.color(" [*] Available Modules are:\n", blue=True) + print(helpers.color(" [*] Available Modules are:\n", blue=True)) self.logger.debug("User Executed ListModules") x = 1 ordList = [] @@ -893,19 +893,19 @@ def ListModules(self): name = 'Modules/' + name finalList.append(name) for name in finalList: - print "\t%s)\t%s" % (x, '{0: <24}'.format(name)) + print("\t%s)\t%s" % (x, '{0: <24}'.format(name))) x += 1 - print "" + print("") def title(self): os.system('clear') # stolen from Veil :) self.logger.debug("Title executed") - print " ============================================================" - print " Current Version: " + self.version + " | Website: CyberSyndicates.com" - print " ============================================================" - print " Twitter: @real_slacker007 | Twitter: @Killswitch_gui" - print " ============================================================" + print(" ============================================================") + print(" Current Version: " + self.version + " | Website: CyberSyndicates.com") + print(" ============================================================") + print(" Twitter: @real_slacker007 | Twitter: @Killswitch_gui") + print(" ============================================================") def title_screen(self): self.logger.debug("Title_screen executed") @@ -921,7 +921,7 @@ def title_screen(self): $$$$$$/ $$$$$$$$/$$/ $$/ $$/ $$$$$$$/$$/$$/ ------------------------------------------------------------""" - print helpers.color(offtext, bold=False) + print(helpers.color(offtext, bold=False)) def CompletedScreen(self, FinalCount, EmailsBuilt, domain): Config = configparser.ConfigParser() @@ -942,11 +942,11 @@ def CompletedScreen(self, FinalCount, EmailsBuilt, domain): Line += " Verified Email File:\t\tEmail_List_Verified.txt\n" Line += " Domain Performed:\t\t" + str(domain) + "\n" self.title() - print Line + print(Line) # Ask user to open report on CLI Question = "[>] Would you like to launch the HTML report?: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) Answer = Answer.upper() if Answer in "NO": sys.exit(0) @@ -967,9 +967,9 @@ def VerifyScreen(self): line += " This grabs the MX records, sorts and attempts to check\n" line += " if the SMTP server sends a code other than 250 for known bad 
addresses\n" - print line + print(line) Question = " [>] Would you like to verify email(s)?: " - Answer = raw_input(helpers.color(Question, bold=False)) + Answer = input(helpers.color(Question, bold=False)) Answer = Answer.upper() if Answer in "NO": self.logger.info("User declined to run verify emails") diff --git a/Helpers/CanarioAPI.py b/Helpers/CanarioAPI.py index da5b3ae..5683766 100644 --- a/Helpers/CanarioAPI.py +++ b/Helpers/CanarioAPI.py @@ -44,7 +44,7 @@ def retrieve(s, url, data=None, post=False): # 'data' must be in the form of a dictionary def build_url(s, data): - d = ['%s=%s' % (x, y) for x, y in data.iteritems()] + d = ['%s=%s' % (x, y) for x, y in list(data.items())] return '%s&%s' % (s.url, '&'.join(d)) # Does a search--whee. Bangs can be specified via separate argument. This is due to plan to make changes to the search for API users diff --git a/Helpers/Connect6.py b/Helpers/Connect6.py index 720fcb6..cf32f90 100644 --- a/Helpers/Connect6.py +++ b/Helpers/Connect6.py @@ -1,10 +1,10 @@ #!/usr/bin/env python -import helpers +from . import helpers import requests import configparser -import urlparse +import urllib.parse import logging -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class Connect6Scraper(object): @@ -24,7 +24,7 @@ def __init__(self, domain, Verbose=False): self.FinalAnswer = '' self.verbose = Verbose except Exception as e: - print e + print(e) ''' Try to find the connect6 url for the domain @@ -44,14 +44,14 @@ def Connect6AutoUrl(self): except Exception as e: error = "[!] Major issue with Google Search: for Connect6 URL" + \ str(e) - print helpers.color(error, warning=True) + print((helpers.color(error, warning=True))) try: rawhtml = r.content soup = BeautifulSoup(rawhtml) for a in soup.findAll('a', href=True): try: - l = urlparse.parse_qs( - urlparse.urlparse(a['href']).query)['q'] + l = urllib.parse.parse_qs( + urllib.parse.urlparse(a['href']).query)['q'] if 'site:connect6.com' not in l[0]: l = l[0].split(":") urllist.append(l[2]) @@ -67,7 +67,7 @@ def Connect6AutoUrl(self): y += 1 return urllist except Exception as e: - print e + print(e) return urllist def Connect6Download(self, url): @@ -82,12 +82,12 @@ def Connect6Download(self, url): url = 'http://' + str(url) if self.verbose: p = " [*] Now downloading Connect6 Source: " + str(url) - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) r = requests.get(url, headers=self.UserAgent) except Exception as e: error = " [!] 
Major issue with Downloading Connect6 source:" + \ str(e) - print helpers.color(error, warning=True) + print((helpers.color(error, warning=True))) try: if r: rawhtml = r.content @@ -99,13 +99,13 @@ def Connect6Download(self, url): if self.verbose: p = " [*] Connect6 Name Found: " + \ str(litag.text) - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) except: pass return NameList # for a in soup.findAll('a', href=True): except Exception as e: - print e + print(e) def Connect6ParseName(self, raw): ''' diff --git a/Helpers/Converter.py b/Helpers/Converter.py index 04a685a..db9d80b 100644 --- a/Helpers/Converter.py +++ b/Helpers/Converter.py @@ -8,7 +8,7 @@ from pdfminer.converter import TextConverter from pdfminer.layout import LAParams from pdfminer.pdfpage import PDFPage -from cStringIO import StringIO +from io import StringIO class Converter(object): @@ -18,7 +18,7 @@ def __init__(self, verbose=False): self.logger = logging.getLogger("SimplyEmail.Converter") self.verbose = verbose except Exception as e: - print e + print(e) def convert_docx_to_txt(self, path): """ @@ -32,7 +32,7 @@ def convert_docx_to_txt(self, path): try: text = docx2txt.process(path) self.logger.debug("Converted docx to text: " + str(path)) - return unicode(text) + return str(text) except Exception as e: text = "" return text @@ -144,14 +144,14 @@ def convert_zip_to_text(self, path, rawtext=True): try: text += str(a[x]) except Exception as e: - print e + print(e) # pass self.logger.debug("Unzip of file complted (raw text): " + str(path)) return text else: return {name: input_zip.read(name) for name in input_zip.namelist()} except Exception as e: - print e + print(e) text = "" return text self.logger.error( diff --git a/Helpers/Download.py b/Helpers/Download.py index 15be1b3..7b3574a 100644 --- a/Helpers/Download.py +++ b/Helpers/Download.py @@ -2,9 +2,9 @@ import requests import os import configparser -import helpers +from . 
import helpers import logging -import urllib2 +import urllib.request, urllib.error, urllib.parse, http.client import time from bs4 import BeautifulSoup from random import randint @@ -21,7 +21,7 @@ def __init__(self, verbose=False): self.UserAgent = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} except Exception as e: - print e + print(e) def download_file(self, url, filetype, maxfile=100, verify=True): """ @@ -56,7 +56,7 @@ def download_file(self, url, filetype, maxfile=100, verify=True): except Exception as e: if self.verbose: - p = ' [*] Download of file failed: ' + e + p = ' [*] Download of file failed: ' + str(e) - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) self.logger.error("Failed to download file: " + str(url) + ' error: ' + str(e)) download = os.path.isfile(local_filename) return local_filename, download @@ -75,19 +75,19 @@ def download_file2(self, url, filetype, timeout=10): url = 'http://' + str(url) try: self.logger.debug("Download2 started download: " + str(url)) - response = urllib2.urlopen(url, timeout=timeout) + response = urllib.request.urlopen(url, timeout=timeout) data = response.read() download = os.path.isfile(local_filename) - except urllib2.HTTPError, e: + except urllib.error.HTTPError as e: - self.logger.debug('urllib2 HTTPError: ' + e) + self.logger.debug('urllib2 HTTPError: ' + str(e)) - except urllib2.URLError, e: + except urllib.error.URLError as e: - self.logger.debug('urllib2 URLError: ' + e) + self.logger.debug('urllib2 URLError: ' + str(e)) - except urllib2.HTTPException, e: + except http.client.HTTPException as e: - self.logger.debug('urllib2 HTTPException: ' + e) + self.logger.debug('urllib2 HTTPException: ' + str(e)) except Exception as e: if self.verbose: - p = ' [*] Download2 of file failed: ' + e + p = ' [*] Download2 of file failed: ' + str(e) - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) self.logger.error("Failed to download2 file: " + str(e)) try: with open(local_filename, 'wb+') as f: @@ -108,18 +108,18 @@ def delete_file(self, local_filename): else: if self.verbose: p = ' [*] File not found to remove : ' + local_filename - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) except Exception as e: self.logger.error("Failed to delete file: " + str(e)) if self.verbose: - print e + print(e) def GoogleCaptchaDetection(self, RawHtml): soup = BeautifulSoup(RawHtml, "lxml") if "Our systems have detected unusual traffic" in soup.text: p = " [!] Google Captcha was detected! (For best results resolve/restart -- Increase sleep/jitter in SimplyEmail.ini)" self.logger.warning("Google Captcha was detected!") - print helpers.color(p, warning=True) + print((helpers.color(p, warning=True))) return True else: return False @@ -147,7 +147,7 @@ def requesturl(self, url, useragent, timeout=10, retrytime=5, statuscode=False, if self.verbose: p = ' [!] Request for url timed out, retrying: ' + url self.logger.info('Request timed out, retrying: ' + url) - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) r = requests.get(url, headers=self.UserAgent, timeout=retrytime, verify=verify) rawhtml = r.content except requests.exceptions.TooManyRedirects: @@ -156,14 +156,14 @@ p = ' [!] Request for url resulted in bad url: ' + url self.logger.error( 'Request for url resulted in bad url: ' + url) - print helpers.color(p, warning=True) + print((helpers.color(p, warning=True))) except requests.exceptions.RequestException as e: # catastrophic error. bail. if self.verbose: p = ' [!] 
Request for url resulted in major error: ' + str(e) self.logger.critical( 'Request for url resulted in major error: ' + str(e)) - print helpers.color(p, warning=True) + print((helpers.color(p, warning=True))) except Exception as e: p = ' [!] Request for url resulted in unhandled error: ' + str(e) self.logger.critical( diff --git a/Helpers/EmailFormat.py b/Helpers/EmailFormat.py index 0935588..208c94d 100644 --- a/Helpers/EmailFormat.py +++ b/Helpers/EmailFormat.py @@ -1,8 +1,8 @@ #!/usr/bin/env python -import helpers +from . import helpers import requests import configparser -import Download +from . import Download # Email layouts supported: # {first}.{last} = alex.alex@domain.com @@ -43,7 +43,7 @@ def __init__(self, domain, Verbose=False): self.type = "&type=generic" self.etype = "generic_emails" except Exception as e: - print e + print(e) def EmailHunterDetect(self): ''' @@ -63,11 +63,11 @@ def EmailHunterDetect(self): else: if self.verbose: e = ' [!] No pattern detected via EmailHunter API' - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) return False except: error = "[!] Major issue with EmailHunter Search:" + str(e) - print helpers.color(error, warning=True) + print((helpers.color(error, warning=True))) def BuildName(self, CleanName, Format, Raw=False): ''' @@ -134,12 +134,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {f}{last}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Detect {f}.{last} format try: Set = False @@ -156,12 +156,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {f}.{last}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Detect {first}{last} try: Set = False @@ -177,12 +177,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {first}{last}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Detect {first}.{last} try: Set = False @@ -198,12 +198,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {first}.{last}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Detect {first}.{l} try: Set = False @@ -220,12 +220,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {first}.{l}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Detect {first}{l} try: Set = False @@ -242,12 +242,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {first}{l}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Detect {first}.{last} try: Set = 
False @@ -263,12 +263,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if self.verbose: r = " [*] Email format matched {first}_{last}: " + \ BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) try: Set = False Format = '{first}' @@ -282,12 +282,12 @@ def EmailDetect(self, CleanNames, Domain, FinalEmails): if Count > 0: if self.verbose: r = " [*] Email format matched {first}: " + BuiltEmail - print helpers.color(r, firewall=True) + print((helpers.color(r, firewall=True))) if not Set: FinalResult.append(Format) Set = True except Exception as e: - print e + print(e) # Finaly return the list of Formats # print FinalResult return FinalResult @@ -312,15 +312,15 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): FirstIntial) + str(LastName) + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] NO Names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] NO Names built, please do a sanity check!', warning=True))) return False elif Format == '{f}.{last}': for name in CleanNames: @@ -335,15 +335,15 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): str(LastName) + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] No names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] No names built, please do a sanity check!', warning=True))) return False elif Format == '{first}{last}': for name in CleanNames: @@ -356,15 +356,15 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): FirstName) + str(LastName) + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] No names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] No names built, please do a sanity check!', warning=True))) return False elif Format == '{first}.{last}': for name in CleanNames: @@ -377,15 +377,15 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): FirstName) + '.' + str(LastName) + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] No names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] 
No names built, please do a sanity check!', warning=True))) return False elif Format == '{first}.{l}': for name in CleanNames: @@ -399,15 +399,15 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): str(LastInitial) + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] No names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] No names built, please do a sanity check!', warning=True))) return False elif Format == '{first}{l}': for name in CleanNames: @@ -421,15 +421,15 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): FirstName) + str(LastInitial) + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] No names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] No names built, please do a sanity check!', warning=True))) return False elif Format == '{first}_{last}': for name in CleanNames: @@ -441,11 +441,11 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): BuiltName = FirstName + "_" + LastName + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails elif Format == '{first}': @@ -457,13 +457,13 @@ def EmailBuilder(self, CleanNames, Domain, Format, Verbose=True): BuiltName = Name + "@" + Domain if Verbose: e = ' [*] Email built: ' + str(BuiltName) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) if BuiltName: BuiltEmails.append(BuiltName) except Exception as e: - print e + print(e) if BuiltEmails: return BuiltEmails else: - print helpers.color(' [!] No names built, please do a sanity check!', warning=True) + print((helpers.color(' [!] 
No names built, please do a sanity check!', warning=True))) return False diff --git a/Helpers/HtmlBootStrapTheme.py b/Helpers/HtmlBootStrapTheme.py index 955bd3a..f6e528a 100644 --- a/Helpers/HtmlBootStrapTheme.py +++ b/Helpers/HtmlBootStrapTheme.py @@ -2,6 +2,7 @@ # encoding=utf8 import sys import ast +import importlib # This Classes main goal is to build the HTML output file using all self # contained CSS and JS @@ -14,7 +15,6 @@ def __init__(self, Emails, Domain): self.Domain = Domain self.Source = "" self.HTML = "" - reload(sys) - sys.setdefaultencoding('utf8') + importlib.reload(sys) def BuildHtml(self): @@ -121,14 +121,14 @@ def BuildHtml(self): def OutPutHTML(self, Path): try: with open('Helpers/bootstrap-3.3.5/SimplyEmailTemplate.html', "r") as myfile: - SourceHtml = unicode(myfile.read()) + SourceHtml = str(myfile.read()) except Exception as e: - print e + print(e) # Add my tables to the bottom of the HTML and CSS - SourceHtml += unicode(self.HTML) + SourceHtml += str(self.HTML) buildpath = Path + '/Email_List.html' try: with open(buildpath, "w") as myfile: myfile.write(SourceHtml) except Exception as e: - print e + print(e) diff --git a/Helpers/LinkedinNames.py b/Helpers/LinkedinNames.py index cf50e10..cf9a2fa 100644 --- a/Helpers/LinkedinNames.py +++ b/Helpers/LinkedinNames.py @@ -1,7 +1,6 @@ #!/usr/bin/env python -import helpers +from . import helpers import configparser -import mechanize from bs4 import BeautifulSoup @@ -25,7 +24,7 @@ def __init__(self, domain, Verbose=False): self.FinalAnswer = '' self.verbose = Verbose except Exception as e: - print e + print(e) def LinkedInNames(self): # This function simply uses @@ -58,7 +57,7 @@ def LinkedInNames(self): name = name.split(' ') if self.verbose: e = ' [*] LinkedIn Name Found: ' + str(name) - print helpers.color(e, firewall=True) + print(helpers.color(e, firewall=True)) namelist.append(name) for link in br.links(): link_list.append(link.text) @@ -72,7 +71,7 @@ def LinkedInNames(self): except Exception as e: error = " [!] Major issue with Downloading LinkedIn source:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) if namelist: return namelist @@ -122,10 +121,10 @@ def LinkedInClean(self, raw): pass if self.verbose: e = ' [*] Name Cleaned: ' + str([firstname, lastname]) - print helpers.color(e, firewall=True) + print(helpers.color(e, firewall=True)) return [firstname, lastname] except Exception as e: if self.verbose: h = " [!] Error during name building: " + str(e) - print helpers.color(h, warning=True) + print(helpers.color(h, warning=True)) return None diff --git a/Helpers/Parser.py b/Helpers/Parser.py index d49041b..ca61099 100644 --- a/Helpers/Parser.py +++ b/Helpers/Parser.py @@ -8,7 +8,7 @@ import subprocess import time from random import randint -import helpers +from . import helpers # Simple Parser Options for email enumeration. @@ -70,7 +70,7 @@ def RemoveUnicode(self): except Exception as e: self.logger.error('UTF8 decoding issues' + str(e)) p = '[!] UTF8 decoding issues Matching: ' + str(e) - print helpers.color(p, firewall=True) + print((helpers.color(p, firewall=True))) def FindEmails(self): Result = [] diff --git a/Helpers/VerifyEmails.py b/Helpers/VerifyEmails.py index 2874749..dfeae54 100644 --- a/Helpers/VerifyEmails.py +++ b/Helpers/VerifyEmails.py @@ -1,6 +1,6 @@ #!/usr/bin/env python import configparser -import helpers +from . 
import helpers import dns.resolver import socket import smtplib @@ -24,7 +24,7 @@ def __init__(self, email, email2, domain, Verbose=False): self.FinalList = [] self.verbose = True except Exception as e: - print e + print(e) def VerifyEmail(self, email, email2): ''' @@ -39,14 +39,14 @@ def VerifyEmail(self, email, email2): try: if self.verbose: e = " [*] Checking for valid email: " + str(email) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) server.connect(self.mxhost['Host']) server.helo(hostname) server.mail('email@gmail.com') code, message = server.rcpt(str(email)) server.quit() except Exception as e: - print e + print(e) if code == 250: return True else: @@ -75,13 +75,13 @@ def VerifySMTPServer(self): else: return True except Exception as e: - print e + print(e) def GetMX(self): MXRecord = [] try: if self.verbose: - print helpers.color(' [*] Attempting to resolve MX records!', firewall=True) + print((helpers.color(' [*] Attempting to resolve MX records!', firewall=True))) answers = dns.resolver.query(self.domain, 'MX') for rdata in answers: data = { @@ -95,10 +95,10 @@ def GetMX(self): self.mxhost = Newlist[0] if self.verbose: val = ' [*] MX Host: ' + str(self.mxhost['Host']) - print helpers.color(val, firewall=True) + print((helpers.color(val, firewall=True))) except Exception as e: error = ' [!] Failed to get MX record: ' + str(e) - print helpers.color(error, warning=True) + print((helpers.color(error, warning=True))) def ExecuteVerify(self): self.GetMX() @@ -108,15 +108,15 @@ def ExecuteVerify(self): IsTrue = self.VerifyEmail(item) if IsTrue: e = " [!] Email seems valid: " + str(item) - print helpers.color(e, status=True) + print((helpers.color(e, status=True))) self.FinalList.append(item) else: if self.verbose: e = " [!] Checks show email is not valid: " + str(item) - print helpers.color(e, firewall=True) + print((helpers.color(e, firewall=True))) else: e = " [!] Checks show 'Server Is Catch All' on: " + \ str(self.mxhost['Host']) - print helpers.color(e, warning=True) + print((helpers.color(e, warning=True))) return self.FinalList diff --git a/Helpers/VersionCheck.py b/Helpers/VersionCheck.py index eb05cc6..0aa2241 100644 --- a/Helpers/VersionCheck.py +++ b/Helpers/VersionCheck.py @@ -1,7 +1,7 @@ #!/usr/bin/env python import requests import configparser -import helpers +from . import helpers import logging @@ -19,7 +19,7 @@ def __init__(self, version): self.UserAgent = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} except Exception as e: - print e + print(e) def VersionRequest(self): if self.Start == "Yes": @@ -30,7 +30,7 @@ def VersionRequest(self): results = results.rstrip('\n') if str(results) != str(self.version): p = " [!] Newer Version Available, Re-Run Setup.sh to update!" - print helpers.color(p, warning=True, bold=False) + print((helpers.color(p, warning=True, bold=False))) self.logger.info( "Version / Update returned newer Version Available") self.logger.info("Version / Update request completed OK") @@ -38,4 +38,4 @@ def VersionRequest(self): error = " [!] 
Fail during Request to Update/Version Check (Check Connection)" self.logger.error( "Fail during Request to Update/Version Check (Check Connection)" + str(e)) - print helpers.color(error, warning=True) + print((helpers.color(error, warning=True))) diff --git a/Helpers/helpers.py b/Helpers/helpers.py index 6cfda87..40f10ea 100644 --- a/Helpers/helpers.py +++ b/Helpers/helpers.py @@ -4,7 +4,6 @@ import textwrap import logging import time -import magic import json import configparser import collections @@ -135,9 +134,9 @@ def modsleep(delay, jitter=0): sleepTime = random.randint(minSleep, maxSleep) time.sleep(int(sleepTime)) -def filetype(path): - m = magic.from_file(str(path)) - return m +#def filetype(path): +# m = magic.from_file(str(path)) +# return m ####################### # Setup Logging Class # @@ -169,7 +168,7 @@ def infomsg(self, message, modulename): logger = logging.getLogger(msg) logger.info(str(message)) except Exception as e: - print e + print(e) def warningmsg(self, message, modulename): try: @@ -177,4 +176,4 @@ def warningmsg(self, message, modulename): logger = logging.getLogger(msg) logger.warning(str(message)) except Exception as e: - print e + print(e) diff --git a/Helpers/messages.py b/Helpers/messages.py index eb766a4..1c81361 100644 --- a/Helpers/messages.py +++ b/Helpers/messages.py @@ -1,7 +1,7 @@ -import helpers +from . import helpers def email_count(text, Module): Length = " [*] " + Module + \ ": Gathered " + str(text) + " Email(s)!" - print helpers.color(Length, status=True) \ No newline at end of file + print((helpers.color(Length, status=True))) \ No newline at end of file diff --git a/Modules/AskSearch.py b/Modules/AskSearch.py index ef31dcd..fa4c9f4 100644 --- a/Modules/AskSearch.py +++ b/Modules/AskSearch.py @@ -36,7 +36,7 @@ def __init__(self, Domain, verbose=False): except Exception as e: self.logger.critical( 'AskSearch module failed to load: ' + str(e)) - print helpers.color("[*] Major Settings for Ask Search are missing, EXITING!\n", warning=True) + print(helpers.color("[*] Major Settings for Ask Search are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("AskSearch module started") @@ -49,7 +49,7 @@ def process(self): while self.Counter <= self.PageLimit: if self.verbose: p = ' [*] AskSearch on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.logger.info('AskSearch on page: ' + str(self.Counter)) try: url = 'http://www.ask.com/web?q=@' + str(self.Domain) + \ @@ -57,7 +57,7 @@ def process(self): except Exception as e: error = " [!] 
Major issue with Ask Search:" + str(e) self.logger.error('Major issue with Ask Search: ' + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: rawhtml = dl.requesturl(url, useragent=self.UserAgent) except Exception as e: @@ -65,7 +65,7 @@ def process(self): str(e) self.logger.error( 'Fail during Request to Ask (Check Connection): ' + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Html += rawhtml self.Counter += 1 helpers.modsleep(self.Sleep, jitter=self.Jitter) diff --git a/Modules/ExaleadDOCSearch.py b/Modules/ExaleadDOCSearch.py index 654bfb0..18585ef 100644 --- a/Modules/ExaleadDOCSearch.py +++ b/Modules/ExaleadDOCSearch.py @@ -16,8 +16,8 @@ # import for "'ascii' codec can't decode byte" error import sys -reload(sys) -sys.setdefaultencoding("utf-8") +import importlib +importlib.reload(sys) # import for "'ascii' codec can't decode byte" error @@ -42,7 +42,7 @@ def __init__(self, Domain, verbose=False): self.Text = "" except Exception as e: self.logger.critical("ExaleadDOCSearch module failed to __init__: " + str(e)) - print helpers.color(" [*] Major Settings for Exalead are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for Exalead are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("ExaleadDOCSearch module started") @@ -58,7 +58,7 @@ def search(self): if self.verbose: p = ' [*] Exalead DOC Search on page: ' + str(self.Counter) self.logger.info('ExaleadDOCSearch on page: ' + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'http://www.exalead.com/search/web/results/?q="%40' + self.Domain + \ '"+filetype:word&elements_per_page=' + \ @@ -66,7 +66,7 @@ def search(self): except Exception as e: self.logger.error('ExaleadDOCSearch could not build URL') error = " [!] Major issue with Exalead DOC Search: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = dl.requesturl(url, useragent=self.UserAgent) # sometimes url is broken but exalead search results contain @@ -78,7 +78,7 @@ def search(self): except Exception as e: self.logger.error('ExaleadDOCSearch could not request / parse HTML') error = " [!] Fail during parsing result: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 30 # now download the required files @@ -87,7 +87,7 @@ def search(self): if self.verbose: p = ' [*] Exalead DOC search downloading: ' + str(url) self.logger.info('ExaleadDOCSearch downloading: ' + str(url)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".doc" dl = Download.Download(self.verbose) @@ -97,7 +97,7 @@ def search(self): p = ' [*] Exalead DOC file was downloaded: ' + \ str(url) self.logger.info('ExaleadDOCSearch downloaded: ' + str(p)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) ft = helpers.filetype(FileName).lower() if 'word' in ft: self.Text += convert.convert_doc_to_txt(FileName) @@ -105,18 +105,18 @@ def search(self): self.logger.warning('Downloaded file is not a DOC: ' + ft) except Exception as e: error = " [!] 
Issue with opening DOC Files:%s\n" % (str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except Exception as e: self.logger.error("ExaleadDOCSearch no doc's to download") - print helpers.color(" [*] No DOC's to download from Exalead!\n", firewall=True) + print(helpers.color(" [*] No DOC's to download from Exalead!\n", firewall=True)) if self.verbose: p = ' [*] Searching DOC from Exalead Complete' - print helpers.color(p, status=True) + print(helpers.color(p, status=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/ExaleadDOCXSearch.py b/Modules/ExaleadDOCXSearch.py index ed42866..e063000 100644 --- a/Modules/ExaleadDOCXSearch.py +++ b/Modules/ExaleadDOCXSearch.py @@ -17,8 +17,8 @@ # import for "'ascii' codec can't decode byte" error import sys -reload(sys) -sys.setdefaultencoding("utf-8") +import importlib +importlib.reload(sys) # import for "'ascii' codec can't decode byte" error @@ -44,7 +44,7 @@ def __init__(self, Domain, verbose=False): except Exception as e: self.logger.critical("ExaleadDOCXSearch module failed to __init__: " + str(e)) p = " [*] Major Settings for ExaleadDOCXSearch are missing, EXITING: " + e - print helpers.color(p, warning=True) + print(helpers.color(p, warning=True)) def execute(self): self.logger.debug("ExaleadDOCXSearch module started") @@ -70,7 +70,7 @@ def search(self): if self.verbose: p = ' [*] Exalead Search on page: ' + str(self.Counter) self.logger.info("ExaleadDOCXSearch on page: " + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'http://www.exalead.com/search/web/results/?q="%40' + self.Domain + \ '"+filetype:docx&elements_per_page=' + \ @@ -78,13 +78,13 @@ def search(self): except Exception as e: self.logger.error("Issue building URL to search") error = " [!] Major issue with Exalead DOCX Search: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(url, headers=self.UserAgent) except Exception as e: error = " [!] Fail during Request to Exalead (Check Connection):" + str( e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = r.content # sometimes url is broken but exalead search results contain @@ -96,7 +96,7 @@ def search(self): except Exception as e: self.logger.error("Fail during parsing result: " + str(e)) error = " [!] Fail during parsing result: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 30 # now download the required files @@ -105,7 +105,7 @@ def search(self): if self.verbose: p = ' [*] Exalead DOCX search downloading: ' + str(url) self.logger.info("Starting download of DOCX: " + str(url)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".docx" dl = Download.Download(self.verbose) @@ -115,26 +115,26 @@ def search(self): self.logger.info("File was downloaded: " + str(url)) p = ' [*] Exalead DOCX file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_docx_to_txt(FileName) except Exception as e: self.logger.error("Issue with opening DOCX Files: " + str(e)) error = " [!] 
Issue with opening DOCX Files:%s\n" % (str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except Exception as e: p = " [*] No DOCX's to download from Exalead: " + e self.logger.info("No DOCX's to download from Exalead: " + str(e)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) if self.verbose: p = ' [*] Searching DOCX from Exalead Complete' self.logger.info("Searching DOCX from Exalead Complete") - print helpers.color(p, status=True) + print(helpers.color(p, status=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/ExaleadPDFSearch.py b/Modules/ExaleadPDFSearch.py index 2a412ac..3cd486c 100644 --- a/Modules/ExaleadPDFSearch.py +++ b/Modules/ExaleadPDFSearch.py @@ -34,7 +34,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for ExaleadPDFSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for ExaleadPDFSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -47,20 +47,20 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Exalead Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'http://www.exalead.com/search/web/results/?q="%40' + self.Domain + \ '"+filetype:pdf&elements_per_page=' + \ str(self.Quanity) + '&start_index=' + str(self.Counter) except Exception as e: error = " [!] Major issue with Exalead PDF Search: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(url, headers=self.UserAgent) except Exception as e: error = " [!] Fail during Request to Exalead (Check Connection):" + str( e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = r.content # sometimes url is broken but exalead search results contain @@ -71,7 +71,7 @@ def search(self): for h2 in soup.findAll('h4', class_='media-heading')] except Exception as e: error = " [!] 
Fail during parsing result: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 30 # now download the required files @@ -79,7 +79,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Exalead PDF search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".pdf" dl = Download.Download(self.verbose) @@ -88,20 +88,20 @@ def search(self): if self.verbose: p = ' [*] Exalead PDF file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_pdf_to_txt(FileName) except Exception as e: pass try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except: - print helpers.color(" [*] No PDF's to download from Exalead!\n", firewall=True) + print(helpers.color(" [*] No PDF's to download from Exalead!\n", firewall=True)) if self.verbose: p = ' [*] Searching PDF from Exalead Complete' - print helpers.color(p, status=True) + print(helpers.color(p, status=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/ExaleadPPTXSearch.py b/Modules/ExaleadPPTXSearch.py index 15f0304..7781768 100644 --- a/Modules/ExaleadPPTXSearch.py +++ b/Modules/ExaleadPPTXSearch.py @@ -15,8 +15,8 @@ from bs4 import BeautifulSoup # import for "'ascii' codec can't decode byte" error import sys -reload(sys) -sys.setdefaultencoding("utf-8") +import importlib +importlib.reload(sys) # import for "'ascii' codec can't decode byte" error @@ -41,7 +41,7 @@ def __init__(self, Domain, verbose=False): self.Text = "" except Exception as e: self.logger.critical("ExaleadPPTXSearch module failed to __init__: " + str(e)) - print helpers.color(" [*] Major Settings for Exalead are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for Exalead are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("ExaleadPPTXSearch module started") @@ -57,7 +57,7 @@ def search(self): if self.verbose: p = ' [*] Exalead PPTX Search on page: ' + str(self.Counter) self.logger.info('ExaleadPPTXSearch on page: ' + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'http://www.exalead.com/search/web/results/?q="%40' + self.Domain + \ '"+filetype:pptx&elements_per_page=' + \ @@ -65,7 +65,7 @@ def search(self): except Exception as e: self.logger.error('ExaleadPPTXSearch could not build URL') error = " [!] Major issue with Exalead PPTX Search: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = dl.requesturl(url, useragent=self.UserAgent) # sometimes url is broken but exalead search results contain @@ -77,7 +77,7 @@ def search(self): except Exception as e: self.logger.error('ExaleadPPTXSearch could not request / parse HTML') error = " [!] 
Fail during parsing result: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 30 # now download the required files @@ -86,7 +86,7 @@ def search(self): if self.verbose: p = ' [*] Exalead PPTX search downloading: ' + str(url) self.logger.info('ExaleadPPTXSearch downloading: ' + str(url)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".pptx" dl = Download.Download(self.verbose) @@ -96,7 +96,7 @@ def search(self): p = ' [*] Exalead PPTX file was downloaded: ' + \ str(url) self.logger.info('ExaleadDOCSearch downloaded: ' + str(p)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) ft = helpers.filetype(FileName).lower() if 'powerpoint' in ft: self.Text += convert.convert_zip_to_text(FileName) @@ -104,18 +104,18 @@ def search(self): self.logger.warning('Downloaded file is not a PPTX: ' + ft) except Exception as e: error = " [!] Issue with opening PPTX Files:%s" % (str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except Exception as e: self.logger.error("ExaleadPPTXSearch no doc's to download") - print helpers.color(" [*] No PPTX's to download from Exalead!\n", firewall=True) + print(helpers.color(" [*] No PPTX's to download from Exalead!\n", firewall=True)) if self.verbose: p = ' [*] Searching PPTX from Exalead Complete' - print helpers.color(p, status=True) + print(helpers.color(p, status=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/ExaleadSearch.py b/Modules/ExaleadSearch.py index a3a8e41..6b25f2c 100644 --- a/Modules/ExaleadSearch.py +++ b/Modules/ExaleadSearch.py @@ -32,7 +32,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for Exalead are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for Exalead are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -44,19 +44,19 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Exalead Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'http://www.exalead.com/search/web/results/?q="%40' + self.Domain + '"&elements_per_page=' + \ str(self.Quanity) + '&start_index=' + str(self.Counter) except Exception as e: error = " [!] Major issue with Exalead Search: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(url, headers=self.UserAgent) except Exception as e: error = " [!] Fail during Request to Exalead (Check Connection):" + str( e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = r.content # sometimes url is broken but exalead search results contain @@ -67,7 +67,7 @@ def search(self): for h2 in soup.findAll('h4', class_='media-heading')] except Exception as e: error = " [!] Fail during parsing result: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 30 # Now take all gathered URL's and gather the Raw content needed @@ -77,11 +77,11 @@ def search(self): self.Text += data.content except Exception as e: error = " [!] 
Connection Timed out on Exalead Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) if self.verbose: p = ' [*] Searching Exalead Complete' - print helpers.color(p, status=True) + print(helpers.color(p, status=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/ExaleadXLSXSearch.py b/Modules/ExaleadXLSXSearch.py index dda950c..2424ef1 100644 --- a/Modules/ExaleadXLSXSearch.py +++ b/Modules/ExaleadXLSXSearch.py @@ -16,8 +16,8 @@ # import for "'ascii' codec can't decode byte" error import sys -reload(sys) -sys.setdefaultencoding("utf-8") +# Python 3 strings are Unicode by default, so the old +# reload(sys) / sys.setdefaultencoding("utf-8") hack is no longer needed # import for "'ascii' codec can't decode byte" error @@ -40,7 +40,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for ExaleadXLSXSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for ExaleadXLSXSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -54,20 +54,20 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Exalead XLSX Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'http://www.exalead.com/search/web/results/?q="%40' + self.Domain + \ '"+filetype:xlsx&elements_per_page=' + \ str(self.Quanity) + '&start_index=' + str(self.Counter) except Exception as e: error = " [!] Major issue with Exalead XLSX Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(url, headers=self.UserAgent) except Exception as e: error = " [!] Fail during Request to Exalead (Check Connection):" + str( e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = r.content # sometimes url is broken but exalead search results contain @@ -78,7 +78,7 @@ def search(self): for h4 in soup.findAll('h4', class_='media-heading')] except Exception as e: error = " [!] Fail during parsing result: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 30 # now download the required files @@ -86,7 +86,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Exalead XLSX search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".xlsx" FileName, FileDownload = dl.download_file(url, filetype) @@ -94,21 +94,21 @@ def search(self): if self.verbose: p = ' [*] Exalead XLSX file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_Xlsx_to_Csv(FileName) except Exception as e: error = " [!] 
Issue with opening Xlsx Files:%s\n" % (str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except: - print helpers.color("[*] No XLSX's to download from Exalead!\n", firewall=True) + print(helpers.color("[*] No XLSX's to download from Exalead!\n", firewall=True)) if self.verbose: p = ' [*] Searching XLSX from Exalead Complete' - print helpers.color(p, status=True) + print(helpers.color(p, status=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/FlickrSearch.py b/Modules/FlickrSearch.py index 25c1d19..22e163f 100644 --- a/Modules/FlickrSearch.py +++ b/Modules/FlickrSearch.py @@ -27,7 +27,7 @@ def __init__(self, domain, verbose=False): config.read('Common/SimplyEmail.ini') self.HostName = str(config['FlickrSearch']['Hostname']) except: - print helpers.color(" [*] Major Settings for FlickrSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for FlickrSearch are missing, EXITING!\n", warning=True)) def execute(self): self.process() @@ -41,11 +41,11 @@ def process(self): rawhtml = dl.requesturl(url, useragent=self.UserAgent) except Exception as e: error = " [!] Major issue with Flickr Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.results += rawhtml if self.verbose: p = ' [*] FlickrSearch has completed' - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) # https://www.flickr.com/search/?text=%40microsoft.com # is an example of a complete request for "@microsoft.com" diff --git a/Modules/GitHubCodeSearch.py b/Modules/GitHubCodeSearch.py index 9593461..c335e07 100644 --- a/Modules/GitHubCodeSearch.py +++ b/Modules/GitHubCodeSearch.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import configparser -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup from Helpers import Download from Helpers import Parser from Helpers import helpers @@ -55,7 +55,7 @@ def __init__(self, domain, verbose=False): self.Depth = int(config['GitHubSearch']['PageDepth']) self.Counter = int(config['GitHubSearch']['QueryStart']) except: - print helpers.color(" [*] Major Settings for GitHubSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GitHubSearch are missing, EXITING!\n", warning=True)) def execute(self): self.process() @@ -70,7 +70,7 @@ def process(self): while self.Counter <= self.Depth: if self.verbose: p = ' [*] GitHub Code Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = "https://github.com/search?p=" + str(self.Counter) + "&q=" + \ str(self.domain) + "+&ref=searchresults&type=Code&utf8=✓" @@ -80,7 +80,7 @@ def process(self): except Exception as e: error = " [!] Major isself.Counter += 1sue with GitHub Search:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) RawHtml = r.content # Parse the results for our URLS) soup = BeautifulSoup(RawHtml) @@ -97,7 +97,7 @@ def process(self): self.Html += html except Exception as e: error = " [!] 
Connection Timed out on Github Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) def get_emails(self): Parse = Parser.Parser(self.Html) diff --git a/Modules/GitHubGistSearch.py b/Modules/GitHubGistSearch.py index dc4538a..3e6e5db 100644 --- a/Modules/GitHubGistSearch.py +++ b/Modules/GitHubGistSearch.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import configparser -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup from Helpers import Download from Helpers import Parser from Helpers import helpers @@ -32,7 +32,7 @@ def __init__(self, domain, verbose=False): self.Depth = int(config['GitHubGistSearch']['PageDepth']) self.Counter = int(config['GitHubGistSearch']['QueryStart']) except: - print helpers.color(" [*] Major Settings for GitHubGistSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GitHubGistSearch are missing, EXITING!\n", warning=True)) def execute(self): self.process() @@ -48,7 +48,7 @@ def process(self): if self.verbose: p = ' [*] GitHub Gist Search Search on page: ' + \ str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: # search?p=2&q=%40enron.com&ref=searchresults&utf8=✓ url = "https://gist.github.com/search?p=" + str(self.Counter) + "&q=%40" + \ @@ -59,7 +59,7 @@ def process(self): except Exception as e: error = " [!] Major issue with GitHubGist Search:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) RawHtml = r.content # Parse the results for our URLS) soup = BeautifulSoup(RawHtml) @@ -77,7 +77,7 @@ def process(self): except Exception as e: error = " [!] Connection Timed out on GithubGist Search:" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) def get_emails(self): Parse = Parser.Parser(self.Html) diff --git a/Modules/GitHubUserSearch.py b/Modules/GitHubUserSearch.py index 03fbe03..1b09c24 100644 --- a/Modules/GitHubUserSearch.py +++ b/Modules/GitHubUserSearch.py @@ -29,7 +29,7 @@ def __init__(self, domain, verbose=False): self.Depth = int(config['GitHubUserSearch']['PageDepth']) self.Counter = int(config['GitHubUserSearch']['QueryStart']) except: - print helpers.color(" [*] Major Settings for GitHubUserSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GitHubUserSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -42,20 +42,20 @@ def search(self): helpers.modsleep(5) if self.verbose: p = ' [*] GitHubUser Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'https://github.com/search?p=' + str(self.Counter) + '&q=' + \ str(self.domain) + 'ref=searchresults&type=Users&utf8=' except Exception as e: error = " [!] Major issue with GitHubUser Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = dl.requesturl( url, useragent=self.UserAgent, raw=True, timeout=10) except Exception as e: error = " [!] 
Fail during Request to GitHubUser (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) results = r.content self.Html += results self.Counter += 1 diff --git a/Modules/GoogleCsvSearch.py b/Modules/GoogleCsvSearch.py index 46c09e2..4e5a4a2 100644 --- a/Modules/GoogleCsvSearch.py +++ b/Modules/GoogleCsvSearch.py @@ -5,13 +5,13 @@ # 2) main name called "ClassName" # 3) execute function (calls everything it needs) # 4) places the findings into a queue -import urlparse +import urllib.parse import configparser import time from Helpers import Download from Helpers import helpers from Helpers import Parser -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class ClassName(object): @@ -35,7 +35,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for GoogleCsvSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GoogleCsvSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -48,25 +48,25 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Google CSV Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = "https://www.google.com/search?q=site:" + \ self.Domain + "+filetype:csv&start=" + str(self.Counter) except Exception as e: error = " [!] Major issue with Google Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = dl.requesturl(url, useragent=self.UserAgent) except Exception as e: error = " [!] Fail during Request to Google (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) # check for captcha try: # Url = r.url dl.GoogleCaptchaDetection(RawHtml) except Exception as e: - print e + print(e) soup = BeautifulSoup(RawHtml) # I use this to parse my results, for URLS to follow for a in soup.findAll('a'): @@ -74,8 +74,8 @@ def search(self): # https://stackoverflow.com/questions/21934004/not-getting-proper-links- # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412? # newreg=01f0ed80771f4dfaa269b15268b3f9a9 - l = urlparse.parse_qs( - urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs( + urllib.parse.urlparse(a['href']).query)['q'][0] if l.startswith('http') or l.startswith('www'): if "webcache.googleusercontent.com" not in l: self.urlList.append(l) @@ -88,7 +88,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Google CSV search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".csv" FileName, FileDownload = dl.download_file2(url, filetype) @@ -96,18 +96,18 @@ def search(self): if self.verbose: p = '[*] Google CSV file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) with open(FileName) as f: self.Text += f.read() # print self.Text except Exception as e: - print helpers.color(" [!] Issue with opening CSV Files\n", firewall=True) + print(helpers.color(" [!] 
Issue with opening CSV Files\n", firewall=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except: - print helpers.color(" [*] No CSV to download from Google!\n", firewall=True) + print(helpers.color(" [*] No CSV to download from Google!\n", firewall=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/GoogleDocSearch.py b/Modules/GoogleDocSearch.py index 3595a53..addded2 100644 --- a/Modules/GoogleDocSearch.py +++ b/Modules/GoogleDocSearch.py @@ -6,14 +6,14 @@ # 3) execute function (calls everything it needs) # 4) places the findings into a queue import requests -import urlparse +import urllib.parse import configparser import time from Helpers import Download from Helpers import Converter from Helpers import helpers from Helpers import Parser -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class ClassName(object): @@ -37,7 +37,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color("[*] Major Settings for GoogleDocSearch are missing, EXITING!\n", warning=True) + print(helpers.color("[*] Major Settings for GoogleDocSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -51,26 +51,26 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Google DOC Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: urly = "https://www.google.com/search?q=site:" + \ self.Domain + "+filetype:doc&start=" + str(self.Counter) except Exception as e: error = " [!] Major issue with Google Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(urly) except Exception as e: error = " [!] Fail during Request to Google (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) RawHtml = r.content # check for captcha try: # Url = r.url dl.GoogleCaptchaDetection(RawHtml) except Exception as e: - print e + print(e) soup = BeautifulSoup(RawHtml) # I use this to parse my results, for URLS to follow for a in soup.findAll('a'): @@ -78,8 +78,8 @@ def search(self): # https://stackoverflow.com/questions/21934004/not-getting-proper-links- # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412? # newreg=01f0ed80771f4dfaa269b15268b3f9a9 - l = urlparse.parse_qs( - urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs( + urllib.parse.urlparse(a['href']).query)['q'][0] if l.startswith('http') or l.startswith('www'): if "webcache.googleusercontent.com" not in l: self.urlList.append(l) @@ -92,7 +92,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Google DOC search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".doc" FileName, FileDownload = dl.download_file(url, filetype) @@ -100,17 +100,17 @@ def search(self): if self.verbose: p = ' [*] Google DOC file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_doc_to_txt(FileName) # print self.Text except Exception as e: - print helpers.color(" [!] Issue with opening Doc Files\n", firewall=True) + print(helpers.color(" [!] 
Issue with opening Doc Files\n", firewall=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except: - print helpers.color(" [*] No DOC's to download from Google!\n", firewall=True) + print(helpers.color(" [*] No DOC's to download from Google!\n", firewall=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/GoogleDocxSearch.py b/Modules/GoogleDocxSearch.py index d666735..37de9c5 100644 --- a/Modules/GoogleDocxSearch.py +++ b/Modules/GoogleDocxSearch.py @@ -7,14 +7,14 @@ # 3) execute function (calls everything it needs) # 4) places the findings into a queue import requests -import urlparse +import urllib.parse import configparser import time from Helpers import Converter from Helpers import helpers from Helpers import Parser from Helpers import Download -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class ClassName(object): @@ -38,7 +38,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for GoogleDocxSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GoogleDocxSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -52,19 +52,19 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Google DOCX Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: urly = "https://www.google.com/search?q=site:" + \ self.Domain + "+filetype:docx&start=" + str(self.Counter) except Exception as e: error = "[!] Major issue with Google Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(urly) except Exception as e: error = " [!] Fail during Request to Google (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) RawHtml = r.content soup = BeautifulSoup(RawHtml) # I use this to parse my results, for URLS to follow @@ -73,8 +73,8 @@ def search(self): # https://stackoverflow.com/questions/21934004/not-getting-proper-links- # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412? # newreg=01f0ed80771f4dfaa269b15268b3f9a9 - l = urlparse.parse_qs( - urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs( + urllib.parse.urlparse(a['href']).query)['q'][0] if l.startswith('http') or l.startswith('www'): if "webcache.googleusercontent.com" not in l: self.urlList.append(l) @@ -87,7 +87,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Google DOCX search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".docx" FileName, FileDownload = dl.download_file(url, filetype) @@ -95,17 +95,17 @@ def search(self): if self.verbose: p = ' [*] Google DOCX file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_docx_to_txt(FileName) # print self.Text except Exception as e: - print helpers.color(" [!] Issue with Converting Docx Files\n", firewall=True) + print(helpers.color(" [!] 
Issue with Converting Docx Files\n", firewall=True)) try: dl.delete_file(FileName) except Exception as e: - print e + print(e) except: - print helpers.color(" [*] No DOCX's to download from Google!\n", firewall=True) + print(helpers.color(" [*] No DOCX's to download from Google!\n", firewall=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/GooglePDFSearch.py b/Modules/GooglePDFSearch.py index 56a9eff..0beebc7 100644 --- a/Modules/GooglePDFSearch.py +++ b/Modules/GooglePDFSearch.py @@ -6,14 +6,14 @@ # 3) execute function (calls everything it needs) # 4) places the findings into a queue import requests -import urlparse +import urllib.parse import configparser import time from Helpers import helpers from Helpers import Parser from Helpers import Download from Helpers import Converter -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class ClassName(object): @@ -37,7 +37,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for GooglePDFSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GooglePDFSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -52,19 +52,19 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Google PDF Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: urly = "https://www.google.com/search?q=" + \ self.Domain + "+filetype:pdf&start=" + str(self.Counter) except Exception as e: error = " [!] Major issue with Google Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(urly) except Exception as e: error = " [!] Fail during Request to Google (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) RawHtml = r.content # get redirect URL # Url = r.url @@ -75,8 +75,8 @@ def search(self): # https://stackoverflow.com/questions/21934004/not-getting-proper-links- # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412? 
# newreg=01f0ed80771f4dfaa269b15268b3f9a9 - l = urlparse.parse_qs( - urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs( + urllib.parse.urlparse(a['href']).query)['q'][0] if l.startswith('http') or l.startswith('www'): if "webcache.googleusercontent.com" not in l: self.urlList.append(l) @@ -89,7 +89,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Google PDF search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".pdf" # use new helper class to download file @@ -99,17 +99,17 @@ def search(self): if self.verbose: p = ' [*] Google PDF file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_pdf_to_txt(FileName) except Exception as e: - print e + print(e) try: # now remove any files left behind dl.delete_file(FileName) except Exception as e: - print e + print(e) except: - print helpers.color(" [*] No PDF's to download from Google!\n", firewall=True) + print(helpers.color(" [*] No PDF's to download from Google!\n", firewall=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/GooglePPTXSearch.py b/Modules/GooglePPTXSearch.py index b0befa4..1aef02d 100644 --- a/Modules/GooglePPTXSearch.py +++ b/Modules/GooglePPTXSearch.py @@ -5,14 +5,14 @@ # 2) main name called "ClassName" # 3) execute function (calls everthing it neeeds) # 4) places the findings into a queue -import urlparse +import urllib.parse import configparser import time from Helpers import Converter from Helpers import Download from Helpers import helpers from Helpers import Parser -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class ClassName(object): @@ -36,7 +36,7 @@ def __init__(self, Domain, verbose=False): self.urlList = [] self.Text = "" except: - print helpers.color(" [*] Major Settings for GooglePptxSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GooglePptxSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -50,25 +50,25 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Google PPTX Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = "https://www.google.com/search?q=" + \ self.Domain + "+filetype:pptx&start=" + str(self.Counter) except Exception as e: error = " [!] Major issue with Google Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = dl.requesturl(url, useragent=self.UserAgent) except Exception as e: error = " [!] Fail during Request to Google (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) # check for captcha try: # Url = r.url dl.GoogleCaptchaDetection(RawHtml) except Exception as e: - print e + print(e) soup = BeautifulSoup(RawHtml) # I use this to parse my results, for URLS to follow for a in soup.findAll('a'): @@ -76,12 +76,12 @@ def search(self): # https://stackoverflow.com/questions/21934004/not-getting-proper-links- # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412? 
# newreg=01f0ed80771f4dfaa269b15268b3f9a9 - l = urlparse.parse_qs(urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs(urllib.parse.urlparse(a['href']).query)['q'][0] if l.startswith('http') or l.startswith('www') or l.startswith('https'): if "webcache.googleusercontent.com" not in l: self.urlList.append(l) # for some reason PPTX seems to be cached data: - l = urlparse.parse_qs(urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs(urllib.parse.urlparse(a['href']).query)['q'][0] l = l.split(':', 2) if "webcache.googleusercontent.com" not in l[2]: self.urlList.append(l[2]) @@ -94,7 +94,7 @@ def search(self): for url in self.urlList: if self.verbose: p = ' [*] Google PPTX search downloading: ' + str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".pptx" FileName, FileDownload = dl.download_file2(url, filetype) @@ -102,7 +102,7 @@ def search(self): if self.verbose: p = ' [*] Google PPTX file was downloaded: ' + \ str(url) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) ft = helpers.filetype(FileName).lower() if 'powerpoint' in ft: # self.Text += convert.convert_zip_to_text(FileName) @@ -111,14 +111,14 @@ def search(self): self.logger.warning('Downloaded file is not a PPTX: ' + ft) # print self.Text except Exception as e: - print helpers.color(" [!] Issue with opening PPTX Files\n", firewall=True) + print(helpers.color(" [!] Issue with opening PPTX Files\n", firewall=True)) try: if FileDownload: dl.delete_file(FileName) except Exception as e: self.logger.warning('Issue deleting file: ' + str(e)) except: - print helpers.color(" [*] No CSV to download from Google!\n", firewall=True) + print(helpers.color(" [*] No CSV to download from Google!\n", firewall=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/GoogleSearch.py b/Modules/GoogleSearch.py index 47d0ae2..23eb9b6 100644 --- a/Modules/GoogleSearch.py +++ b/Modules/GoogleSearch.py @@ -32,7 +32,7 @@ def __init__(self, Domain, verbose=False): self.verbose = verbose self.Html = "" except: - print helpers.color(" [*] Major Settings for GoogleSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GoogleSearch are missing, EXITING!\n", warning=True)) def execute(self): self.search() @@ -45,25 +45,25 @@ def search(self): time.sleep(1) if self.verbose: p = ' [*] Google Search on page: ' + str(self.Counter) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = "http://www.google.com/search?num=" + str(self.Quanity) + "&start=" + \ str(self.Counter) + "&hl=en&meta=&q=%40\"" + \ self.Domain + "\"" except Exception as e: error = " [!] Major issue with Google Search:" + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: results = dl.requesturl(url, useragent=self.UserAgent) except Exception as e: error = " [!] 
Fail during Request to Google (Check Connection):" + \ str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: # Url = r.url dl.GoogleCaptchaDetection(results) except Exception as e: - print e + print(e) self.Html += results self.Counter += 100 helpers.modsleep(self.Sleep, jitter=self.Jitter) diff --git a/Modules/GoogleXLSXSearch.py b/Modules/GoogleXLSXSearch.py index 8bb89b3..1b3a90e 100644 --- a/Modules/GoogleXLSXSearch.py +++ b/Modules/GoogleXLSXSearch.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3.7 # Class will have the following properties: # 1) name / description @@ -6,7 +6,7 @@ # 3) execute function (calls everything it needs) # 4) places the findings into a queue import requests -import urlparse +import urllib.parse import configparser import time import logging @@ -14,7 +14,7 @@ from Helpers import helpers from Helpers import Parser from Helpers import Converter -from BeautifulSoup import BeautifulSoup +from bs4 import BeautifulSoup class ClassName(object): @@ -41,7 +41,7 @@ def __init__(self, Domain, verbose=False): except Exception as e: self.logger.critical( 'GoogleXlsxSearch module failed to load: ' + str(e)) - print helpers.color(" [*] Major Settings for GoogleXlsxSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for GoogleXlsxSearch are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("GoogleXlsxSearch Started") @@ -57,7 +57,7 @@ def search(self): p = ' [*] Google XLSX Search on page: ' + str(self.Counter) self.logger.info( "Google XLSX Search on page: " + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: urly = "https://www.google.com/search?q=site:" + \ self.Domain + "+filetype:xlsx&start=" + str(self.Counter) @@ -65,7 +65,7 @@ def search(self): error = " [!] Major issue with Google XLSX Search:" + str(e) self.logger.error( "GoogleXlsxSearch failed to build url: " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(urly) except Exception as e: @@ -73,7 +73,7 @@ def search(self): str(e) self.logger.error( "GoogleXlsxSearch failed to request url (Check Connection): " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) RawHtml = r.content soup = BeautifulSoup(RawHtml) # I use this to parse my results, for URLS to follow @@ -82,8 +82,8 @@ def search(self): # https://stackoverflow.com/questions/21934004/not-getting-proper-links- # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412? 
# newreg=01f0ed80771f4dfaa269b15268b3f9a9 - l = urlparse.parse_qs( - urlparse.urlparse(a['href']).query)['q'][0] + l = urllib.parse.parse_qs( + urllib.parse.urlparse(a['href']).query)['q'][0] if l.startswith('http') or l.startswith('www'): if "webcache.googleusercontent.com" not in l: self.urlList.append(l) @@ -100,7 +100,7 @@ def search(self): p = ' [*] Google XLSX search downloading: ' + str(url) self.logger.info( "Google XLSX search downloading: " + str(url)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: filetype = ".xlsx" dl = Download.Download(self.verbose) @@ -111,11 +111,11 @@ def search(self): str(url) self.logger.info( "Google XLSX file was downloaded: " + str(url)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.Text += convert.convert_Xlsx_to_Csv(FileName) # print self.Text except Exception as e: - print helpers.color(" [!] Issue with opening Xlsx Files\n", firewall=True) + print(helpers.color(" [!] Issue with opening Xlsx Files\n", firewall=True)) self.logger.error("Google XLSX had issue opening file") try: dl.delete_file(FileName) @@ -123,7 +123,7 @@ def search(self): self.logger.error( "Google XLSX failed to delete file: " + str(e)) except Exception as e: - print helpers.color(" [*] No XLSX's to download from google!\n", firewall=True) + print(helpers.color(" [*] No XLSX's to download from google!\n", firewall=True)) self.logger.error("No XLSX's to download from google! " + str(e)) def get_emails(self): diff --git a/Modules/HtmlScrape.py b/Modules/HtmlScrape.py index 619360f..e217ffe 100644 --- a/Modules/HtmlScrape.py +++ b/Modules/HtmlScrape.py @@ -42,7 +42,7 @@ def __init__(self, domain, verbose=False): self.retVal = 0 self.maxRetries = "--tries=5" except: - print helpers.color(" [*] Major Settings for HTML are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for HTML are missing, EXITING!\n", warning=True)) def execute(self): try: @@ -50,7 +50,7 @@ def execute(self): FinalOutput, HtmlResults, JsonResults = self.get_emails() return FinalOutput, HtmlResults, JsonResults except Exception as e: - print e + print(e) def search(self): # setup domain so it will follow redirects @@ -62,20 +62,20 @@ def search(self): # "--convert-links" if self.verbose: p = ' [*] HTML scrape underway [This can take a bit!]' - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.retVal = subprocess.call(["wget", "-q", "-e robots=off", "--header=\"Accept: text/html\"", self.useragent, "--recursive", self.depth, self.wait, self.limit_rate, self.save, self.timeout, "--page-requisites", "-R gif,jpg,pdf,png,css,zip,mov,wmv,ppt,doc,docx,xls,exe,bin,pptx,avi,swf,vbs,xlsx,kfp,pub", "--no-clobber", self.maxRetries,"--domains", self.domain, TempDomain]) if self.retVal > 0: - print helpers.color(" [*] Wget returned error, likely 403 (attempting again): " + str(self.retVal), warning=True) + print(helpers.color(" [*] Wget returned error, likely 403 (attempting again): " + str(self.retVal), warning=True)) self.retVal = subprocess.call(["wget", "-q", "-e robots=off", "--header=\"Accept: text/html\"", self.useragent, "--recursive", self.depth, self.wait, self.limit_rate, self.save, self.timeout, "--page-requisites", "-R gif,jpg,pdf,png,css,zip,mov,wmv,ppt,doc,docx,xls,exe,bin,pptx,avi,swf,vbs,xlsx,kfp,pub", "--no-clobber", self.maxRetries,"--domains", self.domain, TempDomain]) except Exception as e: - print e - print " [!] ERROR during Wget Request" + print(e) + print(" [!] 
ERROR during Wget Request") def get_emails(self): # Direct location of new dir created during wget @@ -110,7 +110,7 @@ def get_emails(self): for item in output: FinalOutput.append(item.rstrip("\n")) except Exception as e: - print e + print(e) if self.remove == "yes" or self.remove == "Yes": if not self.retVal > 0: shutil.rmtree(directory) diff --git a/Modules/Hunter.py b/Modules/Hunter.py index baa9b0e..11e0b0c 100644 --- a/Modules/Hunter.py +++ b/Modules/Hunter.py @@ -46,7 +46,7 @@ def __init__(self, domain, verbose=False): raise Exception("Email Type setting invalid") except Exception as e: self.logger.critical("Hunter module failed to __init__: " + str(e)) - print helpers.color(" [*] Error in Hunter settings: " + str(e) + "\n", warning=True) + print(helpers.color(" [*] Error in Hunter settings: " + str(e) + "\n", warning=True)) def execute(self): self.logger.debug("Hunter module started") @@ -69,7 +69,7 @@ def process(self): overQuotaLimit = False except Exception as e: error = " [!] Hunter API error: " + str(accountInfo['errors'][0]['details']) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: # Hunter's API only allows 100 emails per request, so we check the number of emails Hunter has # on our specified domain, and if it's over 100 we need to make multiple requests to get all of the emails @@ -81,19 +81,19 @@ def process(self): offset = 0 except Exception as e: error = "[!] Major issue with Hunter Search: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) requestsMade = 0 # Main loop to keep requesting the Hunter API until we get all of the emails they have while emailsLeft > 0: try: if overQuotaLimit or requestsMade + quotaUsed >= self.QuotaLimit: if self.verbose: - print helpers.color(" [*] You are over your set Quota Limit: " + \ - str(quotaUsed) + "/" + str(self.QuotaLimit) + " stopping search", firewall=True) + print(helpers.color(" [*] You are over your set Quota Limit: " + \ + str(quotaUsed) + "/" + str(self.QuotaLimit) + " stopping search", firewall=True)) break elif self.RequestLimit != 0 and requestsMade >= self.RequestLimit: if self.verbose: - print helpers.color(" [*] Stopping search due to user set Request Limit", firewall=True) + print(helpers.color(" [*] Stopping search due to user set Request Limit", firewall=True)) break # This returns a JSON object @@ -104,7 +104,7 @@ def process(self): emailCount = int(results['meta']['results']) except Exception as e: error = " [!] Hunter API error: " + str(results['errors'][0]['details']) + " QUITTING!" - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) break try: # Make sure we don't exceed the index for the 'emails' array in the 'results' Json object @@ -129,18 +129,18 @@ def process(self): offset += emailsLeft except Exception as e: error = " [!] Major issue with search parsing: " + str(e) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) break if self.verbose: # Print the avalible requests user has if verbose - print helpers.color(' [*] Hunter has completed JSON request', firewall=True) + print(helpers.color(' [*] Hunter has completed JSON request', firewall=True)) requestsUsed = requestsMade + quotaUsed if quota - requestsUsed <= 0: - print helpers.color(" [*] You have no Hunter requests left." \ - + "They will refill in about a month", firewall=True) + print(helpers.color(" [*] You have no Hunter requests left." 
\ + + "They will refill in about a month", firewall=True)) else: - print helpers.color(" [*] You have " + str(requestsUsed) \ - + "/" + str(quota) + " Hunter requests left", firewall=True) + print(helpers.color(" [*] You have " + str(requestsUsed) \ + + "/" + str(quota) + " Hunter requests left", firewall=True)) def get_emails(self): # Make sure you remove any newlines diff --git a/Modules/PasteBinSearch.py b/Modules/PasteBinSearch.py index 0441aa0..5d04904 100644 --- a/Modules/PasteBinSearch.py +++ b/Modules/PasteBinSearch.py @@ -37,7 +37,7 @@ def __init__(self, Domain, verbose=False): except Exception as e: self.logger.critical( 'PasteBinSearch module failed to __init__: ' + str(e)) - print helpers.color("[*] Major Settings for PasteBinSearch are missing, EXITING!\n", warning=True) + print(helpers.color("[*] Major Settings for PasteBinSearch are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("PasteBinSearch started") @@ -54,7 +54,7 @@ def search(self): str(self.Counter) self.logger.info( "GooglePasteBinSearch on page: " + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = "http://www.google.com/search?num=" + str(self.Quanity) + "&start=" + str(self.Counter) + \ '&hl=en&meta=&q=site:pastebin.com+"%40' + \ @@ -64,7 +64,7 @@ def search(self): str(e) self.logger.error( "GooglePasteBinSearch could not create URL: " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: r = requests.get(url, headers=self.UserAgent) @@ -73,7 +73,7 @@ def search(self): e) self.logger.error( "Fail during Request to PasteBin (Check Connection): " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = r.content try: @@ -90,7 +90,7 @@ def search(self): error = " [!] Fail during parsing result: " + str(e) self.logger.error( "PasteBinSearch Fail during parsing result: " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Counter += 100 # Now take all gathered URL's and gather the Raw content needed for Url in self.urlList: @@ -102,12 +102,12 @@ def search(self): error = "[!] 
Connection Timed out on PasteBin Search:" + str(e) self.logger.error( "Connection Timed out on PasteBin raw download: " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) if self.verbose: p = ' [*] Searching PasteBin Complete' self.logger.info("Searching PasteBin Complete") - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) def get_emails(self): Parse = Parser.Parser(self.Text) diff --git a/Modules/RedditPostSearch.py b/Modules/RedditPostSearch.py index ff6979b..f484f85 100644 --- a/Modules/RedditPostSearch.py +++ b/Modules/RedditPostSearch.py @@ -33,7 +33,7 @@ def __init__(self, Domain, verbose=False): except Exception as e: self.logger.critical( 'RedditPostSearch module failed to load: ' + str(e)) - print helpers.color(" [*] Major Settings for RedditPostSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for RedditPostSearch are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("RedditPostSearch started") @@ -49,7 +49,7 @@ def search(self): p = ' [*] RedditPost Search on result: ' + str(self.Counter) self.logger.debug( "RedditPost Search on result: " + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = "https://www.reddit.com/search?q=%40" + str(self.Domain) + \ "&restrict_sr=&sort=relevance&t=all&count=" + str(self.Counter) + \ @@ -58,7 +58,7 @@ def search(self): error = " [!] Major issue with RedditPost search:" + str(e) self.logger.error( "Major issue with RedditPostSearch: " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: RawHtml = dl.requesturl(url, useragent=self.UserAgent) except Exception as e: @@ -66,7 +66,7 @@ def search(self): str(e) self.logger.error( "Fail during Request to Reddit (Check Connection): " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.Html += RawHtml # reddit seems to increment by 25 in cases self.Counter += 25 diff --git a/Modules/SearchPGP.py b/Modules/SearchPGP.py index a318f08..902c23a 100644 --- a/Modules/SearchPGP.py +++ b/Modules/SearchPGP.py @@ -31,7 +31,7 @@ def __init__(self, domain, verbose=False): except Exception as e: self.logger.critical( 'SearchPGP module failed to __init__: ' + str(e)) - print helpers.color("[*] Major Settings for SearchPGP are missing, EXITING!\n", warning=True) + print(helpers.color("[*] Major Settings for SearchPGP are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("SearchPGP started") @@ -48,11 +48,11 @@ def process(self): except Exception as e: error = " [!] 
Major issue with PGP Search:" + str(e) self.logger.error("Major issue with PGP search: " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) if self.verbose: p = ' [*] Searching PGP Complete' self.logger.info("SearchPGP Completed search") - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) self.results = r.content def get_emails(self): diff --git a/Modules/WhoisAPISearch.py b/Modules/WhoisAPISearch.py index 03ec703..2181544 100644 --- a/Modules/WhoisAPISearch.py +++ b/Modules/WhoisAPISearch.py @@ -31,7 +31,7 @@ def __init__(self, domain, verbose=False): except Exception as e: self.logger.critical( 'WhoisAPISearch module failed to __init__: ' + str(e)) - print helpers.color(" [*] Major Settings for Search Whois are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for Search Whois are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("WhoisAPISearch Started") @@ -44,7 +44,7 @@ def process(self): if self.verbose: p = ' [*] Requesting API on HackerTarget whois' self.logger.info("Requesting API on HackerTarget whois") - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) url = "http://api.hackertarget.com/whois/?q=" + \ self.domain r = requests.get(url) @@ -52,7 +52,7 @@ def process(self): error = " [!] Major issue with Whois Search:" + str(e) self.logger.error( "Failed to request URL (Check Connection): " + str(e)) - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.results = r.content def get_emails(self): diff --git a/Modules/Whoisolgy.py b/Modules/Whoisolgy.py index 25efae9..0a332d1 100644 --- a/Modules/Whoisolgy.py +++ b/Modules/Whoisolgy.py @@ -32,7 +32,7 @@ def __init__(self, domain, verbose=False): except Exception as e: self.logger.critical( 'Whoisology module failed to __init__: ' + str(e)) - print helpers.color("[*] Major Settings for Search Whoisology are missing, EXITING!\n", warning=True) + print(helpers.color("[*] Major Settings for Search Whoisology are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("Whoisology Started") @@ -45,14 +45,14 @@ def process(self): if self.verbose: self.logger.info("Whoisology request started") p = ' [*] Whoisology request started' - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) url = "https://whoisology.com/archive_11/" + \ self.domain r = requests.get(url) except Exception as e: error = "[!] 
Major issue with Whoisology Search:" + str(e) self.logger.error("Whoisology can download source (Check Connection)") - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) self.results = r.content def get_emails(self): diff --git a/Modules/YahooSearch.py b/Modules/YahooSearch.py index 6b3b8f7..a82e1a2 100644 --- a/Modules/YahooSearch.py +++ b/Modules/YahooSearch.py @@ -41,7 +41,7 @@ def __init__(self, Domain, verbose=False): except Exception as e: self.logger.critical( 'YahooSearch module failed to load: ' + str(e)) - print helpers.color(" [*] Major Settings for YahooSearch are missing, EXITING!\n", warning=True) + print(helpers.color(" [*] Major Settings for YahooSearch are missing, EXITING!\n", warning=True)) def execute(self): self.logger.debug("AskSearch Started") @@ -55,14 +55,14 @@ def search(self): if self.verbose: p = ' [*] Yahoo Search on page: ' + str(self.Counter) self.logger.info("YahooSearch on page:" + str(self.Counter)) - print helpers.color(p, firewall=True) + print(helpers.color(p, firewall=True)) try: url = 'https://search.yahoo.com/search?p=' + str(self.Domain) + \ '&b=' + str(self.Counter) + "&pz=" + str(self.Quanity) except Exception as e: error = " [!] Major issue with Yahoo Search:" + str(e) self.logger.error("Yahoo Search can not create URL:") - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) try: self.logger.debug("YahooSearch starting request on: " + str(url)) r = requests.get(url, headers=self.UserAgent) @@ -70,7 +70,7 @@ def search(self): error = " [!] Fail during Request to Yahoo (Check Connection):" + \ str(e) self.logger.error("YahooSearch failed to request (Check Connection)") - print helpers.color(error, warning=True) + print(helpers.color(error, warning=True)) results = r.content self.Html += results self.Counter += 100 diff --git a/README.md b/README.md index 82c0214..ee320c5 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,6 @@ [![Build Status](https://travis-ci.org/SimplySecurity/SimplyEmail.svg?branch=master)](https://travis-ci.org/SimplySecurity/SimplyEmail) [![Coverage Status](https://coveralls.io/repos/github/killswitch-GUI/SimplyEmail/badge.svg?branch=Version-1.4)](https://coveralls.io/github/killswitch-GUI/SimplyEmail?branch=Version-1.4) -![alt text](https://simplyemail.org/img/se-logo-2.png "Logo Title Text 1") - ---- SimplyEmail was built arround the concept that tools should do somthing, and do that somthing well, hence 'Simply'. Full documentation can be found at: @@ -12,7 +10,6 @@ SimplyEmail was built arround the concept that tools should do somthing, and do [HELP/QUESTIONS/CHAT] Join us at: https://simplysecurity.herokuapp.com -## TL;DR Supported Platforms / Tested with CI: * **Docker** * Kali Rolling @@ -33,3 +30,12 @@ or root@kali:~# docker pull simplysecurity/simplyemail root@kali:~# docker run -ti simplysecurity/simplyemail ``` + +Install dependencies: +pip3 install dnspython==2.3.0rc1 + +# install mechanize from source +git clone https://github.com/python-mechanize/mechanize.git +cd mechanize +pip3 install -e . 
+ diff --git a/SimplyEmail.py b/SimplyEmail.py index c92880d..5b56c45 100755 --- a/SimplyEmail.py +++ b/SimplyEmail.py @@ -70,7 +70,7 @@ def TaskStarter(version): sys.exit(0) if not len(cli_domain) > 1: log.warningmsg("Domain not supplied", "Main") - print helpers.color("[*] No Domain Supplied to start up!\n", warning=True) + print(helpers.color("[*] No Domain Supplied to start up!\n", warning=True)) sys.exit(0) if cli_test: # setup a small easy test to activate certain modules @@ -101,7 +101,7 @@ def main(): config.read('Common/SimplyEmail.ini') version = str(config['GlobalSettings']['Version']) except Exception as e: - print e + print(e) orc = TaskController.Conducter() orc.title() orc.title_screen() @@ -112,7 +112,7 @@ try: main() except KeyboardInterrupt: - print 'Interrupted' + print('Interrupted') try: sys.exit(0) except SystemExit: diff --git a/setup/requirments.txt b/setup/requirements.txt similarity index 69% rename from setup/requirments.txt rename to setup/requirements.txt index 39062e3..7549525 100644 --- a/setup/requirments.txt +++ b/setup/requirements.txt @@ -1,14 +1,12 @@ -BeautifulSoup==3.2.1 -beautifulsoup4==4.6.0 +beautifulsoup4 certifi==2017.7.27.1 chardet==3.0.4 configparser==3.5.0 -dnspython==1.15.0 +dnspython==2.3.0rc1 docx2txt==0.6 html5lib==0.999999999 idna==2.6 -mechanize==0.3.6 -pdfminer==20140328 +pdfminer.six python-magic==0.4.13 requests==2.18.4 six==1.11.0 @@ -16,8 +14,8 @@ urllib3==1.22 webencodings==0.5.1 xlsx2csv==0.7.3 XlsxWriter==1.0.2 lxml==4.1.0 coverage==4.4.1 coveralls==1.2.0 nose==1.3.7 -fake-useragent==0.1.8 +fake-useragent==1.1.1 diff --git a/tests/test_simplyemail_list.py b/tests/test_simplyemail_list.py index fd8c581..43a7c39 100644 --- a/tests/test_simplyemail_list.py +++ b/tests/test_simplyemail_list.py @@ -128,7 +128,7 @@ def test_converter(): # test the convert for all formats p = os.path.dirname(os.path.realpath('.')) + '/SimplyEmail/tests/' c = Converter.Converter(verbose=True) - print p + print(p) text = c.convert_docx_to_txt(p + 'Test-DOCX.docx') assert text assert 'How to Design and Test' in text @@ -227,7 +227,7 @@ def test_emailformat(): assert 'madm@verisgroup.com' in emails fm = '{first}_{last}' emails = em.EmailBuilder(cleannames, domain, fm) - print emails + print(emails) assert 'mad_max@verisgroup.com' in emails fm = '{first}' emails = em.EmailBuilder(cleannames, domain, fm)
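For reference, the `urlparse` → `urllib.parse` change applied throughout the Google* modules above works as follows: Python 3 folded the old `urlparse` module into `urllib.parse`, and the `parse_qs(urlparse(...).query)['q'][0]` idiom recovers the real target URL from a Google result link. A minimal sketch of that extraction (the sample `href` value is hypothetical, not taken from the modules):

```python
from urllib.parse import parse_qs, urlparse

# Google wraps result links as /url?q=<real-url>&sa=...;
# parse_qs on the query string recovers the q parameter.
href = "/url?q=https://example.com/report.pdf&sa=U"  # hypothetical sample href
l = parse_qs(urlparse(href).query)['q'][0]
print(l)  # https://example.com/report.pdf
```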