Introduction of Black: The uncompromising Python code formatter.

You can find out more about it here:
* https://github.com/python/black
* https://black.readthedocs.io/en/stable/
funilrys 2019-07-13 13:40:13 +02:00
parent 5386cec268
commit 05dad7e657
GPG Key ID: 0D8BFEF5515C00C6
4 changed files with 788 additions and 524 deletions
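The changes in the diff below are what Black produces with its default settings: string literals are normalized to double quotes and call sites are re-wrapped against the default 88-character line limit. The following sketch is not part of the commit; it only illustrates that transformation using Black's format_str API as shipped in the 19.x releases (the API may differ in later versions, and the update_hosts_file call is copied from the diff purely as sample input).

# Illustrative only: show how Black rewrites one of the calls touched below.
# Assumes "pip install black" (a 19.x release); format_str and FileMode are
# taken from that release and are not part of this commit.
import black

source = (
    "update_hosts_file('-a', '-n', '-o',\n"
    "                  'alternates/porn-social',\n"
    "                  '-e', 'porn', 'social')\n"
)

# Quotes become double quotes and, at module level, the call fits in a single
# 85-character line, so Black collapses it; inside main() the extra indentation
# pushes it past 88 columns, which is why the diff shows it wrapped instead.
print(black.format_str(source, mode=black.FileMode()), end="")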


@ -54,59 +54,89 @@ def update_readme_file():
def main():
parser = argparse.ArgumentParser(description="Creates custom hosts "
"file from hosts stored in "
"data subfolders.")
parser = argparse.ArgumentParser(
description="Creates custom hosts "
"file from hosts stored in "
"data subfolders."
)
parser.parse_args()
update_hosts_file("-a", "-o",
"alternates/gambling",
"-e", "gambling")
update_hosts_file("-a", "-n", "-o",
"alternates/porn",
"-e", "porn")
update_hosts_file("-a", "-n", "-o",
"alternates/social",
"-e", "social")
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews",
"-e", "fakenews")
update_hosts_file("-a", "-o", "alternates/gambling", "-e", "gambling")
update_hosts_file("-a", "-n", "-o", "alternates/porn", "-e", "porn")
update_hosts_file("-a", "-n", "-o", "alternates/social", "-e", "social")
update_hosts_file("-a", "-n", "-o", "alternates/fakenews", "-e", "fakenews")
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-gambling",
"-e", "fakenews", "gambling")
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-porn",
"-e", "fakenews", "porn")
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-social",
"-e", "fakenews", "social")
update_hosts_file("-a", "-n", "-o",
"alternates/gambling-porn",
"-e", "gambling", "porn")
update_hosts_file("-a", "-n", "-o",
"alternates/gambling-social",
"-e", "gambling", "social")
update_hosts_file("-a", "-n", "-o",
"alternates/porn-social",
"-e", "porn", "social")
update_hosts_file(
"-a", "-n", "-o", "alternates/fakenews-gambling", "-e", "fakenews", "gambling"
)
update_hosts_file(
"-a", "-n", "-o", "alternates/fakenews-porn", "-e", "fakenews", "porn"
)
update_hosts_file(
"-a", "-n", "-o", "alternates/fakenews-social", "-e", "fakenews", "social"
)
update_hosts_file(
"-a", "-n", "-o", "alternates/gambling-porn", "-e", "gambling", "porn"
)
update_hosts_file(
"-a", "-n", "-o", "alternates/gambling-social", "-e", "gambling", "social"
)
update_hosts_file(
"-a", "-n", "-o", "alternates/porn-social", "-e", "porn", "social"
)
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-gambling-porn",
"-e", "fakenews", "gambling", "porn")
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-gambling-social",
"-e", "fakenews", "gambling", "social")
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-porn-social",
"-e", "fakenews", "porn", "social")
update_hosts_file("-a", "-n", "-o",
"alternates/gambling-porn-social",
"-e", "gambling", "porn", "social")
update_hosts_file(
"-a",
"-n",
"-o",
"alternates/fakenews-gambling-porn",
"-e",
"fakenews",
"gambling",
"porn",
)
update_hosts_file(
"-a",
"-n",
"-o",
"alternates/fakenews-gambling-social",
"-e",
"fakenews",
"gambling",
"social",
)
update_hosts_file(
"-a",
"-n",
"-o",
"alternates/fakenews-porn-social",
"-e",
"fakenews",
"porn",
"social",
)
update_hosts_file(
"-a",
"-n",
"-o",
"alternates/gambling-porn-social",
"-e",
"gambling",
"porn",
"social",
)
update_hosts_file("-a", "-n", "-o",
"alternates/fakenews-gambling-porn-social",
"-e", "fakenews", "gambling", "porn", "social")
update_hosts_file(
"-a",
"-n",
"-o",
"alternates/fakenews-gambling-porn-social",
"-e",
"fakenews",
"gambling",
"porn",
"social",
)
update_hosts_file("-a", "-n")

File diff suppressed because it is too large.


@ -30,7 +30,7 @@ PY3 = sys.version_info >= (3, 0)
if PY3:
from urllib.request import urlopen
else:
raise Exception('We do not support Python 2 anymore.')
raise Exception("We do not support Python 2 anymore.")
# Syntactic sugar for "sudo" command in UNIX / Linux
if platform.system() == "OpenBSD":
@ -79,58 +79,120 @@ def get_defaults():
"exclusions": [],
"commonexclusions": ["hulu.com"],
"blacklistfile": path_join_robust(BASEDIR_PATH, "blacklist"),
"whitelistfile": path_join_robust(BASEDIR_PATH, "whitelist")}
"whitelistfile": path_join_robust(BASEDIR_PATH, "whitelist"),
}
# End Project Settings
def main():
parser = argparse.ArgumentParser(description="Creates a unified hosts "
"file from hosts stored in "
"data subfolders.")
parser.add_argument("--auto", "-a", dest="auto", default=False,
action="store_true", help="Run without prompting.")
parser.add_argument("--backup", "-b", dest="backup", default=False,
action="store_true", help="Backup the hosts "
"files before they "
"are overridden.")
parser.add_argument("--extensions", "-e", dest="extensions", default=[],
nargs="*", help="Host extensions to include "
"in the final hosts file.")
parser.add_argument("--ip", "-i", dest="targetip", default="0.0.0.0",
help="Target IP address. Default is 0.0.0.0.")
parser.add_argument("--keepdomaincomments", "-k",
dest="keepdomaincomments", action="store_false", default=True,
help="Do not keep domain line comments.")
parser.add_argument("--noupdate", "-n", dest="noupdate", default=False,
action="store_true", help="Don't update from "
"host data sources.")
parser.add_argument("--skipstatichosts", "-s", dest="skipstatichosts",
default=False, action="store_true",
help="Skip static localhost entries "
"in the final hosts file.")
parser.add_argument("--output", "-o", dest="outputsubfolder", default="",
help="Output subfolder for generated hosts file.")
parser.add_argument("--replace", "-r", dest="replace", default=False,
action="store_true", help="Replace your active "
"hosts file with this "
"new hosts file.")
parser.add_argument("--flush-dns-cache", "-f", dest="flushdnscache",
default=False, action="store_true",
help="Attempt to flush DNS cache "
"after replacing the hosts file.")
parser.add_argument("--compress", "-c", dest="compress",
default=False, action="store_true",
help="Compress the hosts file "
"ignoring non-necessary lines "
"(empty lines and comments) and "
"putting multiple domains in "
"each line. Improve the "
"performances under Windows.")
parser.add_argument("--minimise", "-m", dest="minimise",
default=False, action="store_true",
help="Minimise the hosts file "
"ignoring non-necessary lines "
"(empty lines and comments).")
parser = argparse.ArgumentParser(
description="Creates a unified hosts "
"file from hosts stored in "
"data subfolders."
)
parser.add_argument(
"--auto",
"-a",
dest="auto",
default=False,
action="store_true",
help="Run without prompting.",
)
parser.add_argument(
"--backup",
"-b",
dest="backup",
default=False,
action="store_true",
help="Backup the hosts " "files before they " "are overridden.",
)
parser.add_argument(
"--extensions",
"-e",
dest="extensions",
default=[],
nargs="*",
help="Host extensions to include " "in the final hosts file.",
)
parser.add_argument(
"--ip",
"-i",
dest="targetip",
default="0.0.0.0",
help="Target IP address. Default is 0.0.0.0.",
)
parser.add_argument(
"--keepdomaincomments",
"-k",
dest="keepdomaincomments",
action="store_false",
default=True,
help="Do not keep domain line comments.",
)
parser.add_argument(
"--noupdate",
"-n",
dest="noupdate",
default=False,
action="store_true",
help="Don't update from " "host data sources.",
)
parser.add_argument(
"--skipstatichosts",
"-s",
dest="skipstatichosts",
default=False,
action="store_true",
help="Skip static localhost entries " "in the final hosts file.",
)
parser.add_argument(
"--output",
"-o",
dest="outputsubfolder",
default="",
help="Output subfolder for generated hosts file.",
)
parser.add_argument(
"--replace",
"-r",
dest="replace",
default=False,
action="store_true",
help="Replace your active " "hosts file with this " "new hosts file.",
)
parser.add_argument(
"--flush-dns-cache",
"-f",
dest="flushdnscache",
default=False,
action="store_true",
help="Attempt to flush DNS cache " "after replacing the hosts file.",
)
parser.add_argument(
"--compress",
"-c",
dest="compress",
default=False,
action="store_true",
help="Compress the hosts file "
"ignoring non-necessary lines "
"(empty lines and comments) and "
"putting multiple domains in "
"each line. Improve the "
"performances under Windows.",
)
parser.add_argument(
"--minimise",
"-m",
dest="minimise",
default=False,
action="store_true",
help="Minimise the hosts file "
"ignoring non-necessary lines "
"(empty lines and comments).",
)
global settings
@ -149,17 +211,19 @@ def main():
settings["extensionsources"] = list_dir_no_hidden(extensions_path)
# All our extensions folders...
settings["extensions"] = [os.path.basename(item) for item in list_dir_no_hidden(extensions_path)]
settings["extensions"] = [
os.path.basename(item) for item in list_dir_no_hidden(extensions_path)
]
# ... intersected with the extensions passed-in as arguments, then sorted.
settings["extensions"] = sorted(list(
set(options["extensions"]).intersection(settings["extensions"])))
settings["extensions"] = sorted(
list(set(options["extensions"]).intersection(settings["extensions"]))
)
auto = settings["auto"]
exclusion_regexes = settings["exclusionregexs"]
source_data_filename = settings["sourcedatafilename"]
update_sources = prompt_for_update(freshen=settings["freshen"],
update_auto=auto)
update_sources = prompt_for_update(freshen=settings["freshen"], update_auto=auto)
if update_sources:
update_all_sources(source_data_filename, settings["hostfilename"])
@ -171,14 +235,17 @@ def main():
exclusion_regexes = display_exclusion_options(
common_exclusions=common_exclusions,
exclusion_pattern=exclusion_pattern,
exclusion_regexes=exclusion_regexes)
exclusion_regexes=exclusion_regexes,
)
extensions = settings["extensions"]
sources_data = update_sources_data(settings["sourcesdata"],
datapath=data_path,
extensions=extensions,
extensionspath=extensions_path,
sourcedatafilename=source_data_filename)
sources_data = update_sources_data(
settings["sourcesdata"],
datapath=data_path,
extensions=extensions,
extensionspath=extensions_path,
sourcedatafilename=source_data_filename,
)
merge_file = create_initial_file()
remove_old_hosts_file(settings["backup"])
@ -199,32 +266,44 @@ def main():
output_subfolder = settings["outputsubfolder"]
skip_static_hosts = settings["skipstatichosts"]
write_opening_header(final_file, extensions=extensions,
numberofrules=number_of_rules,
outputsubfolder=output_subfolder,
skipstatichosts=skip_static_hosts)
write_opening_header(
final_file,
extensions=extensions,
numberofrules=number_of_rules,
outputsubfolder=output_subfolder,
skipstatichosts=skip_static_hosts,
)
final_file.close()
update_readme_data(settings["readmedatafilename"],
extensions=extensions,
numberofrules=number_of_rules,
outputsubfolder=output_subfolder,
sourcesdata=sources_data)
update_readme_data(
settings["readmedatafilename"],
extensions=extensions,
numberofrules=number_of_rules,
outputsubfolder=output_subfolder,
sourcesdata=sources_data,
)
print_success("Success! The hosts file has been saved in folder " +
output_subfolder + "\nIt contains " +
"{:,}".format(number_of_rules) +
" unique entries.")
print_success(
"Success! The hosts file has been saved in folder "
+ output_subfolder
+ "\nIt contains "
+ "{:,}".format(number_of_rules)
+ " unique entries."
)
move_file = prompt_for_move(final_file, auto=auto,
replace=settings["replace"],
skipstatichosts=skip_static_hosts)
move_file = prompt_for_move(
final_file,
auto=auto,
replace=settings["replace"],
skipstatichosts=skip_static_hosts,
)
# We only flush the DNS cache if we have
# moved a new hosts file into place.
if move_file:
prompt_for_flush_dns_cache(flush_cache=settings["flushdnscache"],
prompt_flush=not auto)
prompt_for_flush_dns_cache(
flush_cache=settings["flushdnscache"], prompt_flush=not auto
)
# Prompt the User
@ -259,7 +338,9 @@ def prompt_for_update(freshen, update_auto):
# Starting in Python 3.3, IOError is aliased
# OSError. However, we have to catch both for
# Python 2.x failures.
print_failure("ERROR: No 'hosts' file in the folder. Try creating one manually.")
print_failure(
"ERROR: No 'hosts' file in the folder. Try creating one manually."
)
if not freshen:
return
@ -291,9 +372,11 @@ def prompt_for_exclusions(skip_prompt):
custom domains beyond those in the whitelist.
"""
prompt = ("Do you want to exclude any domains?\n"
"For example, hulu.com video streaming must be able to access "
"its tracking and ad servers in order to play video.")
prompt = (
"Do you want to exclude any domains?\n"
"For example, hulu.com video streaming must be able to access "
"its tracking and ad servers in order to play video."
)
if not skip_prompt:
if query_yes_no(prompt):
@ -361,6 +444,8 @@ def prompt_for_move(final_file, **move_params):
move_hosts_file_into_place(final_file)
return move_file
# End Prompt the User
@ -394,15 +479,16 @@ def display_exclusion_options(common_exclusions, exclusion_pattern, exclusion_re
prompt = "Do you want to exclude the domain " + exclusion_option + " ?"
if query_yes_no(prompt):
exclusion_regexes = exclude_domain(exclusion_option,
exclusion_pattern,
exclusion_regexes)
exclusion_regexes = exclude_domain(
exclusion_option, exclusion_pattern, exclusion_regexes
)
else:
continue
if query_yes_no("Do you want to exclude any other domains?"):
exclusion_regexes = gather_custom_exclusions(exclusion_pattern,
exclusion_regexes)
exclusion_regexes = gather_custom_exclusions(
exclusion_pattern, exclusion_regexes
)
return exclusion_regexes
@ -432,7 +518,9 @@ def gather_custom_exclusions(exclusion_pattern, exclusion_regexes):
user_domain = input(domain_prompt)
if is_valid_domain_format(user_domain):
exclusion_regexes = exclude_domain(user_domain, exclusion_pattern, exclusion_regexes)
exclusion_regexes = exclude_domain(
user_domain, exclusion_pattern, exclusion_regexes
)
continue_prompt = "Do you have more domains you want to enter?"
if not query_yes_no(continue_prompt):
@ -497,6 +585,8 @@ def matches_exclusions(stripped_rule, exclusion_regexes):
return True
return False
# End Exclusion Logic
@ -533,8 +623,7 @@ def update_sources_data(sources_data, **sources_params):
update_file.close()
for source in sources_params["extensions"]:
source_dir = path_join_robust(
sources_params["extensionspath"], source)
source_dir = path_join_robust(sources_params["extensionspath"], source)
for update_file_path in recursive_glob(source_dir, source_data_filename):
update_file = open(update_file_path, "r")
update_data = json.load(update_file)
@ -579,9 +668,7 @@ def update_all_sources(source_data_filename, host_filename):
"""
# The transforms we support
transform_methods = {
'jsonarray': jsonarray
}
transform_methods = {"jsonarray": jsonarray}
all_sources = recursive_glob("*", source_data_filename)
@ -606,13 +693,16 @@ def update_all_sources(source_data_filename, host_filename):
# get rid of carriage-return symbols
updated_file = updated_file.replace("\r", "")
hosts_file = open(path_join_robust(BASEDIR_PATH,
os.path.dirname(source),
host_filename), "wb")
hosts_file = open(
path_join_robust(BASEDIR_PATH, os.path.dirname(source), host_filename),
"wb",
)
write_data(hosts_file, updated_file)
hosts_file.close()
except Exception:
print("Error in updating source: ", update_url)
# End Update Logic
@ -625,8 +715,7 @@ def create_initial_file():
merge_file = tempfile.NamedTemporaryFile()
# spin the sources for the base file
for source in recursive_glob(settings["datapath"],
settings["hostfilename"]):
for source in recursive_glob(settings["datapath"], settings["hostfilename"]):
start = "# Start {}\n\n".format(os.path.basename(os.path.dirname(source)))
end = "# End {}\n\n".format(os.path.basename(os.path.dirname(source)))
@ -636,8 +725,10 @@ def create_initial_file():
# spin the sources for extensions to the base file
for source in settings["extensions"]:
for filename in recursive_glob(path_join_robust(
settings["extensionspath"], source), settings["hostfilename"]):
for filename in recursive_glob(
path_join_robust(settings["extensionspath"], source),
settings["hostfilename"],
):
with open(filename, "r") as curFile:
write_data(merge_file, curFile.read())
@ -668,7 +759,7 @@ def compress_file(input_file, target_ip, output_file):
"""
input_file.seek(0) # reset file pointer
write_data(output_file, '\n')
write_data(output_file, "\n")
target_ip_len = len(target_ip)
lines = [target_ip]
@ -677,12 +768,11 @@ def compress_file(input_file, target_ip, output_file):
line = line.decode("UTF-8")
if line.startswith(target_ip):
if lines[lines_index].count(' ') < 9:
lines[lines_index] += ' ' \
+ line[target_ip_len:line.find('#')].strip()
if lines[lines_index].count(" ") < 9:
lines[lines_index] += " " + line[target_ip_len : line.find("#")].strip()
else:
lines[lines_index] += '\n'
lines.append(line[:line.find('#')].strip())
lines[lines_index] += "\n"
lines.append(line[: line.find("#")].strip())
lines_index += 1
for line in lines:
@ -707,14 +797,14 @@ def minimise_file(input_file, target_ip, output_file):
"""
input_file.seek(0) # reset file pointer
write_data(output_file, '\n')
write_data(output_file, "\n")
lines = []
for line in input_file.readlines():
line = line.decode("UTF-8")
if line.startswith(target_ip):
lines.append(line[:line.find('#')].strip() + '\n')
lines.append(line[: line.find("#")].strip() + "\n")
for line in lines:
write_data(output_file, line)
@ -772,27 +862,28 @@ def remove_dups_and_excl(merge_file, exclusion_regexes, output_file=None):
line = line.replace("\t+", " ")
# see gh-271: trim trailing whitespace, periods
line = line.rstrip(' .')
line = line.rstrip(" .")
# Testing the first character doesn't require startswith
if line[0] == "#" or re.match(r'^\s*$', line[0]):
if line[0] == "#" or re.match(r"^\s*$", line[0]):
write_data(final_file, line)
continue
if "::1" in line:
continue
stripped_rule = strip_rule(line) # strip comments
if not stripped_rule or matches_exclusions(stripped_rule,
exclusion_regexes):
if not stripped_rule or matches_exclusions(stripped_rule, exclusion_regexes):
continue
# Normalize rule
hostname, normalized_rule = normalize_rule(
stripped_rule, target_ip=settings["targetip"],
keep_domain_comments=settings["keepdomaincomments"])
stripped_rule,
target_ip=settings["targetip"],
keep_domain_comments=settings["keepdomaincomments"],
)
for exclude in exclusions:
if re.search(r'[\s\.]' + re.escape(exclude) + r'\s', line):
if re.search(r"[\s\.]" + re.escape(exclude) + r"\s", line):
write_line = False
break
@ -832,7 +923,7 @@ def normalize_rule(rule, target_ip, keep_domain_comments):
"""
first try: IP followed by domain
"""
regex = r'^\s*(\d{1,3}\.){3}\d{1,3}\s+([\w\.-]+[a-zA-Z])(.*)'
regex = r"^\s*(\d{1,3}\.){3}\d{1,3}\s+([\w\.-]+[a-zA-Z])(.*)"
result = re.search(regex, rule)
if result:
@ -843,7 +934,7 @@ def normalize_rule(rule, target_ip, keep_domain_comments):
rule = "%s %s" % (target_ip, hostname)
if suffix and keep_domain_comments:
if not suffix.strip().startswith('#'):
if not suffix.strip().startswith("#"):
rule += " #%s" % suffix
else:
rule += " %s" % suffix
@ -853,7 +944,7 @@ def normalize_rule(rule, target_ip, keep_domain_comments):
"""
next try: IP address followed by host IP address
"""
regex = r'^\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*(.*)'
regex = r"^\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*(.*)"
result = re.search(regex, rule)
if result:
@ -863,7 +954,7 @@ def normalize_rule(rule, target_ip, keep_domain_comments):
rule = "%s %s" % (target_ip, ip_host)
if suffix and keep_domain_comments:
if not suffix.strip().startswith('#'):
if not suffix.strip().startswith("#"):
rule += " #%s" % suffix
else:
rule += " %s" % suffix
@ -922,22 +1013,51 @@ def write_opening_header(final_file, **header_params):
file_contents = final_file.read() # Save content.
final_file.seek(0) # Write at the top.
write_data(final_file, "# This hosts file is a merged collection "
"of hosts from reputable sources,\n")
write_data(
final_file,
"# This hosts file is a merged collection "
"of hosts from reputable sources,\n",
)
write_data(final_file, "# with a dash of crowd sourcing via Github\n#\n")
write_data(final_file, "# Date: " + time.strftime("%d %B %Y %H:%M:%S (%Z)", time.gmtime()) + "\n")
write_data(
final_file,
"# Date: " + time.strftime("%d %B %Y %H:%M:%S (%Z)", time.gmtime()) + "\n",
)
if header_params["extensions"]:
write_data(final_file, "# Extensions added to this file: " + ", ".join(
header_params["extensions"]) + "\n")
write_data(
final_file,
"# Extensions added to this file: "
+ ", ".join(header_params["extensions"])
+ "\n",
)
write_data(final_file, ("# Number of unique domains: {:,}\n#\n".format(header_params["numberofrules"])))
write_data(final_file, "# Fetch the latest version of this file: "
"https://raw.githubusercontent.com/StevenBlack/hosts/master/" +
path_join_robust(header_params["outputsubfolder"], "") + "hosts\n")
write_data(final_file, "# Project home page: https://github.com/StevenBlack/hosts\n")
write_data(final_file, "# Project releases: https://github.com/StevenBlack/hosts/releases\n#\n")
write_data(final_file, "# ===============================================================\n")
write_data(
final_file,
(
"# Number of unique domains: {:,}\n#\n".format(
header_params["numberofrules"]
)
),
)
write_data(
final_file,
"# Fetch the latest version of this file: "
"https://raw.githubusercontent.com/StevenBlack/hosts/master/"
+ path_join_robust(header_params["outputsubfolder"], "")
+ "hosts\n",
)
write_data(
final_file, "# Project home page: https://github.com/StevenBlack/hosts\n"
)
write_data(
final_file,
"# Project releases: https://github.com/StevenBlack/hosts/releases\n#\n",
)
write_data(
final_file,
"# ===============================================================\n",
)
write_data(final_file, "\n")
if not header_params["skipstatichosts"]:
@ -997,9 +1117,11 @@ def update_readme_data(readme_file, **readme_updates):
extensions_key = "-".join(extensions)
output_folder = readme_updates["outputsubfolder"]
generation_data = {"location": path_join_robust(output_folder, ""),
"entries": readme_updates["numberofrules"],
"sourcesdata": readme_updates["sourcesdata"]}
generation_data = {
"location": path_join_robust(output_folder, ""),
"entries": readme_updates["numberofrules"],
"sourcesdata": readme_updates["sourcesdata"],
}
with open(readme_file, "r") as f:
readme_data = json.load(f)
@ -1029,12 +1151,16 @@ def move_hosts_file_into_place(final_file):
filename = os.path.abspath(final_file.name)
if os.name == "posix":
print("Moving the file requires administrative privileges. You might need to enter your password.")
print(
"Moving the file requires administrative privileges. You might need to enter your password."
)
if subprocess.call(SUDO + ["cp", filename, "/etc/hosts"]):
print_failure("Moving the file failed.")
elif os.name == "nt":
print("Automatically moving the hosts file in place is not yet supported.")
print("Please move the generated file to %SystemRoot%\system32\drivers\etc\hosts") # noqa: W605
print(
"Please move the generated file to %SystemRoot%\system32\drivers\etc\hosts"
) # noqa: W605
def flush_dns_cache():
@ -1043,7 +1169,9 @@ def flush_dns_cache():
"""
print("Flushing the DNS cache to utilize new hosts file...")
print("Flushing the DNS cache requires administrative privileges. You might need to enter your password.")
print(
"Flushing the DNS cache requires administrative privileges. You might need to enter your password."
)
dns_cache_found = False
@ -1052,8 +1180,10 @@ def flush_dns_cache():
print_failure("Flushing the DNS cache failed.")
elif os.name == "nt":
print("Automatically flushing the DNS cache is not yet supported.")
print("Please copy and paste the command 'ipconfig /flushdns' in "
"administrator command prompt after running this script.")
print(
"Please copy and paste the command 'ipconfig /flushdns' in "
"administrator command prompt after running this script."
)
else:
nscd_prefixes = ["/etc", "/etc/rc.d"]
nscd_msg = "Flushing the DNS cache by restarting nscd {result}"
@ -1088,7 +1218,9 @@ def flush_dns_cache():
for service_type in service_types:
service = service_type + ".service"
service_file = path_join_robust(system_dir, service)
service_msg = ("Flushing the DNS cache by restarting " + service + " {result}")
service_msg = (
"Flushing the DNS cache by restarting " + service + " {result}"
)
if os.path.isfile(service_file):
dns_cache_found = True
@ -1132,8 +1264,9 @@ def remove_old_hosts_file(backup):
open(old_file_path, "a").close()
if backup:
backup_file_path = path_join_robust(BASEDIR_PATH, "hosts-{}".format(
time.strftime("%Y-%m-%d-%H-%M-%S")))
backup_file_path = path_join_robust(
BASEDIR_PATH, "hosts-{}".format(time.strftime("%Y-%m-%d-%H-%M-%S"))
)
# Make a backup copy, marking the date in which the list was updated
shutil.copy(old_file_path, backup_file_path)
@ -1142,6 +1275,8 @@ def remove_old_hosts_file(backup):
# Create new empty hosts file
open(old_file_path, "a").close()
# End File Logic
@ -1174,9 +1309,9 @@ def domain_to_idna(line):
- The following also split the trailing comment of a given line.
"""
if not line.startswith('#'):
tabs = '\t'
space = ' '
if not line.startswith("#"):
tabs = "\t"
space = " "
tabs_position, space_position = (line.find(tabs), line.find(space))
@ -1190,7 +1325,7 @@ def domain_to_idna(line):
elif not space_position == -1:
separator = space
else:
separator = ''
separator = ""
if separator:
splited_line = line.split(separator)
@ -1202,20 +1337,21 @@ def domain_to_idna(line):
break
index += 1
if '#' in splited_line[index]:
index_comment = splited_line[index].find('#')
if "#" in splited_line[index]:
index_comment = splited_line[index].find("#")
if index_comment > -1:
comment = splited_line[index][index_comment:]
splited_line[index] = splited_line[index] \
.split(comment)[0] \
.encode("IDNA").decode("UTF-8") + \
comment
splited_line[index] = (
splited_line[index]
.split(comment)[0]
.encode("IDNA")
.decode("UTF-8")
+ comment
)
splited_line[index] = splited_line[index] \
.encode("IDNA") \
.decode("UTF-8")
splited_line[index] = splited_line[index].encode("IDNA").decode("UTF-8")
except IndexError:
pass
return separator.join(splited_line)
@ -1267,8 +1403,8 @@ def get_file_by_url(url):
try:
f = urlopen(url)
soup = BeautifulSoup(f.read(), 'lxml').get_text()
return '\n'.join(list(map(domain_to_idna, soup.split('\n'))))
soup = BeautifulSoup(f.read(), "lxml").get_text()
return "\n".join(list(map(domain_to_idna, soup.split("\n"))))
except Exception:
print("Problem getting file: ", url)
@ -1322,11 +1458,8 @@ def query_yes_no(question, default="yes"):
yes : Whether or not the user replied yes to the question.
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
prompt = {None: " [y/n] ",
"yes": " [Y/n] ",
"no": " [y/N] "}.get(default, None)
valid = {"yes": "yes", "y": "yes", "ye": "yes", "no": "no", "n": "no"}
prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}.get(default, None)
if not prompt:
raise ValueError("invalid default answer: '%s'" % default)
@ -1371,8 +1504,10 @@ def is_valid_domain_format(domain):
domain_regex = re.compile(r"www\d{0,3}[.]|https?")
if domain_regex.match(domain):
print("The domain " + domain + " is not valid. Do not include "
"www.domain.com or http(s)://domain.com. Try again.")
print(
"The domain " + domain + " is not valid. Do not include "
"www.domain.com or http(s)://domain.com. Try again."
)
return False
else:
return True
@ -1436,7 +1571,9 @@ def path_join_robust(path, *paths):
return os.path.join(path, *paths)
except UnicodeDecodeError as e:
raise locale.Error("Unable to construct path. This is likely a LOCALE issue:\n\n" + str(e))
raise locale.Error(
"Unable to construct path. This is likely a LOCALE issue:\n\n" + str(e)
)
# Colors
@ -1462,7 +1599,9 @@ def supports_color():
"""
sys_platform = sys.platform
supported = sys_platform != "Pocket PC" and (sys_platform != "win32" or "ANSICON" in os.environ)
supported = sys_platform != "Pocket PC" and (
sys_platform != "win32" or "ANSICON" in os.environ
)
atty_connected = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
return supported and atty_connected
@ -1521,6 +1660,8 @@ def print_failure(text):
"""
print(colorize(text, Colors.FAIL))
# End Helper Functions


@ -12,19 +12,21 @@ from string import Template
# Project Settings
BASEDIR_PATH = os.path.dirname(os.path.realpath(__file__))
README_TEMPLATE = os.path.join(BASEDIR_PATH, 'readme_template.md')
README_FILENAME = 'readme.md'
README_TEMPLATE = os.path.join(BASEDIR_PATH, "readme_template.md")
README_FILENAME = "readme.md"
README_DATA_FILENAME = "readmeData.json"
def main():
s = Template('${description} | [Readme](https://github.com/StevenBlack/'
'hosts/blob/master/${location}readme.md) | '
'[link](https://raw.githubusercontent.com/StevenBlack/'
'hosts/master/${location}hosts) | '
'${fmtentries} | '
'[link](http://sbc.io/hosts/${location}hosts)')
with open(README_DATA_FILENAME, 'r') as f:
s = Template(
"${description} | [Readme](https://github.com/StevenBlack/"
"hosts/blob/master/${location}readme.md) | "
"[link](https://raw.githubusercontent.com/StevenBlack/"
"hosts/master/${location}hosts) | "
"${fmtentries} | "
"[link](http://sbc.io/hosts/${location}hosts)"
)
with open(README_DATA_FILENAME, "r") as f:
data = json.load(f)
keys = list(data.keys())
@ -37,10 +39,11 @@ def main():
for key in keys:
data[key]["fmtentries"] = "{:,}".format(data[key]["entries"])
if key == "base":
data[key]["description"] = 'Unified hosts = **(adware + malware)**'
data[key]["description"] = "Unified hosts = **(adware + malware)**"
else:
data[key]["description"] = ('Unified hosts **+ ' +
key.replace("-", " + ") + '**')
data[key]["description"] = (
"Unified hosts **+ " + key.replace("-", " + ") + "**"
)
toc_rows += s.substitute(data[key]) + "\n"
@ -52,10 +55,13 @@ def main():
"issues": "",
"url": "",
"license": "",
"issues": ""}
"issues": "",
}
t = Template('${name} | ${description} |[link](${homeurl})'
' | [raw](${url}) | ${frequency} | ${license} | [issues](${issues}) ')
t = Template(
"${name} | ${description} |[link](${homeurl})"
" | [raw](${url}) | ${frequency} | ${license} | [issues](${issues}) "
)
for key in keys:
extensions = key.replace("-", ", ")
@ -71,16 +77,21 @@ def main():
this_row.update(source)
source_rows += t.substitute(this_row) + "\n"
with open(os.path.join(data[key]["location"],
README_FILENAME), "wt") as out:
with open(os.path.join(data[key]["location"], README_FILENAME), "wt") as out:
for line in open(README_TEMPLATE):
line = line.replace('@GEN_DATE@', time.strftime("%B %d %Y", time.gmtime()))
line = line.replace('@EXTENSIONS@', extensions_str)
line = line.replace('@EXTENSIONS_HEADER@', extensions_header)
line = line.replace('@NUM_ENTRIES@', "{:,}".format(data[key]["entries"]))
line = line.replace('@SUBFOLDER@', os.path.join(data[key]["location"], ''))
line = line.replace('@TOCROWS@', toc_rows)
line = line.replace('@SOURCEROWS@', source_rows)
line = line.replace(
"@GEN_DATE@", time.strftime("%B %d %Y", time.gmtime())
)
line = line.replace("@EXTENSIONS@", extensions_str)
line = line.replace("@EXTENSIONS_HEADER@", extensions_header)
line = line.replace(
"@NUM_ENTRIES@", "{:,}".format(data[key]["entries"])
)
line = line.replace(
"@SUBFOLDER@", os.path.join(data[key]["location"], "")
)
line = line.replace("@TOCROWS@", toc_rows)
line = line.replace("@SOURCEROWS@", source_rows)
out.write(line)