diff --git a/.editorconfig b/.editorconfig
index 85e5fd9..a0ecf5f 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -4,30 +4,23 @@
root = true
-[*]
+[Vagrantfile]
indent_style = space
-indent_size = 4
+indent_size = 2
+
+[*.rb]
+indent_style = space
+indent_size = 2
+
+[**.html]
+indent_style = tab
+
+[**.py]
+indent_style = tab
+
+[**]
+indent_style = tab
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
-
-[*.html]
-indent_style = tab
-
-[Makefile]
-indent_style = tab
-indent_size = 4
-
-[Vagrantfile]
-indent_size = 2
-
-[*.rb]
-indent_size = 2
-
-[*.py]
-indent_style = tab
-
-[*.js]
-indent_size = 2
-
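
Note on the new layout: EditorConfig applies every matching section top to bottom, and later sections override earlier ones, so the catch-all [**] placed last makes its indent_style = tab win even for Vagrantfile and *.rb files despite their explicit space settings above it. A quick way to inspect the resolved properties, sketched with the editorconfig-core-py package (the repository path is a placeholder):

from editorconfig import get_properties  # pip install editorconfig

# Resolved properties for a file; the path must be absolute.
props = get_properties("/home/user/mailinabox/Vagrantfile")
# Prints "tab": the trailing [**] section matches last and overrides
# the indent_style = space declared in [Vagrantfile] above it.
print(props.get("indent_style"))
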
diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 0000000..e56c42d
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,3 @@
+[style]
+based_on_style = pep8
+use_tabs = True
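
This new .style.yapf is what drives the Python reformatting in the rest of this diff: yapf discovers the nearest .style.yapf above each file it formats, so running it from the repository root (for example, yapf --in-place --recursive management/) produces hunks like the ones below. A minimal sketch of the same thing through yapf's Python API (the sample source string is illustrative):

from yapf.yapflib.yapf_api import FormatCode  # pip install yapf

source = "x = {  'a':37,'b':42}\n"
# FormatCode returns (reformatted_source, changed); style_config may
# point at a style file like the one added above.
formatted, changed = FormatCode(source, style_config=".style.yapf")
print(formatted)
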
diff --git a/management/auth.py b/management/auth.py
index 0a88c45..a9886b7 100644
--- a/management/auth.py
+++ b/management/auth.py
@@ -1,4 +1,9 @@
-import base64, os, os.path, hmac, json, secrets
+import base64
+import os
+import os.path
+import hmac
+import json
+import secrets
from datetime import timedelta
from expiringdict import ExpiringDict
@@ -7,17 +12,21 @@ import utils
from mailconfig import get_mail_password, get_mail_user_privileges
from mfa import get_hash_mfa_state, validate_auth_mfa
-DEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key'
+DEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key'
DEFAULT_AUTH_REALM = 'Mail-in-a-Box Management Server'
+
class AuthService:
+
def __init__(self):
self.auth_realm = DEFAULT_AUTH_REALM
self.key_path = DEFAULT_KEY_PATH
self.max_session_duration = timedelta(days=2)
self.init_system_api_key()
- self.sessions = ExpiringDict(max_len=64, max_age_seconds=self.max_session_duration.total_seconds())
+ self.sessions = ExpiringDict(
+ max_len=64,
+ max_age_seconds=self.max_session_duration.total_seconds())
def init_system_api_key(self):
"""Write an API key to a local file so local processes can use the API"""
@@ -26,7 +35,8 @@ class AuthService:
# Based on answer by A-B-B: http://stackoverflow.com/a/15015748
old_umask = os.umask(0)
try:
- return os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode), 'w')
+ return os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode),
+ 'w')
finally:
os.umask(old_umask)
@@ -46,8 +56,10 @@ class AuthService:
this key is not associated with a user."""
def parse_http_authorization_basic(header):
+
def decode(s):
return base64.b64decode(s.encode('ascii')).decode('ascii')
+
if " " not in header:
return None, None
scheme, credentials = header.split(maxsplit=1)
@@ -59,12 +71,15 @@ class AuthService:
username, password = credentials.split(':', maxsplit=1)
return username, password
- username, password = parse_http_authorization_basic(request.headers.get('Authorization', ''))
+ username, password = parse_http_authorization_basic(
+ request.headers.get('Authorization', ''))
if username in (None, ""):
raise ValueError("Authorization header invalid.")
if username.strip() == "" and password.strip() == "":
- raise ValueError("No email address, password, session key, or API key provided.")
+ raise ValueError(
+ "No email address, password, session key, or API key provided."
+ )
# If user passed the system API key, grant administrative privs. This key
# is not associated with a user.
@@ -72,7 +87,8 @@ class AuthService:
return (None, ["admin"])
# If the password corresponds with a session token for the user, grant access for that user.
- if self.get_session(username, password, "login", env) and not login_only:
+ if self.get_session(username, password, "login",
+ env) and not login_only:
sessionid = password
session = self.sessions[sessionid]
if logout:
@@ -96,7 +112,8 @@ class AuthService:
# deleted after the session was granted. On error the call will return a tuple
# of an error message and an HTTP status code.
privs = get_mail_user_privileges(username, env)
- if isinstance(privs, tuple): raise ValueError(privs[0])
+ if isinstance(privs, tuple):
+ raise ValueError(privs[0])
# Return the authorization information.
return (username, privs)
@@ -120,10 +137,13 @@ class AuthService:
# a non-zero exit status if the credentials are no good,
# and check_call will raise an exception in that case.
utils.shell('check_call', [
- "/usr/bin/doveadm", "pw",
- "-p", pw,
- "-t", pw_hash,
- ])
+ "/usr/bin/doveadm",
+ "pw",
+ "-p",
+ pw,
+ "-t",
+ pw_hash,
+ ])
except:
# Login failed.
raise ValueError("Incorrect email address or password.")
@@ -141,7 +161,8 @@ class AuthService:
# Add to the message the current MFA state, which is a list of MFA information.
# Turn it into a string stably.
- msg += b" " + json.dumps(get_hash_mfa_state(email, env), sort_keys=True).encode("utf8")
+ msg += b" " + json.dumps(get_hash_mfa_state(email, env),
+ sort_keys=True).encode("utf8")
# Make a HMAC using the system API key as a hash key.
hash_key = self.key.encode('ascii')
@@ -152,15 +173,21 @@ class AuthService:
token = secrets.token_hex(32)
self.sessions[token] = {
"email": username,
- "password_token": self.create_user_password_state_token(username, env),
+ "password_token":
+ self.create_user_password_state_token(username, env),
"type": type,
}
return token
def get_session(self, user_email, session_key, session_type, env):
- if session_key not in self.sessions: return None
+ if session_key not in self.sessions:
+ return None
session = self.sessions[session_key]
- if session_type == "login" and session["email"] != user_email: return None
- if session["type"] != session_type: return None
- if session["password_token"] != self.create_user_password_state_token(session["email"], env): return None
+ if session_type == "login" and session["email"] != user_email:
+ return None
+ if session["type"] != session_type:
+ return None
+ if session["password_token"] != self.create_user_password_state_token(
+ session["email"], env):
+ return None
return session
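
For context on the session logic reflowed above: a session stays valid only while the user's password hash and MFA state are unchanged, which create_user_password_state_token captures as an HMAC keyed by the system API key. A self-contained sketch of that idea (the function name, arguments, and digest choice here are illustrative, not copied from the module):

import hashlib
import hmac
import json


def password_state_token(api_key, password_hash, mfa_state):
	# Any change to the password hash or MFA configuration changes the
	# token, invalidating sessions minted under the old state.
	msg = password_hash.encode("utf8")
	msg += b" " + json.dumps(mfa_state, sort_keys=True).encode("utf8")
	return hmac.new(api_key.encode("ascii"), msg, hashlib.sha256).hexdigest()
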
diff --git a/management/backup.py b/management/backup.py
index b0c9ab2..282ee65 100755
--- a/management/backup.py
+++ b/management/backup.py
@@ -7,14 +7,23 @@
# 4) The stopped services are restarted.
# 5) STORAGE_ROOT/backup/after-backup is executed if it exists.
-import os, os.path, shutil, glob, re, datetime, sys
-import dateutil.parser, dateutil.relativedelta, dateutil.tz
+import os
+import os.path
+import shutil
+import glob
+import re
+import datetime
+import sys
+import dateutil.parser
+import dateutil.relativedelta
+import dateutil.tz
import rtyaml
from exclusiveprocess import Lock, CannotAcquireLock
from utils import load_environment, shell, wait_for_service, fix_boto, get_php_version, get_os_code
-def rsync_ssh_options(port = 22, direct = False):
+
+def rsync_ssh_options(port=22, direct=False):
# Just in case we pass a string
try:
port = int(port)
@@ -29,30 +38,39 @@ def rsync_ssh_options(port = 22, direct = False):
f"--rsync-options= -e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"",
]
+
def backup_status(env):
# If backups are disabled, return no status.
config = get_backup_config(env)
if config["target"] == "off":
- return { }
+ return {}
# Query duplicity to get a list of all full and incremental
# backups available.
- backups = { }
+ backups = {}
now = datetime.datetime.now(dateutil.tz.tzlocal())
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
backup_cache_dir = os.path.join(backup_root, 'cache')
def reldate(date, ref, clip):
- if ref < date: return clip
+ if ref < date:
+ return clip
rd = dateutil.relativedelta.relativedelta(ref, date)
- if rd.years > 1: return "%d years, %d months" % (rd.years, rd.months)
- if rd.years == 1: return "%d year, %d months" % (rd.years, rd.months)
- if rd.months > 1: return "%d months, %d days" % (rd.months, rd.days)
- if rd.months == 1: return "%d month, %d days" % (rd.months, rd.days)
- if rd.days >= 7: return "%d days" % rd.days
- if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours)
- if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours)
+ if rd.years > 1:
+ return "%d years, %d months" % (rd.years, rd.months)
+ if rd.years == 1:
+ return "%d year, %d months" % (rd.years, rd.months)
+ if rd.months > 1:
+ return "%d months, %d days" % (rd.months, rd.days)
+ if rd.months == 1:
+ return "%d month, %d days" % (rd.months, rd.days)
+ if rd.days >= 7:
+ return "%d days" % rd.days
+ if rd.days > 1:
+ return "%d days, %d hours" % (rd.days, rd.hours)
+ if rd.days == 1:
+ return "%d day, %d hours" % (rd.days, rd.hours)
return "%d hours, %d minutes" % (rd.hours, rd.minutes)
# Get duplicity collection status and parse for a list of backups.
@@ -64,24 +82,31 @@ def backup_status(env):
"date_str": date.strftime("%Y-%m-%d %X") + " " + now.tzname(),
"date_delta": reldate(date, now, "the future?"),
"full": keys[0] == "full",
- "size": 0, # collection-status doesn't give us the size
- "volumes": int(keys[2]), # number of archive volumes for this backup (not really helpful)
+ "size": 0, # collection-status doesn't give us the size
+ # number of archive volumes for this backup (not really helpful)
+ "volumes": int(keys[2]),
}
- code, collection_status = shell('check_output', [
- "/usr/bin/duplicity",
- "collection-status",
- "--archive-dir", backup_cache_dir,
- "--gpg-options", "--cipher-algo=AES256",
- "--log-fd", "1",
- config["target"],
- ] + rsync_ssh_options(port = config["target_rsync_port"]),
+ code, collection_status = shell(
+ 'check_output',
+ [
+ "/usr/bin/duplicity",
+ "collection-status",
+ "--archive-dir",
+ backup_cache_dir,
+ "--gpg-options",
+ "--cipher-algo=AES256",
+ "--log-fd",
+ "1",
+ config["target"],
+ ] + rsync_ssh_options(port=config["target_rsync_port"]),
get_env(env),
trap=True)
if code != 0:
# Command failed. This is likely due to an improperly configured remote
# destination for the backups or the last backup job terminated unexpectedly.
- raise Exception("Something is wrong with the backup: " + collection_status)
+ raise Exception("Something is wrong with the backup: " +
+ collection_status)
for line in collection_status.split('\n'):
if line.startswith(" full") or line.startswith(" inc"):
backup = parse_line(line)
@@ -94,8 +119,11 @@ def backup_status(env):
# space is used for those.
unmatched_file_size = 0
for fn, size in list_target_files(config):
- m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
- if not m: continue # not a part of a current backup chain
+ m = re.match(
+ r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P\d+T\d+Z)\.to)\.(?P\d+T\d+Z)\.",
+ fn)
+ if not m:
+ continue # not a part of a current backup chain
key = m.group("date")
if key in backups:
backups[key]["size"] += size
@@ -104,7 +132,7 @@ def backup_status(env):
# Ensure the rows are sorted reverse chronologically.
# This is relied on by should_force_full() and the next step.
- backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)
+ backups = sorted(backups.values(), key=lambda b: b["date"], reverse=True)
# Get the average size of incremental backups, the size of the
# most recent full backup, and the date of the most recent
@@ -133,16 +161,23 @@ def backup_status(env):
if incremental_count > 0 and incremental_size > 0 and first_full_size is not None:
# How many days until the next incremental backup? First, the part of
# the algorithm based on increment sizes:
- est_days_to_next_full = (.5 * first_full_size - incremental_size) / (incremental_size/incremental_count)
- est_time_of_next_full = first_date + datetime.timedelta(days=est_days_to_next_full)
+ est_days_to_next_full = (.5 * first_full_size - incremental_size) / (
+ incremental_size / incremental_count)
+ est_time_of_next_full = first_date + \
+ datetime.timedelta(days=est_days_to_next_full)
# ...And then the part of the algorithm based on full backup age:
- est_time_of_next_full = min(est_time_of_next_full, first_full_date + datetime.timedelta(days=config["min_age_in_days"]*10+1))
+ est_time_of_next_full = min(
+ est_time_of_next_full, first_full_date +
+ datetime.timedelta(days=config["min_age_in_days"] * 10 + 1))
# It still can't be deleted until it's old enough.
- est_deleted_on = max(est_time_of_next_full, first_date + datetime.timedelta(days=config["min_age_in_days"]))
+ est_deleted_on = max(
+ est_time_of_next_full,
+ first_date + datetime.timedelta(days=config["min_age_in_days"]))
- deleted_in = "approx. %d days" % round((est_deleted_on-now).total_seconds()/60/60/24 + .5)
+ deleted_in = "approx. %d days" % round(
+ (est_deleted_on - now).total_seconds() / 60 / 60 / 24 + .5)
# When will a backup be deleted? Set the deleted_in field of each backup.
saw_full = False
@@ -158,7 +193,11 @@ def backup_status(env):
elif saw_full and not deleted_in:
# We're now on backups prior to the most recent full backup. These are
# free to be deleted as soon as they are min_age_in_days old.
- deleted_in = reldate(now, dateutil.parser.parse(bak["date"]) + datetime.timedelta(days=config["min_age_in_days"]), "on next daily backup")
+ deleted_in = reldate(
+ now,
+ dateutil.parser.parse(bak["date"]) +
+ datetime.timedelta(days=config["min_age_in_days"]),
+ "on next daily backup")
bak["deleted_in"] = deleted_in
return {
@@ -166,6 +205,7 @@ def backup_status(env):
"unmatched_file_size": unmatched_file_size,
}
+
def should_force_full(config, env):
# Force a full backup when the total size of the increments
# since the last full backup is greater than half the size
@@ -181,9 +221,11 @@ def should_force_full(config, env):
# Return if we should do a full backup, which is based
# on the size of the increments relative to the full
# backup, as well as the age of the full backup.
- if inc_size > .5*bak["size"]:
+ if inc_size > .5 * bak["size"]:
return True
- if dateutil.parser.parse(bak["date"]) + datetime.timedelta(days=config["min_age_in_days"]*10+1) < datetime.datetime.now(dateutil.tz.tzlocal()):
+ if dateutil.parser.parse(bak["date"]) + datetime.timedelta(
+ days=config["min_age_in_days"] * 10 +
+ 1) < datetime.datetime.now(dateutil.tz.tzlocal()):
return True
return False
else:
@@ -191,6 +233,7 @@ def should_force_full(config, env):
# (I love for/else blocks. Here it's just to show off.)
return True
+
def get_passphrase(env):
# Get the encryption passphrase. secret_key.txt is 2048 random
# bits base64-encoded and with line breaks every 65 characters.
@@ -201,14 +244,16 @@ def get_passphrase(env):
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
with open(os.path.join(backup_root, 'secret_key.txt')) as f:
passphrase = f.readline().strip()
- if len(passphrase) < 43: raise Exception("secret_key.txt's first line is too short!")
+ if len(passphrase) < 43:
+ raise Exception("secret_key.txt's first line is too short!")
return passphrase
+
def get_env(env):
config = get_backup_config(env)
- env = { "PASSPHRASE" : get_passphrase(env) }
+ env = {"PASSPHRASE": get_passphrase(env)}
if get_target_type(config) == 's3':
env["AWS_ACCESS_KEY_ID"] = config["target_user"]
@@ -216,10 +261,12 @@ def get_env(env):
return env
+
def get_target_type(config):
protocol = config["target"].split(":")[0]
return protocol
+
def perform_backup(full_backup, user_initiated=False):
env = load_environment()
php_fpm = f"php{get_php_version()}-fpm"
@@ -235,7 +282,7 @@ def perform_backup(full_backup, user_initiated=False):
return "Another backup is already being done!"
else:
lock.forever()
-
+
config = get_backup_config(env)
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
backup_cache_dir = os.path.join(backup_root, 'cache')
@@ -260,7 +307,10 @@ def perform_backup(full_backup, user_initiated=False):
# Stop services.
def service_command(service, command, quit=None):
# Execute silently, but if there is an error then display the output & exit.
- code, ret = shell('check_output', ["/usr/sbin/service", service, command], capture_stderr=True, trap=True)
+ code, ret = shell('check_output',
+ ["/usr/sbin/service", service, command],
+ capture_stderr=True,
+ trap=True)
if code != 0:
print(ret)
if quit:
@@ -284,18 +334,12 @@ def perform_backup(full_backup, user_initiated=False):
# after the first backup. See #396.
try:
shell('check_call', [
- "/usr/bin/duplicity",
- "full" if full_backup else "incr",
- "--verbosity", "warning", "--no-print-statistics",
- "--archive-dir", backup_cache_dir,
- "--exclude", backup_root,
- "--volsize", "250",
- "--gpg-options", "--cipher-algo=AES256",
- env["STORAGE_ROOT"],
- config["target"],
- "--allow-source-mismatch"
- ] + rsync_ssh_options(port = config["target_rsync_port"]),
- get_env(env))
+ "/usr/bin/duplicity", "full" if full_backup else "incr",
+ "--verbosity", "warning", "--no-print-statistics", "--archive-dir",
+ backup_cache_dir, "--exclude", backup_root, "--volsize", "250",
+ "--gpg-options", "--cipher-algo=AES256", env["STORAGE_ROOT"],
+ config["target"], "--allow-source-mismatch"
+ ] + rsync_ssh_options(port=config["target_rsync_port"]), get_env(env))
finally:
# Start services again.
service_command("dovecot", "start", quit=False)
@@ -305,15 +349,10 @@ def perform_backup(full_backup, user_initiated=False):
# Remove old backups. This deletes all backup data no longer needed
# from more than 3 days ago.
shell('check_call', [
- "/usr/bin/duplicity",
- "remove-older-than",
- "%dD" % config["min_age_in_days"],
- "--verbosity", "error",
- "--archive-dir", backup_cache_dir,
- "--force",
- config["target"]
- ] + rsync_ssh_options(port = config["target_rsync_port"]),
- get_env(env))
+ "/usr/bin/duplicity", "remove-older-than",
+ "%dD" % config["min_age_in_days"], "--verbosity", "error",
+ "--archive-dir", backup_cache_dir, "--force", config["target"]
+ ] + rsync_ssh_options(port=config["target_rsync_port"]), get_env(env))
# From duplicity's manual:
# "This should only be necessary after a duplicity session fails or is
@@ -321,19 +360,15 @@ def perform_backup(full_backup, user_initiated=False):
# That may be unlikely here but we may as well ensure we tidy up if
# that does happen - it might just have been a poorly timed reboot.
shell('check_call', [
- "/usr/bin/duplicity",
- "cleanup",
- "--verbosity", "error",
- "--archive-dir", backup_cache_dir,
- "--force",
- config["target"]
- ] + rsync_ssh_options(port = config["target_rsync_port"]),
- get_env(env))
+ "/usr/bin/duplicity", "cleanup", "--verbosity", "error",
+ "--archive-dir", backup_cache_dir, "--force", config["target"]
+ ] + rsync_ssh_options(port=config["target_rsync_port"]), get_env(env))
# Change ownership of backups to the user-data user, so that the after-backup
# script can access them.
if get_target_type(config) == 'file':
- shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])
+ shell('check_call',
+ ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])
# Execute a post-backup script that does the copying to a remote server.
# Run as the STORAGE_USER user, not as root. Pass our settings in
@@ -356,6 +391,7 @@ def perform_backup(full_backup, user_initiated=False):
wait_for_service(25, True, env, 10)
wait_for_service(993, True, env, 10)
+
def run_duplicity_verification():
env = load_environment()
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
@@ -364,14 +400,18 @@ def run_duplicity_verification():
shell('check_call', [
"/usr/bin/duplicity",
- "--verbosity", "info",
+ "--verbosity",
+ "info",
"verify",
"--compare-data",
- "--archive-dir", backup_cache_dir,
- "--exclude", backup_root,
+ "--archive-dir",
+ backup_cache_dir,
+ "--exclude",
+ backup_root,
config["target"],
env["STORAGE_ROOT"],
- ] + rsync_ssh_options(port = config["target_rsync_port"]), get_env(env))
+ ] + rsync_ssh_options(port=config["target_rsync_port"]), get_env(env))
+
def run_duplicity_restore(args):
env = load_environment()
@@ -380,10 +420,12 @@ def run_duplicity_restore(args):
shell('check_call', [
"/usr/bin/duplicity",
"restore",
- "--archive-dir", backup_cache_dir,
+ "--archive-dir",
+ backup_cache_dir,
config["target"],
- ] + rsync_ssh_options(port = config["target_rsync_port"]) + args,
- get_env(env))
+ ] + rsync_ssh_options(port=config["target_rsync_port"]) + args,
+ get_env(env))
+
def list_target_files(config):
import urllib.parse
@@ -393,7 +435,8 @@ def list_target_files(config):
return "invalid target"
if target.scheme == "file":
- return [(fn, os.path.getsize(os.path.join(target.path, fn))) for fn in os.listdir(target.path)]
+ return [(fn, os.path.getsize(os.path.join(target.path, fn)))
+ for fn in os.listdir(target.path)]
elif target.scheme == "rsync":
rsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)')
@@ -405,23 +448,24 @@ def list_target_files(config):
if target_path.startswith('/'):
target_path = target_path[1:]
- rsync_command = [ 'rsync',
- '-e',
- rsync_ssh_options(config["target_rsync_port"], direct = True),
- '--list-only',
- '-r',
- rsync_target.format(
- host=target.netloc,
- path=target_path)
- ]
+ rsync_command = [
+ 'rsync', '-e',
+ rsync_ssh_options(config["target_rsync_port"], direct=True),
+ '--list-only', '-r',
+ rsync_target.format(host=target.netloc, path=target_path)
+ ]
- code, listing = shell('check_output', rsync_command, trap=True, capture_stderr=True)
+ code, listing = shell('check_output',
+ rsync_command,
+ trap=True,
+ capture_stderr=True)
if code == 0:
ret = []
for l in listing.split('\n'):
match = rsync_fn_size_re.match(l)
if match:
- ret.append( (match.groups()[1], int(match.groups()[0].replace(',',''))) )
+ ret.append((match.groups()[1],
+ int(match.groups()[0].replace(',', ''))))
return ret
else:
if 'Permission denied (publickey).' in listing:
@@ -429,18 +473,21 @@ def list_target_files(config):
elif 'No such file or directory' in listing:
reason = "Provided path {} is invalid.".format(target_path)
elif 'Network is unreachable' in listing:
- reason = "The IP address {} is unreachable.".format(target.hostname)
+ reason = "The IP address {} is unreachable.".format(
+ target.hostname)
elif 'Could not resolve hostname' in listing:
- reason = "The hostname {} cannot be resolved.".format(target.hostname)
+ reason = "The hostname {} cannot be resolved.".format(
+ target.hostname)
else:
reason = "Unknown error. " \
- "Please check running 'management/backup.py --verify' " \
- "from mailinabox sources to debug the issue."
- raise ValueError("Connection to rsync host failed: {}".format(reason))
+ "Please check running 'management/backup.py --verify' " \
+ "from mailinabox sources to debug the issue."
+ raise ValueError(
+ "Connection to rsync host failed: {}".format(reason))
elif target.scheme == "s3":
# match to a Region
- fix_boto() # must call prior to importing boto
+ fix_boto() # must call prior to importing boto
import boto.s3
from boto.exception import BotoServerError
custom_region = False
@@ -457,7 +504,9 @@ def list_target_files(config):
# Create a custom region with custom endpoint
if custom_region:
from boto.s3.connection import S3Connection
- region = boto.s3.S3RegionInfo(name=bucket, endpoint=target.hostname, connection_cls=S3Connection)
+ region = boto.s3.S3RegionInfo(name=bucket,
+ endpoint=target.hostname,
+ connection_cls=S3Connection)
# If no prefix is specified, set the path to '', otherwise boto won't list the files
if path == '/':
@@ -468,7 +517,8 @@ def list_target_files(config):
# connect to the region & bucket
try:
- conn = region.connect(aws_access_key_id=config["target_user"], aws_secret_access_key=config["target_pass"])
+ conn = region.connect(aws_access_key_id=config["target_user"],
+ aws_secret_access_key=config["target_pass"])
bucket = conn.get_bucket(bucket)
except BotoServerError as e:
if e.status == 403:
@@ -479,7 +529,8 @@ def list_target_files(config):
raise ValueError("Incorrect region for this bucket.")
raise ValueError(e.reason)
- return [(key.name[len(path):], key.size) for key in bucket.list(prefix=path)]
+ return [(key.name[len(path):], key.size)
+ for key in bucket.list(prefix=path)]
elif target.scheme == 'b2':
InMemoryAccountInfo = None
B2Api = None
@@ -497,30 +548,35 @@ def list_target_files(config):
info = InMemoryAccountInfo()
b2_api = B2Api(info)
-
+
# Extract information from target
b2_application_keyid = target.netloc[:target.netloc.index(':')]
- b2_application_key = target.netloc[target.netloc.index(':')+1:target.netloc.index('@')]
- b2_bucket = target.netloc[target.netloc.index('@')+1:]
+ b2_application_key = target.netloc[target.netloc.index(':') +
+ 1:target.netloc.index('@')]
+ b2_bucket = target.netloc[target.netloc.index('@') + 1:]
try:
- b2_api.authorize_account("production", b2_application_keyid, b2_application_key)
+ b2_api.authorize_account("production", b2_application_keyid,
+ b2_application_key)
bucket = b2_api.get_bucket_by_name(b2_bucket)
except NonExistentBucket as e:
- raise ValueError("B2 Bucket does not exist. Please double check your information!")
+ raise ValueError(
+ "B2 Bucket does not exist. Please double check your information!"
+ )
return [(key.file_name, key.size) for key, _ in bucket.ls()]
else:
raise ValueError(config["target"])
-def backup_set_custom(env, target, target_user, target_pass, target_rsync_port, min_age):
+def backup_set_custom(env, target, target_user, target_pass, target_rsync_port,
+ min_age):
config = get_backup_config(env, for_save=True)
# min_age must be an int
if isinstance(min_age, str):
min_age = int(min_age)
-
+
if isinstance(target_rsync_port, str):
try:
target_rsync_port = int(target_rsync_port)
@@ -546,20 +602,19 @@ def backup_set_custom(env, target, target_user, target_pass, target_rsync_port,
return "OK"
+
def get_backup_config(env, for_save=False, for_ui=False):
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
# Defaults.
- config = {
- "min_age_in_days": 3,
- "target": "local",
- "target_rsync_port": 22
- }
+ config = {"min_age_in_days": 3, "target": "local", "target_rsync_port": 22}
# Merge in anything written to custom.yaml.
try:
- custom_config = rtyaml.load(open(os.path.join(backup_root, 'custom.yaml')))
- if not isinstance(custom_config, dict): raise ValueError() # caught below
+ custom_config = rtyaml.load(
+ open(os.path.join(backup_root, 'custom.yaml')))
+ if not isinstance(custom_config, dict):
+ raise ValueError() # caught below
config.update(custom_config)
except:
pass
@@ -587,11 +642,13 @@ def get_backup_config(env, for_save=False, for_ui=False):
return config
+
def write_backup_config(env, newconfig):
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
with open(os.path.join(backup_root, 'custom.yaml'), "w") as f:
f.write(rtyaml.dump(newconfig))
+
if __name__ == "__main__":
import sys
if sys.argv[-1] == "--verify":
@@ -601,7 +658,8 @@ if __name__ == "__main__":
elif sys.argv[-1] == "--list":
# List the saved backup files.
- for fn, size in list_target_files(get_backup_config(load_environment())):
+ for fn, size in list_target_files(get_backup_config(
+ load_environment())):
print("{}\t{}".format(fn, size))
elif sys.argv[-1] == "--status":
diff --git a/management/cli.py b/management/cli.py
index 9d4e89f..f28af09 100755
--- a/management/cli.py
+++ b/management/cli.py
@@ -6,7 +6,14 @@
# root API key. This file is readable only by root, so this
# tool can only be used as root.
-import sys, getpass, urllib.request, urllib.error, json, re, csv
+import sys
+import getpass
+import urllib.request
+import urllib.error
+import json
+import re
+import csv
+
def mgmt(cmd, data=None, is_json=False):
# The base URL for the management daemon. (Listens on IPv4 only.)
@@ -14,7 +21,9 @@ def mgmt(cmd, data=None, is_json=False):
setup_key_auth(mgmt_uri)
- req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
+ req = urllib.request.Request(
+ mgmt_uri + cmd,
+ urllib.parse.urlencode(data).encode("utf8") if data else None)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
@@ -23,70 +32,74 @@ def mgmt(cmd, data=None, is_json=False):
print(e.read().decode("utf8"))
except:
pass
- print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
+ print(
+ "The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.",
+ file=sys.stderr)
elif hasattr(e, 'read'):
print(e.read().decode('utf8'), file=sys.stderr)
else:
print(e, file=sys.stderr)
sys.exit(1)
resp = response.read().decode('utf8')
- if is_json: resp = json.loads(resp)
+ if is_json:
+ resp = json.loads(resp)
return resp
+
def read_password():
- while True:
- first = getpass.getpass('password: ')
- if len(first) < 8:
- print("Passwords must be at least eight characters.")
- continue
- second = getpass.getpass(' (again): ')
- if first != second:
- print("Passwords not the same. Try again.")
- continue
- break
- return first
+ while True:
+ first = getpass.getpass('password: ')
+ if len(first) < 8:
+ print("Passwords must be at least eight characters.")
+ continue
+ second = getpass.getpass(' (again): ')
+ if first != second:
+ print("Passwords not the same. Try again.")
+ continue
+ break
+ return first
+
def setup_key_auth(mgmt_uri):
key = open('/var/lib/mailinabox/api.key').read().strip()
auth_handler = urllib.request.HTTPBasicAuthHandler()
- auth_handler.add_password(
- realm='Mail-in-a-Box Management Server',
- uri=mgmt_uri,
- user=key,
- passwd='')
+ auth_handler.add_password(realm='Mail-in-a-Box Management Server',
+ uri=mgmt_uri,
+ user=key,
+ passwd='')
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
+
if len(sys.argv) < 2:
print("""Usage:
- {cli} system default-quota [new default] (set default quota for system)
- {cli} user (lists users)
- {cli} user add user@domain.com [password]
- {cli} user password user@domain.com [password]
- {cli} user remove user@domain.com
- {cli} user make-admin user@domain.com
- {cli} user quota user@domain [new-quota]
- {cli} user remove-admin user@domain.com
- {cli} user admins (lists admins)
- {cli} user mfa show user@domain.com (shows MFA devices for user, if any)
- {cli} user mfa disable user@domain.com [id] (disables MFA for user)
- {cli} alias (lists aliases)
- {cli} alias add incoming.name@domain.com sent.to@other.domain.com
- {cli} alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'
- {cli} alias remove incoming.name@domain.com
+{cli} system default-quota [new default] (set default quota for system)
+{cli} user (lists users)
+{cli} user add user@domain.com [password]
+{cli} user password user@domain.com [password]
+{cli} user remove user@domain.com
+{cli} user make-admin user@domain.com
+{cli} user quota user@domain [new-quota]
+{cli} user remove-admin user@domain.com
+{cli} user admins (lists admins)
+{cli} user mfa show user@domain.com (shows MFA devices for user, if any)
+{cli} user mfa disable user@domain.com [id] (disables MFA for user)
+{cli} alias (lists aliases)
+{cli} alias add incoming.name@domain.com sent.to@other.domain.com
+{cli} alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'
+{cli} alias remove incoming.name@domain.com
Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.
-""".format(
- cli="management/cli.py"
- ))
+""".format(cli="management/cli.py"))
elif sys.argv[1] == "user" and len(sys.argv) == 2:
# Dump a list of users, one per line. Mark admins with an asterisk.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
- if user['status'] == 'inactive': continue
+ if user['status'] == 'inactive':
+ continue
print(user['email'], end='')
if "admin" in user['privileges']:
print("*", end='')
@@ -107,19 +120,25 @@ elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"):
email, pw = sys.argv[3:5]
if sys.argv[2] == "add":
- print(mgmt("/mail/users/add", { "email": email, "password": pw }))
+ print(mgmt("/mail/users/add", {"email": email, "password": pw}))
elif sys.argv[2] == "password":
- print(mgmt("/mail/users/password", { "email": email, "password": pw }))
+ print(mgmt("/mail/users/password", {"email": email, "password": pw}))
elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4:
- print(mgmt("/mail/users/remove", { "email": sys.argv[3] }))
+ print(mgmt("/mail/users/remove", {"email": sys.argv[3]}))
-elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and len(sys.argv) == 4:
+elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin",
+ "remove-admin") and len(
+ sys.argv) == 4:
if sys.argv[2] == "make-admin":
action = "add"
else:
action = "remove"
- print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" }))
+ print(
+ mgmt("/mail/users/privileges/" + action, {
+ "email": sys.argv[3],
+ "privilege": "admin"
+ }))
elif sys.argv[1] == "user" and sys.argv[2] == "admins":
# Dump a list of admin users.
@@ -135,36 +154,51 @@ elif sys.argv[1] == "user" and sys.argv[2] == "quota" and len(sys.argv) == 4:
elif sys.argv[1] == "user" and sys.argv[2] == "quota" and len(sys.argv) == 5:
# Set a user's quota
- users = mgmt("/mail/users/quota", { "email": sys.argv[3], "quota": sys.argv[4] })
+ users = mgmt("/mail/users/quota", {
+ "email": sys.argv[3],
+ "quota": sys.argv[4]
+ })
-elif sys.argv[1] == "user" and len(sys.argv) == 5 and sys.argv[2:4] == ["mfa", "show"]:
+elif sys.argv[1] == "user" and len(
+ sys.argv) == 5 and sys.argv[2:4] == ["mfa", "show"]:
# Show MFA status for a user.
- status = mgmt("/mfa/status", { "user": sys.argv[4] }, is_json=True)
+ status = mgmt("/mfa/status", {"user": sys.argv[4]}, is_json=True)
W = csv.writer(sys.stdout)
W.writerow(["id", "type", "label"])
for mfa in status["enabled_mfa"]:
W.writerow([mfa["id"], mfa["type"], mfa["label"]])
-elif sys.argv[1] == "user" and len(sys.argv) in (5, 6) and sys.argv[2:4] == ["mfa", "disable"]:
+elif sys.argv[1] == "user" and len(
+ sys.argv) in (5, 6) and sys.argv[2:4] == ["mfa", "disable"]:
# Disable MFA (all or a particular device) for a user.
- print(mgmt("/mfa/disable", { "user": sys.argv[4], "mfa-id": sys.argv[5] if len(sys.argv) == 6 else None }))
+ print(
+ mgmt(
+ "/mfa/disable", {
+ "user": sys.argv[4],
+ "mfa-id": sys.argv[5] if len(sys.argv) == 6 else None
+ }))
elif sys.argv[1] == "alias" and len(sys.argv) == 2:
print(mgmt("/mail/aliases"))
elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5:
- print(mgmt("/mail/aliases/add", { "address": sys.argv[3], "forwards_to": sys.argv[4] }))
+ print(
+ mgmt("/mail/aliases/add", {
+ "address": sys.argv[3],
+ "forwards_to": sys.argv[4]
+ }))
elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4:
- print(mgmt("/mail/aliases/remove", { "address": sys.argv[3] }))
+ print(mgmt("/mail/aliases/remove", {"address": sys.argv[3]}))
-elif sys.argv[1] == "system" and sys.argv[2] == "default-quota" and len(sys.argv) == 3:
+elif sys.argv[1] == "system" and sys.argv[2] == "default-quota" and len(
+ sys.argv) == 3:
print(mgmt("/system/default-quota?text=1"))
-elif sys.argv[1] == "system" and sys.argv[2] == "default-quota" and len(sys.argv) == 4:
- print(mgmt("/system/default-quota", { "default_quota": sys.argv[3]}))
+elif sys.argv[1] == "system" and sys.argv[2] == "default-quota" and len(
+ sys.argv) == 4:
+ print(mgmt("/system/default-quota", {"default_quota": sys.argv[3]}))
else:
print("Invalid command-line arguments.")
sys.exit(1)
-
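
cli.py authenticates with HTTP Basic auth, sending the root API key as the username and an empty password, exactly as setup_key_auth() above wires it up. A minimal standalone version of that handshake (the daemon address is an assumption here; cli.py defines its own base URI):

import json
import urllib.request

MGMT_URI = "http://127.0.0.1:10222"  # assumed local management daemon address
key = open("/var/lib/mailinabox/api.key").read().strip()  # root-readable key

handler = urllib.request.HTTPBasicAuthHandler()
handler.add_password(realm="Mail-in-a-Box Management Server",
		uri=MGMT_URI, user=key, passwd="")
opener = urllib.request.build_opener(handler)

users = json.loads(
	opener.open(MGMT_URI + "/mail/users?format=json").read().decode("utf8"))
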
diff --git a/management/daemon.py b/management/daemon.py
index a5c9427..1786c8e 100755
--- a/management/daemon.py
+++ b/management/daemon.py
@@ -10,14 +10,20 @@
# DEBUG=1 management/daemon.py
# service mailinabox start # when done debugging, start it up again
-import os, os.path, re, json, time
-import multiprocessing.pool, subprocess
+import os
+import os.path
+import re
+import json
+import time
+import multiprocessing.pool
+import subprocess
from functools import wraps
from flask import Flask, request, render_template, abort, Response, send_from_directory, make_response
-import auth, utils
+import auth
+import utils
from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user
from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege, open_database
from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias
@@ -39,14 +45,20 @@ except OSError:
csr_country_codes = []
with open(os.path.join(os.path.dirname(me), "csr_country_codes.tsv")) as f:
for line in f:
- if line.strip() == "" or line.startswith("#"): continue
+ if line.strip() == "" or line.startswith("#"):
+ continue
code, name = line.strip().split("\t")[0:2]
csr_country_codes.append((code, name))
-app = Flask(__name__, template_folder=os.path.abspath(os.path.join(os.path.dirname(me), "templates")))
+app = Flask(__name__,
+ template_folder=os.path.abspath(
+ os.path.join(os.path.dirname(me), "templates")))
# Decorator to protect views that require a user with 'admin' privileges.
+
+
def authorized_personnel_only(viewfunc):
+
@wraps(viewfunc)
def newview(*args, **kwargs):
# Authenticate the passed credentials, which is either the API key or a username:password pair
@@ -81,7 +93,8 @@ def authorized_personnel_only(viewfunc):
# Not authorized. Return a 401 (send auth) and a prompt to authorize by default.
status = 401
headers = {
- 'WWW-Authenticate': 'Basic realm="{0}"'.format(auth_service.auth_realm),
+ 'WWW-Authenticate':
+ 'Basic realm="{0}"'.format(auth_service.auth_realm),
'X-Reason': error,
}
@@ -93,27 +106,39 @@ def authorized_personnel_only(viewfunc):
if request.headers.get('Accept') in (None, "", "*/*"):
# Return plain text output.
- return Response(error+"\n", status=status, mimetype='text/plain', headers=headers)
+ return Response(error + "\n",
+ status=status,
+ mimetype='text/plain',
+ headers=headers)
else:
# Return JSON output.
return Response(json.dumps({
"status": "error",
"reason": error,
- })+"\n", status=status, mimetype='application/json', headers=headers)
+ }) + "\n",
+ status=status,
+ mimetype='application/json',
+ headers=headers)
return newview
+
@app.errorhandler(401)
def unauthorized(error):
return auth_service.make_unauthorized_response()
+
def json_response(data, status=200):
- return Response(json.dumps(data, indent=2, sort_keys=True)+'\n', status=status, mimetype='application/json')
+ return Response(json.dumps(data, indent=2, sort_keys=True) + '\n',
+ status=status,
+ mimetype='application/json')
+
###################################
# Control Panel (unauthenticated views)
+
@app.route('/')
def index():
# Render the control panel. This route does not require user authentication
@@ -122,22 +147,24 @@ def index():
no_users_exist = (len(get_mail_users(env)) == 0)
no_admins_exist = (len(get_admins(env)) == 0)
- utils.fix_boto() # must call prior to importing boto
+ utils.fix_boto() # must call prior to importing boto
import boto.s3
backup_s3_hosts = [(r.name, r.endpoint) for r in boto.s3.regions()]
- return render_template('index.html',
+ return render_template(
+ 'index.html',
hostname=env['PRIMARY_HOSTNAME'],
storage_root=env['STORAGE_ROOT'],
-
no_users_exist=no_users_exist,
no_admins_exist=no_admins_exist,
-
backup_s3_hosts=backup_s3_hosts,
csr_country_codes=csr_country_codes,
)
+
# Create a session key by checking the username/password in the Authorization header.
+
+
@app.route('/login', methods=["POST"])
def login():
# Is the caller authorized?
@@ -170,6 +197,7 @@ def login():
# Return.
return json_response(resp)
+
@app.route('/logout', methods=["POST"])
def logout():
try:
@@ -178,27 +206,33 @@ def logout():
except ValueError as e:
pass
finally:
- return json_response({ "status": "ok" })
+ return json_response({"status": "ok"})
+
# MAIL
+
@app.route('/mail/users')
@authorized_personnel_only
def mail_users():
if request.args.get("format", "") == "json":
return json_response(get_mail_users_ex(env, with_archived=True))
else:
- return "".join(x+"\n" for x in get_mail_users(env))
+ return "".join(x + "\n" for x in get_mail_users(env))
+
@app.route('/mail/users/add', methods=['POST'])
@authorized_personnel_only
def mail_users_add():
quota = request.form.get('quota', get_default_quota(env))
try:
- return add_mail_user(request.form.get('email', ''), request.form.get('password', ''), request.form.get('privileges', ''), quota, env)
+ return add_mail_user(request.form.get('email', ''),
+ request.form.get('password', ''),
+ request.form.get('privileges', ''), quota, env)
except ValueError as e:
return (str(e), 400)
+
@app.route('/mail/users/quota', methods=['GET'])
@authorized_personnel_only
def get_mail_users_quota():
@@ -208,27 +242,29 @@ def get_mail_users_quota():
if request.values.get('text'):
return quota
- return json_response({
- "email": email,
- "quota": quota
- })
+ return json_response({"email": email, "quota": quota})
+
@app.route('/mail/users/quota', methods=['POST'])
@authorized_personnel_only
def mail_users_quota():
try:
- return set_mail_quota(request.form.get('email', ''), request.form.get('quota'), env)
+ return set_mail_quota(request.form.get('email', ''),
+ request.form.get('quota'), env)
except ValueError as e:
return (str(e), 400)
+
@app.route('/mail/users/password', methods=['POST'])
@authorized_personnel_only
def mail_users_password():
try:
- return set_mail_password(request.form.get('email', ''), request.form.get('password', ''), env)
+ return set_mail_password(request.form.get('email', ''),
+ request.form.get('password', ''), env)
except ValueError as e:
return (str(e), 400)
+
@app.route('/mail/users/remove', methods=['POST'])
@authorized_personnel_only
def mail_users_remove():
@@ -239,18 +275,25 @@ def mail_users_remove():
@authorized_personnel_only
def mail_user_privs():
privs = get_mail_user_privileges(request.args.get('email', ''), env)
- if isinstance(privs, tuple): return privs # error
+ if isinstance(privs, tuple):
+ return privs # error
return "\n".join(privs)
+
@app.route('/mail/users/privileges/add', methods=['POST'])
@authorized_personnel_only
def mail_user_privs_add():
- return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "add", env)
+ return add_remove_mail_user_privilege(request.form.get('email', ''),
+ request.form.get('privilege', ''),
+ "add", env)
+
@app.route('/mail/users/privileges/remove', methods=['POST'])
@authorized_personnel_only
def mail_user_privs_remove():
- return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "remove", env)
+ return add_remove_mail_user_privilege(request.form.get('email', ''),
+ request.form.get('privilege', ''),
+ "remove", env)
@app.route('/mail/aliases')
@@ -259,37 +302,44 @@ def mail_aliases():
if request.args.get("format", "") == "json":
return json_response(get_mail_aliases_ex(env))
else:
- return "".join(address+"\t"+receivers+"\t"+(senders or "")+"\n" for address, receivers, senders, auto in get_mail_aliases(env))
+ return "".join(
+ address + "\t" + receivers + "\t" + (senders or "") + "\n"
+ for address, receivers, senders, auto in get_mail_aliases(env))
+
@app.route('/mail/aliases/add', methods=['POST'])
@authorized_personnel_only
def mail_aliases_add():
- return add_mail_alias(
- request.form.get('address', ''),
- request.form.get('forwards_to', ''),
- request.form.get('permitted_senders', ''),
- env,
- update_if_exists=(request.form.get('update_if_exists', '') == '1')
- )
+ return add_mail_alias(request.form.get('address', ''),
+ request.form.get('forwards_to', ''),
+ request.form.get('permitted_senders', ''),
+ env,
+ update_if_exists=(request.form.get(
+ 'update_if_exists', '') == '1'))
+
@app.route('/mail/aliases/remove', methods=['POST'])
@authorized_personnel_only
def mail_aliases_remove():
return remove_mail_alias(request.form.get('address', ''), env)
+
@app.route('/mail/domains')
@authorized_personnel_only
def mail_domains():
- return "".join(x+"\n" for x in get_mail_domains(env))
+ return "".join(x + "\n" for x in get_mail_domains(env))
+
# DNS
+
@app.route('/dns/zones')
@authorized_personnel_only
def dns_zones():
from dns_update import get_dns_zones
return json_response([z[0] for z in get_dns_zones(env)])
+
@app.route('/dns/update', methods=['POST'])
@authorized_personnel_only
def dns_update():
@@ -299,21 +349,31 @@ def dns_update():
except Exception as e:
return (str(e), 500)
+
@app.route('/dns/secondary-nameserver')
@authorized_personnel_only
def dns_get_secondary_nameserver():
from dns_update import get_custom_dns_config, get_secondary_dns
- return json_response({ "hostnames": get_secondary_dns(get_custom_dns_config(env), mode=None) })
+ return json_response({
+ "hostnames":
+ get_secondary_dns(get_custom_dns_config(env), mode=None)
+ })
+
@app.route('/dns/secondary-nameserver', methods=['POST'])
@authorized_personnel_only
def dns_set_secondary_nameserver():
from dns_update import set_secondary_dns
try:
- return set_secondary_dns([ns.strip() for ns in re.split(r"[, ]+", request.form.get('hostnames') or "") if ns.strip() != ""], env)
+ return set_secondary_dns([
+ ns.strip() for ns in re.split(r"[, ]+",
+ request.form.get('hostnames') or "")
+ if ns.strip() != ""
+ ], env)
except ValueError as e:
return (str(e), 400)
+
@app.route('/dns/custom')
@authorized_personnel_only
def dns_get_records(qname=None, rtype=None):
@@ -322,20 +382,19 @@ def dns_get_records(qname=None, rtype=None):
records = get_custom_dns_config(env, only_real_records=True)
# Filter per the arguments for the more complex GET routes below.
- records = [r for r in records
- if (not qname or r[0] == qname)
- and (not rtype or r[1] == rtype) ]
+ records = [
+ r for r in records
+ if (not qname or r[0] == qname) and (not rtype or r[1] == rtype)
+ ]
# Make a better data structure.
- records = [
- {
- "qname": r[0],
- "rtype": r[1],
- "value": r[2],
- "ttl": r[3],
- "sort-order": { },
- }
- for r in records ]
+ records = [{
+ "qname": r[0],
+ "rtype": r[1],
+ "value": r[2],
+ "ttl": r[3],
+ "sort-order": {},
+ } for r in records]
# To help with grouping by zone in qname sorting, label each record with which zone it is in.
# There's an inconsistency in how we handle zones in get_dns_zones and in sort_domains, so
@@ -355,17 +414,23 @@ def dns_get_records(qname=None, rtype=None):
for i, r in enumerate(records):
r["sort-order"]["created"] = i
domain_sort_order = utils.sort_domains([r["qname"] for r in records], env)
- for i, r in enumerate(sorted(records, key = lambda r : (
- zones.index(r["zone"]) if r.get("zone") else 0, # record is not within a zone managed by the box
- domain_sort_order.index(r["qname"]),
- r["rtype"]))):
+ for i, r in enumerate(
+ sorted(
+ records,
+ key=lambda r: (
+ # record is not within a zone managed by the box
+ zones.index(r["zone"]) if r.get("zone") else 0,
+ domain_sort_order.index(r["qname"]),
+ r["rtype"]))):
r["sort-order"]["qname"] = i
# Return.
return json_response(records)
+
@app.route('/dns/custom/<qname>', methods=['GET', 'POST', 'PUT', 'DELETE'])
-@app.route('/dns/custom/<qname>/<rtype>', methods=['GET', 'POST', 'PUT', 'DELETE'])
+@app.route('/dns/custom/<qname>/<rtype>',
+ methods=['GET', 'POST', 'PUT', 'DELETE'])
@authorized_personnel_only
def dns_set_record(qname, rtype="A"):
from dns_update import do_dns_update, set_custom_dns_record
@@ -398,7 +463,8 @@ def dns_set_record(qname, rtype="A"):
elif request.method in ("POST", "PUT"):
# There is a default value for A/AAAA records.
if rtype in ("A", "AAAA") and value == "":
- value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
+ # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
+ value = request.environ.get("HTTP_X_FORWARDED_FOR")
# Cannot add empty records.
if value == '':
@@ -423,27 +489,33 @@ def dns_set_record(qname, rtype="A"):
pass
action = "remove"
- if set_custom_dns_record(qname, rtype, value, action, env, ttl = ttl):
+ if set_custom_dns_record(qname, rtype, value, action, env, ttl=ttl):
return do_dns_update(env) or "Something isn't right."
return "OK"
except ValueError as e:
return (str(e), 400)
+
@app.route('/dns/dump')
@authorized_personnel_only
def dns_get_dump():
from dns_update import build_recommended_dns
return json_response(build_recommended_dns(env))
+
@app.route('/dns/zonefile/<zone>')
@authorized_personnel_only
def dns_get_zonefile(zone):
from dns_update import get_dns_zonefile
- return Response(get_dns_zonefile(zone, env), status=200, mimetype='text/plain')
+ return Response(get_dns_zonefile(zone, env),
+ status=200,
+ mimetype='text/plain')
+
# SSL
+
@app.route('/ssl/status')
@authorized_personnel_only
def ssl_get_status():
@@ -451,23 +523,31 @@ def ssl_get_status():
from web_update import get_web_domains_info, get_web_domains
# What domains can we provision certificates for? What unexpected problems do we have?
- provision, cant_provision = get_certificates_to_provision(env, show_valid_certs=False)
+ provision, cant_provision = get_certificates_to_provision(
+ env, show_valid_certs=False)
# What's the current status of TLS certificates on all of the domains?
domains_status = get_web_domains_info(env)
- domains_status = [
- {
- "domain": d["domain"],
- "status": d["ssl_certificate"][0],
- "text": d["ssl_certificate"][1] + ((" " + cant_provision[d["domain"]] if d["domain"] in cant_provision else ""))
- } for d in domains_status ]
+ domains_status = [{
+ "domain":
+ d["domain"],
+ "status":
+ d["ssl_certificate"][0],
+ "text":
+ d["ssl_certificate"][1] + ((" " + cant_provision[d["domain"]]
+ if d["domain"] in cant_provision else ""))
+ } for d in domains_status]
# Warn the user about domain names not hosted here because of other settings.
- for domain in set(get_web_domains(env, exclude_dns_elsewhere=False)) - set(get_web_domains(env)):
+ for domain in set(get_web_domains(env, exclude_dns_elsewhere=False)) - set(
+ get_web_domains(env)):
domains_status.append({
- "domain": domain,
- "status": "not-applicable",
- "text": "The domain's website is hosted elsewhere.",
+ "domain":
+ domain,
+ "status":
+ "not-applicable",
+ "text":
+ "The domain's website is hosted elsewhere.",
})
return json_response({
@@ -475,12 +555,16 @@ def ssl_get_status():
"status": domains_status,
})
+
@app.route('/ssl/csr/<domain>', methods=['POST'])
@authorized_personnel_only
def ssl_get_csr(domain):
from ssl_certificates import create_csr
- ssl_private_key = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem'))
- return create_csr(domain, ssl_private_key, request.form.get('countrycode', ''), env)
+ ssl_private_key = os.path.join(
+ os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem'))
+ return create_csr(domain, ssl_private_key,
+ request.form.get('countrycode', ''), env)
+
@app.route('/ssl/install', methods=['POST'])
@authorized_personnel_only
@@ -494,15 +578,18 @@ def ssl_install_cert():
return "Invalid domain name."
return install_cert(domain, ssl_cert, ssl_chain, env)
+
@app.route('/ssl/provision', methods=['POST'])
@authorized_personnel_only
def ssl_provision_certs():
from ssl_certificates import provision_certificates
requests = provision_certificates(env, limit_domains=None)
- return json_response({ "requests": requests })
+ return json_response({"requests": requests})
+
# multi-factor auth
+
@app.route('/mfa/status', methods=['POST'])
@authorized_personnel_only
def mfa_get_status():
@@ -510,21 +597,17 @@ def mfa_get_status():
# see the MFA status for any user if they submit a 'user' form
# field. But we don't include provisioning info since a user can
# only provision for themselves.
- email = request.form.get('user', request.user_email) # user field if given, otherwise the user making the request
+ # user field if given, otherwise the user making the request
+ email = request.form.get('user', request.user_email)
try:
- resp = {
- "enabled_mfa": get_public_mfa_state(email, env)
- }
+ resp = {"enabled_mfa": get_public_mfa_state(email, env)}
if email == request.user_email:
- resp.update({
- "new_mfa": {
- "totp": provision_totp(email, env)
- }
- })
+ resp.update({"new_mfa": {"totp": provision_totp(email, env)}})
except ValueError as e:
return (str(e), 400)
return json_response(resp)
+
@app.route('/mfa/totp/enable', methods=['POST'])
@authorized_personnel_only
def totp_post_enable():
@@ -540,30 +623,37 @@ def totp_post_enable():
return (str(e), 400)
return "OK"
+
@app.route('/mfa/disable', methods=['POST'])
@authorized_personnel_only
def totp_post_disable():
# Anyone accessing this route is an admin, and we permit them to
# disable the MFA status for any user if they submit a 'user' form
# field.
- email = request.form.get('user', request.user_email) # user field if given, otherwise the user making the request
+ # user field if given, otherwise the user making the request
+ email = request.form.get('user', request.user_email)
try:
- result = disable_mfa(email, request.form.get('mfa-id') or None, env) # convert empty string to None
+ result = disable_mfa(email,
+ request.form.get('mfa-id') or None,
+ env) # convert empty string to None
except ValueError as e:
return (str(e), 400)
- if result: # success
+ if result: # success
return "OK"
- else: # error
+ else: # error
return ("Invalid user or MFA id.", 400)
+
# WEB
+
@app.route('/web/domains')
@authorized_personnel_only
def web_get_domains():
from web_update import get_web_domains_info
return json_response(get_web_domains_info(env))
+
@app.route('/web/update', methods=['POST'])
@authorized_personnel_only
def web_update():
@@ -573,8 +663,10 @@ def web_update():
except Exception as e:
return (str(e), 500)
+
# System
+
@app.route('/system/version', methods=["GET"])
@authorized_personnel_only
def system_version():
@@ -584,6 +676,7 @@ def system_version():
except Exception as e:
return (str(e), 500)
+
@app.route('/system/latest-upstream-version', methods=["POST"])
@authorized_personnel_only
def system_latest_upstream_version():
@@ -593,47 +686,67 @@ def system_latest_upstream_version():
except Exception as e:
return (str(e), 500)
+
@app.route('/system/status', methods=["POST"])
@authorized_personnel_only
def system_status():
from status_checks import run_checks
+
class WebOutput:
+
def __init__(self):
self.items = []
+
def add_heading(self, heading):
- self.items.append({ "type": "heading", "text": heading, "extra": [] })
+ self.items.append({
+ "type": "heading",
+ "text": heading,
+ "extra": []
+ })
+
def print_ok(self, message):
- self.items.append({ "type": "ok", "text": message, "extra": [] })
+ self.items.append({"type": "ok", "text": message, "extra": []})
+
def print_error(self, message):
- self.items.append({ "type": "error", "text": message, "extra": [] })
+ self.items.append({"type": "error", "text": message, "extra": []})
+
def print_warning(self, message):
- self.items.append({ "type": "warning", "text": message, "extra": [] })
+ self.items.append({
+ "type": "warning",
+ "text": message,
+ "extra": []
+ })
+
def print_na(self, message):
- self.items.append({ "type": "na", "text": message, "extra": [] })
+ self.items.append({"type": "na", "text": message, "extra": []})
+
def print_line(self, message, monospace=False):
- self.items[-1]["extra"].append({ "text": message, "monospace": monospace })
+ self.items[-1]["extra"].append({
+ "text": message,
+ "monospace": monospace
+ })
+
output = WebOutput()
# Create a temporary pool of processes for the status checks
with multiprocessing.pool.Pool(processes=5) as pool:
run_checks(False, env, output, pool)
return json_response(output.items)
+
@app.route('/system/updates')
@authorized_personnel_only
def show_updates():
from status_checks import list_apt_updates
- return "".join(
- "%s (%s)\n"
- % (p["package"], p["version"])
- for p in list_apt_updates())
+ return "".join("%s (%s)\n" % (p["package"], p["version"])
+ for p in list_apt_updates())
+
@app.route('/system/update-packages', methods=["POST"])
@authorized_personnel_only
def do_updates():
utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
- return utils.shell("check_output", ["/usr/bin/apt-get", "-y", "upgrade"], env={
- "DEBIAN_FRONTEND": "noninteractive"
- })
+ return utils.shell("check_output", ["/usr/bin/apt-get", "-y", "upgrade"],
+ env={"DEBIAN_FRONTEND": "noninteractive"})
@app.route('/system/reboot', methods=["GET"])
@@ -645,13 +758,15 @@ def needs_reboot():
else:
return json_response(False)
+
@app.route('/system/reboot', methods=["POST"])
@authorized_personnel_only
def do_reboot():
# To keep the attack surface low, we don't allow a remote reboot if one isn't necessary.
from status_checks import is_reboot_needed_due_to_package_installation
if is_reboot_needed_due_to_package_installation():
- return utils.shell("check_output", ["/sbin/shutdown", "-r", "now"], capture_stderr=True)
+ return utils.shell("check_output", ["/sbin/shutdown", "-r", "now"],
+ capture_stderr=True)
else:
return "No reboot is required, so it is not allowed."
@@ -663,7 +778,8 @@ def backup_status():
try:
return json_response(backup_status(env))
except Exception as e:
- return json_response({ "error": str(e) })
+ return json_response({"error": str(e)})
+
@app.route('/system/backup/config', methods=["GET"])
@authorized_personnel_only
@@ -671,17 +787,18 @@ def backup_get_custom():
from backup import get_backup_config
return json_response(get_backup_config(env, for_ui=True))
+
@app.route('/system/backup/config', methods=["POST"])
@authorized_personnel_only
def backup_set_custom():
from backup import backup_set_custom
- return json_response(backup_set_custom(env,
- request.form.get('target', ''),
- request.form.get('target_user', ''),
- request.form.get('target_pass', ''),
- request.form.get('target_rsync_port', ''),
- request.form.get('min_age', '')
- ))
+ return json_response(
+ backup_set_custom(env, request.form.get('target', ''),
+ request.form.get('target_user', ''),
+ request.form.get('target_pass', ''),
+ request.form.get('target_rsync_port', ''),
+ request.form.get('min_age', '')))
+
@app.route('/system/backup/new', methods=["POST"])
@authorized_personnel_only
@@ -696,12 +813,14 @@ def backup_new():
msg = perform_backup(request.form.get('full', False) == 'true', True)
return "OK" if msg is None else msg
+
@app.route('/system/privacy', methods=["GET"])
@authorized_personnel_only
def privacy_status_get():
config = utils.load_settings(env)
return json_response(config.get("privacy", True))
+
@app.route('/system/privacy', methods=["POST"])
@authorized_personnel_only
def privacy_status_set():
@@ -710,6 +829,7 @@ def privacy_status_set():
utils.write_settings(config, env)
return "OK"
+
@app.route('/system/smtp/relay', methods=["GET"])
@authorized_personnel_only
def smtp_relay_get():
@@ -720,7 +840,8 @@ def smtp_relay_get():
if rr is not None:
if rr.get("p") is None:
raise ValueError("Key doesn't exist!")
- for c, d in (("v", "DKIM1"), ("h", None), ("k", "rsa"), ("n", None), ("s", None), ("t", None)):
+ for c, d in (("v", "DKIM1"), ("h", None), ("k", "rsa"), ("n", None),
+ ("s", None), ("t", None)):
txt = rr.get(c, d)
if txt is None:
continue
@@ -738,12 +859,15 @@ def smtp_relay_get():
"dkim_rr": dkim_rrtxt
}
+
@app.route('/system/smtp/relay', methods=["POST"])
@authorized_personnel_only
def smtp_relay_set():
from editconf import edit_conf
from os import chmod
- import re, socket, ssl
+ import re
+ import socket
+ import ssl
config = utils.load_settings(env)
newconf = request.form
@@ -756,7 +880,9 @@ def smtp_relay_set():
elif re.fullmatch(r"[a-z\d\._]+", sel.strip()) is None:
return ("The DKIM selector is invalid!", 400)
elif sel.strip() == config.get("local_dkim_selector", "mail"):
- return (f"The DKIM selector {sel.strip()} is already in use by the box!", 400)
+ return (
+ f"The DKIM selector {sel.strip()} is already in use by the box!",
+ 400)
else:
# DKIM selector looks good, try processing the RR
rr = newconf.get("dkim_rr", "")
@@ -769,7 +895,7 @@ def smtp_relay_set():
if len(sp) != 2:
return ("DKIM public key RR is malformed!", 400)
components[sp[0]] = sp[1]
-
+
if not components.get("p"):
return ("The DKIM public key doesn't exist!", 400)
@@ -780,22 +906,28 @@ def smtp_relay_set():
implicit_tls = False
if newconf.get("enabled") == "true":
- relay_on = True
+ relay_on = True
# Try negotiating TLS directly. We need to know this because we need to configure Postfix
# to be aware of this detail.
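+ # If the handshake below succeeds, the relay expects implicit TLS on
+ # connect (as on SMTPS port 465); if it fails, we fall back to STARTTLS.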
try:
ctx = ssl.create_default_context()
- with socket.create_connection((newconf.get("host"), int(newconf.get("port"))), 5) as sock:
- with ctx.wrap_socket(sock, server_hostname=newconf.get("host")):
+ with socket.create_connection(
+ (newconf.get("host"), int(newconf.get("port"))), 5) as sock:
+ with ctx.wrap_socket(sock,
+ server_hostname=newconf.get("host")):
implicit_tls = True
except ssl.SSLError as sle:
# Couldn't connect via TLS, configure Postfix to send via STARTTLS
print(sle.reason)
except (socket.herror, socket.gaierror) as he:
- return (f"Unable to resolve hostname (it probably is incorrect): {he.strerror}", 400)
+ return (
+ f"Unable to resolve hostname (it probably is incorrect): {he.strerror}",
+ 400)
except socket.timeout:
- return ("We couldn't connect to the server. Is it down or did you write the wrong port number?", 400)
+ return (
+ "We couldn't connect to the server. Is it down or did you write the wrong port number?",
+ 400)
pw_file = "/etc/postfix/sasl_passwd"
modify_password = True
@@ -804,7 +936,9 @@ def smtp_relay_set():
if os.path.isfile(pw_file):
modify_password = False
else:
- return ("Please provide a password/key (there is no existing password to retain).", 400)
+ return (
+ "Please provide a password/key (there is no existing password to retain).",
+ 400)
try:
# Write on daemon settings
@@ -812,43 +946,58 @@ def smtp_relay_set():
config["SMTP_RELAY_HOST"] = newconf.get("host")
config["SMTP_RELAY_PORT"] = int(newconf.get("port"))
config["SMTP_RELAY_USER"] = newconf.get("user")
- config["SMTP_RELAY_AUTHORIZED_SERVERS"] = [s.strip() for s in re.split(r"[, ]+", newconf.get("authorized_servers", []) or "") if s.strip() != ""]
+ config["SMTP_RELAY_AUTHORIZED_SERVERS"] = [
+ s.strip()
+ for s in re.split(r"[, ]+",
+ newconf.get("authorized_servers", []) or "")
+ if s.strip() != ""
+ ]
utils.write_settings(config, env)
# Write on Postfix configs
edit_conf("/etc/postfix/main.cf", [
- "relayhost=" + (f"[{config['SMTP_RELAY_HOST']}]:{config['SMTP_RELAY_PORT']}" if config["SMTP_RELAY_ENABLED"] else ""),
+ "relayhost=" +
+ (f"[{config['SMTP_RELAY_HOST']}]:{config['SMTP_RELAY_PORT']}"
+ if config["SMTP_RELAY_ENABLED"] else ""),
f"smtp_tls_wrappermode={'yes' if implicit_tls else 'no'}"
- ], delimiter_re=r"\s*=\s*", delimiter="=", comment_char="#")
+ ],
+ delimiter_re=r"\s*=\s*",
+ delimiter="=",
+ comment_char="#")
# Edit the sasl password (still will edit the file, but keep the pw)
-
+
with open(pw_file, "a+") as f:
f.seek(0)
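+ # Each sasl_passwd entry has the form "[host]:port user:password";
+ # capture any existing password so it can be retained.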
pwm = re.match(r"\[.+\]\:[0-9]+\s.+\:(.*)", f.readline())
if (pwm is None or len(pwm.groups()) != 1) and not modify_password:
# The existing password line could not be parsed and no new password was provided.
- return ("Please provide a password/key (there is no existing password to retain).", 400)
+ return (
+ "Please provide a password/key (there is no existing password to retain).",
+ 400)
f.truncate(0)
f.write(
f"[{config['SMTP_RELAY_HOST']}]:{config['SMTP_RELAY_PORT']} {config['SMTP_RELAY_USER']}:{newconf.get('key') if modify_password else pwm[1]}\n"
)
chmod(pw_file, 0o600)
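+ # postmap compiles sasl_passwd into the indexed lookup table that
+ # Postfix actually reads.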
- utils.shell("check_output", ["/usr/sbin/postmap", pw_file], capture_stderr=True)
+ utils.shell("check_output", ["/usr/sbin/postmap", pw_file],
+ capture_stderr=True)
# Regenerate DNS (to apply whatever changes need to be made)
from dns_update import do_dns_update
do_dns_update(env)
# Restart Postfix
- return utils.shell("check_output", ["/usr/sbin/postfix", "reload"], capture_stderr=True)
+ return utils.shell("check_output", ["/usr/sbin/postfix", "reload"],
+ capture_stderr=True)
except Exception as e:
return (str(e), 400)
# PGP
+
@app.route('/system/pgp/', methods=["GET"])
@authorized_personnel_only
def get_keys():
@@ -858,6 +1007,7 @@ def get_keys():
"imported": list(map(key_representation, get_imported_keys()))
}
+
@app.route('/system/pgp/<fpr>', methods=["GET"])
@authorized_personnel_only
def get_key(fpr):
@@ -867,6 +1017,7 @@ def get_key(fpr):
abort(404)
return key_representation(k)
+
@app.route('/system/pgp/<fpr>', methods=["DELETE"])
@authorized_personnel_only
def delete_key(fpr):
@@ -881,6 +1032,7 @@ def delete_key(fpr):
except ValueError as e:
return (str(e), 400)
+
@app.route('/system/pgp/<fpr>/export', methods=["GET"])
@authorized_personnel_only
def export_key(fpr):
@@ -890,6 +1042,7 @@ def export_key(fpr):
abort(404)
return exp
+
@app.route('/system/pgp/import', methods=["POST"])
@authorized_personnel_only
def import_key():
@@ -899,7 +1052,7 @@ def import_key():
k = request.form.get('key')
try:
result = import_key(k)
- build_wkd() # Rebuild the WKD
+ build_wkd() # Rebuild the WKD
return {
"keys_read": result.considered,
"keys_added": result.imported,
@@ -911,7 +1064,10 @@ def import_key():
except ValueError as e:
return (str(e), 400)
+
# Web Key Directory
+
+
@app.route('/system/pgp/wkd', methods=["GET"])
@authorized_personnel_only
def get_wkd_status():
@@ -922,7 +1078,13 @@ def get_wkd_status():
options = get_user_fpr_maps()
chosen = get_wkd_config()
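+ # Presumably maps each address to its candidate key fingerprints plus
+ # the fingerprint currently selected for WKD publication.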
- wkd_tmp = {x: {"options": list(options.get(x)), "selected": chosen.get(x)} for x in options.keys()}
+ wkd_tmp = {
+ x: {
+ "options": list(options.get(x)),
+ "selected": chosen.get(x)
+ }
+ for x in options.keys()
+ }
wkd = {}
for e in wkd_tmp.keys():
@@ -931,10 +1093,15 @@ def get_wkd_status():
wkd[get_domain(e)][e] = wkd_tmp[e]
return {
- "keys": {x.get("master_fpr"): x for x in map(key_representation, [get_daemon_key()] + get_imported_keys())},
+ "keys": {
+ x.get("master_fpr"): x
+ for x in map(key_representation, [get_daemon_key()] +
+ get_imported_keys())
+ },
"wkd": wkd
}
+
@app.route('/system/pgp/wkd', methods=["POST"])
@authorized_personnel_only
def update_wkd():
@@ -943,6 +1110,7 @@ def update_wkd():
build_wkd()
return "OK"
+
@app.route('/system/default-quota', methods=["GET"])
@authorized_personnel_only
def default_quota_get():
@@ -953,12 +1121,14 @@ def default_quota_get():
"default-quota": get_default_quota(env),
})
+
@app.route('/system/default-quota', methods=["POST"])
@authorized_personnel_only
def default_quota_set():
config = utils.load_settings(env)
try:
- config["default-quota"] = validate_quota(request.values.get('default_quota'))
+ config["default-quota"] = validate_quota(
+ request.values.get('default_quota'))
utils.write_settings(config, env)
except ValueError as e:
@@ -966,8 +1136,10 @@ def default_quota_set():
return "OK"
+
# MUNIN
+
@app.route('/munin/')
@authorized_personnel_only
def munin_start():
@@ -976,33 +1148,54 @@ def munin_start():
# that subsequent requests will read for authorization. (We don't use cookies
# for the API to avoid CSRF vulnerabilities.)
response = make_response("OK")
- response.set_cookie("session", auth_service.create_session_key(request.user_email, env, type='cookie'),
- max_age=60*30, secure=True, httponly=True, samesite="Strict") # 30 minute duration
+ response.set_cookie("session",
+ auth_service.create_session_key(request.user_email,
+ env,
+ type='cookie'),
+ max_age=60 * 30,
+ secure=True,
+ httponly=True,
+ samesite="Strict") # 30 minute duration
return response
+
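+ # The munin routes below validate this short-lived session cookie
+ # instead of the Authorization header.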
def check_request_cookie_for_admin_access():
- session = auth_service.get_session(None, request.cookies.get("session", ""), "cookie", env)
- if not session: return False
+ session = auth_service.get_session(None,
+ request.cookies.get("session",
+ ""), "cookie", env)
+ if not session:
+ return False
privs = get_mail_user_privileges(session["email"], env)
- if not isinstance(privs, list): return False
- if "admin" not in privs: return False
+ if not isinstance(privs, list):
+ return False
+ if "admin" not in privs:
+ return False
return True
+
def authorized_personnel_only_via_cookie(f):
+
@wraps(f)
def g(*args, **kwargs):
if not check_request_cookie_for_admin_access():
- return Response("Unauthorized", status=403, mimetype='text/plain', headers={})
+ return Response("Unauthorized",
+ status=403,
+ mimetype='text/plain',
+ headers={})
return f(*args, **kwargs)
+
return g
+
@app.route('/munin/<path:filename>')
@authorized_personnel_only_via_cookie
def munin_static_file(filename=""):
# Proxy the request to static files.
- if filename == "": filename = "index.html"
+ if filename == "":
+ filename = "index.html"
return send_from_directory("/var/cache/munin/www", filename)
+
@app.route('/munin/cgi-graph/<path:filename>')
@authorized_personnel_only_via_cookie
def munin_cgi(filename):
@@ -1034,17 +1227,23 @@ def munin_cgi(filename):
query_str = request.query_string.decode("utf-8", 'ignore')
- env = {'PATH_INFO': '/%s/' % filename, 'REQUEST_METHOD': 'GET', 'QUERY_STRING': query_str}
- code, binout = utils.shell('check_output',
- COMMAND.split(" ", 5),
- # Using a maxsplit of 5 keeps the last arguments together
- env=env,
- return_bytes=True,
- trap=True)
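+ # Minimal CGI environment; munin-cgi-graph derives the graph to render
+ # from PATH_INFO and the query string.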
+ env = {
+ 'PATH_INFO': '/%s/' % filename,
+ 'REQUEST_METHOD': 'GET',
+ 'QUERY_STRING': query_str
+ }
+ code, binout = utils.shell(
+ 'check_output',
+ COMMAND.split(" ", 5),
+ # Using a maxsplit of 5 keeps the last arguments together
+ env=env,
+ return_bytes=True,
+ trap=True)
if code != 0:
# nonzero returncode indicates error
- app.logger.error("munin_cgi: munin-cgi-graph returned nonzero exit code, %s", code)
+ app.logger.error(
+ "munin_cgi: munin-cgi-graph returned nonzero exit code, %s", code)
return ("error processing graph image", 500)
# /usr/lib/munin/cgi/munin-cgi-graph returns both headers and binary png when successful.
@@ -1055,9 +1254,12 @@ def munin_cgi(filename):
name, value = line.decode("utf8").split(':', 1)
response.headers[name] = value
if 'Status' in response.headers and '404' in response.headers['Status']:
- app.logger.warning("munin_cgi: munin-cgi-graph returned 404 status code. PATH_INFO=%s", env['PATH_INFO'])
+ app.logger.warning(
+ "munin_cgi: munin-cgi-graph returned 404 status code. PATH_INFO=%s",
+ env['PATH_INFO'])
return response
+
def log_failed_login(request):
# We need to figure out the ip to list in the message, all our calls are routed
# through nginx who will put the original ip in X-Forwarded-For.
@@ -1071,7 +1273,9 @@ def log_failed_login(request):
# We need to add a timestamp to the log message, otherwise /dev/log will eat the "duplicate"
# message.
- app.logger.warning( "Mail-in-a-Box Management Daemon: Failed login attempt from ip %s - timestamp %s" % (ip, time.time()))
+ app.logger.warning(
+ "Mail-in-a-Box Management Daemon: Failed login attempt from ip %s - timestamp %s"
+ % (ip, time.time()))
# APP
diff --git a/management/dns_update.py b/management/dns_update.py
index 152b853..730359f 100755
--- a/management/dns_update.py
+++ b/management/dns_update.py
@@ -4,7 +4,14 @@
# and mail aliases and restarts nsd.
########################################################################
-import sys, os, os.path, urllib.parse, datetime, re, hashlib, base64
+import sys
+import os
+import os.path
+import urllib.parse
+import datetime
+import re
+import hashlib
+import base64
import ipaddress
import rtyaml
import idna
@@ -19,9 +26,12 @@ from ssl_certificates import get_ssl_certificates, check_certificate
# DNS but not in URLs), which are common in certain record types like for DKIM.
DOMAIN_RE = "^(?!\-)(?:[*][.])?(?:[a-zA-Z\d\-_]{0,62}[a-zA-Z\d_]\.){1,126}(?!\d+)[a-zA-Z\d_]{1,63}(\.?)$"
-DEFAULT_TTL = 86400 # 24 hours; MIAB-generated records and all custom records without a specified TTL will use this one.
-TTL_MIN = 30 # 30 seconds; Most resolvers will not honor TTL values below this one. Some have an higher min TTL.
-TTL_MAX = 2592000 # 30 days; some DNS services have lower caps (7 days)
+# 24 hours; MIAB-generated records and all custom records without a specified TTL will use this one.
+DEFAULT_TTL = 86400
+# 30 seconds; most resolvers will not honor TTL values below this one. Some have a higher min TTL.
+TTL_MIN = 30
+TTL_MAX = 2592000 # 30 days; some DNS services have lower caps (7 days)
+
def get_dns_domains(env):
# Add all domain names in use by email users and mail aliases, any
@@ -35,6 +45,7 @@ def get_dns_domains(env):
domains.add(env['PRIMARY_HOSTNAME'])
return domains
+
def get_dns_zones(env):
# What domains should we create DNS zones for? Never create a zone for
# a domain & a subdomain of that domain.
@@ -43,7 +54,7 @@ def get_dns_zones(env):
# Exclude domains that are subdomains of other domains we know. Proceed
# by looking at shorter domains first.
zone_domains = set()
- for domain in sorted(domains, key=lambda d : len(d)):
+ for domain in sorted(domains, key=lambda d: len(d)):
for d in zone_domains:
if domain.endswith("." + d):
# We found a parent domain already in the list.
@@ -60,11 +71,12 @@ def get_dns_zones(env):
# Sort the list so that the order is nice and so that nsd.conf has a
# stable order so we don't rewrite the file & restart the service
# meaninglessly.
- zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
- zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
+ zone_order = sort_domains([zone[0] for zone in zonefiles], env)
+ zonefiles.sort(key=lambda zone: zone_order.index(zone[0]))
return zonefiles
+
def do_dns_update(env, force=False):
# Write zone files.
os.makedirs('/etc/nsd/zones', exist_ok=True)
@@ -76,7 +88,8 @@ def do_dns_update(env, force=False):
# See if the zone has changed, and if so update the serial number
# and write the zone file.
- if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
+ if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records,
+ env, force):
# Zone was not updated. There were no changes.
continue
@@ -124,8 +137,10 @@ def do_dns_update(env, force=False):
else:
return "updated DNS: " + ",".join(updated_domains) + "\n"
+
########################################################################
+
def build_zones(env):
# What domains (and their zone filenames) should we build?
domains = get_dns_domains(env)
@@ -136,10 +151,11 @@ def build_zones(env):
from mailconfig import get_mail_domains
from web_update import get_web_domains
mail_domains = set(get_mail_domains(env))
- mail_user_domains = set(get_mail_domains(env, users_only=True)) # i.e. will log in for mail, Nextcloud
+ # i.e. will log in for mail, Nextcloud
+ mail_user_domains = set(get_mail_domains(env, users_only=True))
web_domains = set(get_web_domains(env))
auto_domains = web_domains - set(get_web_domains(env, include_auto=False))
- domains |= auto_domains # www redirects not included in the initial list, see above
+ domains |= auto_domains # www redirects not included in the initial list, see above
# Add ns1/ns2+PRIMARY_HOSTNAME which must also have A/AAAA records
# when the box is acting as authoritative DNS server for its domains.
@@ -160,7 +176,9 @@ def build_zones(env):
# For MTA-STS, we'll need to check if the PRIMARY_HOSTNAME certificate is
# signed and valid. Check that now rather than repeatedly for each domain.
- domains[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] = is_domain_cert_signed_and_valid(env["PRIMARY_HOSTNAME"], env)
+ domains[env["PRIMARY_HOSTNAME"]][
+ "certificate-is-valid"] = is_domain_cert_signed_and_valid(
+ env["PRIMARY_HOSTNAME"], env)
# Load custom records to add to zones.
additional_records = list(get_custom_dns_config(env))
@@ -171,9 +189,14 @@ def build_zones(env):
records = build_zone(domain, domains, additional_records, env)
yield (domain, zonefile, records)
-def build_zone(domain, domain_properties, additional_records, env, is_zone=True):
+
+def build_zone(domain,
+ domain_properties,
+ additional_records,
+ env,
+ is_zone=True):
records = []
-
+
# Are there any other authorized servers for this domain?
settings = load_settings(env)
spf_extra = None
@@ -199,7 +222,8 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
elif idna.encode(r):
sr = "a:" + idna.encode(r).decode()
else:
- raise ValueError(f"Unexpected entry on authorized servers: {r}")
+ raise ValueError(
+ f"Unexpected entry on authorized servers: {r}")
spf_extra += f"{sr} "
if spf_extra.strip() == "":
spf_extra = None
@@ -214,60 +238,82 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
# is managed outside of the box.
if is_zone:
# Obligatory NS record to ns1.PRIMARY_HOSTNAME.
- records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False, None))
+ records.append(
+ (None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False, None))
# NS record to ns2.PRIMARY_HOSTNAME or whatever the user overrides.
# User may provide one or more additional nameservers
secondary_ns_list = get_secondary_dns(additional_records, mode="NS") \
- or ["ns2." + env["PRIMARY_HOSTNAME"]]
+ or ["ns2." + env["PRIMARY_HOSTNAME"]]
for secondary_ns in secondary_ns_list:
- records.append((None, "NS", secondary_ns+'.', False, None))
-
+ records.append((None, "NS", secondary_ns + '.', False, None))
# In PRIMARY_HOSTNAME...
if domain == env["PRIMARY_HOSTNAME"]:
# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
# and we can provide different explanatory text.
- records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box.", None))
- if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box.", None))
+ records.append((None, "A", env["PUBLIC_IP"],
+ "Required. Sets the IP address of the box.", None))
+ if env.get("PUBLIC_IPV6"):
+ records.append(
+ (None, "AAAA", env["PUBLIC_IPV6"],
+ "Required. Sets the IPv6 address of the box.", None))
# Add a DANE TLSA record for SMTP.
- records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used.", None))
+ records.append(("_25._tcp", "TLSA", build_tlsa_record(
+ env
+ ), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used.",
+ None))
# Add a DANE TLSA record for HTTPS, which some browser extensions might make use of.
- records.append(("_443._tcp", "TLSA", build_tlsa_record(env), "Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it.", None))
+ records.append(("_443._tcp", "TLSA", build_tlsa_record(
+ env
+ ), "Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it.",
+ None))
# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.
for value in build_sshfp_records():
- records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh.", None))
+ records.append((
+ None, "SSHFP", value,
+ "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh.",
+ None))
# Add DNS records for any subdomains of this domain. We should not have a zone for
# both a domain and one of its subdomains.
- if is_zone: # don't recurse when we're just loading data for a subdomain
+ if is_zone: # don't recurse when we're just loading data for a subdomain
subdomains = [d for d in domain_properties if d.endswith("." + domain)]
for subdomain in subdomains:
subdomain_qname = subdomain[0:-len("." + domain)]
- subzone = build_zone(subdomain, domain_properties, additional_records, env, is_zone=False)
+ subzone = build_zone(subdomain,
+ domain_properties,
+ additional_records,
+ env,
+ is_zone=False)
for child_qname, child_rtype, child_value, child_explanation, child_ttl in subzone:
if child_qname == None:
child_qname = subdomain_qname
else:
child_qname += "." + subdomain_qname
- records.append((child_qname, child_rtype, child_value, child_explanation, child_ttl))
+ records.append((child_qname, child_rtype, child_value,
+ child_explanation, child_ttl))
- has_rec_base = list(records) # clone current state
+
+ has_rec_base = list(records)  # clone current state
def has_rec(qname, rtype, prefix=None):
for rec in has_rec_base:
- if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
+ if rec[0] == qname and rec[1] == rtype and (
+ prefix is None or rec[2].startswith(prefix)):
return True
return False
# The user may set other records that don't conflict with our settings.
# Don't put any TXT records above this line, or it'll prevent any custom TXT records.
- for qname, rtype, value, ttl in filter_custom_records(domain, additional_records):
+ for qname, rtype, value, ttl in filter_custom_records(
+ domain, additional_records):
# Don't allow custom records for record types that override anything above.
# But allow multiple custom records for the same rtype --- see how has_rec_base is used.
- if has_rec(qname, rtype): continue
+ if has_rec(qname, rtype):
+ continue
# The "local" keyword on A/AAAA records are short-hand for our own IP.
# This also flags for web configuration that the user wants a website here.
@@ -288,23 +334,33 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
has_rec_base = list(records)
a_expl = "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain
if domain_properties[domain]["auto"]:
- if domain.startswith("ns1.") or domain.startswith("ns2."): a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server
- if domain.startswith("www."): a_expl = "Optional. Sets the IP address that %s resolves to so that the box can provide a redirect to the parent domain." % domain
- if domain.startswith("mta-sts."): a_expl = "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."
- if domain.startswith("autoconfig."): a_expl = "Provides email configuration autodiscovery support for Thunderbird Autoconfig."
- if domain.startswith("autodiscover."): a_expl = "Provides email configuration autodiscovery support for Z-Push ActiveSync Autodiscover."
+ if domain.startswith("ns1.") or domain.startswith("ns2."):
+ a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server
+ if domain.startswith("www."):
+ a_expl = "Optional. Sets the IP address that %s resolves to so that the box can provide a redirect to the parent domain." % domain
+ if domain.startswith("mta-sts."):
+ a_expl = "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."
+ if domain.startswith("autoconfig."):
+ a_expl = "Provides email configuration autodiscovery support for Thunderbird Autoconfig."
+ if domain.startswith("autodiscover."):
+ a_expl = "Provides email configuration autodiscovery support for Z-Push ActiveSync Autodiscover."
defaults = [
- (None, "A", env["PUBLIC_IP"], a_expl),
- (None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
+ (None, "A", env["PUBLIC_IP"], a_expl),
+ (None, "AAAA", env.get('PUBLIC_IPV6'),
+ "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)"
+ % domain),
]
for qname, rtype, value, explanation in defaults:
- if value is None or value.strip() == "": continue # skip IPV6 if not set
- if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
+ if value is None or value.strip() == "":
+ continue # skip IPV6 if not set
+ if not is_zone and qname == "www":
+ continue # don't create any default 'www' subdomains on what are themselves subdomains
# Set the default record, but not if:
# (1) there is not a user-set record of the same type already
# (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence
# (3) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
- if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
+ if not has_rec(qname, rtype) and not has_rec(
+ qname, "CNAME") and not has_rec(qname, "A"):
records.append((qname, rtype, value, explanation, None))
# Don't pin the list of records that has_rec checks against anymore.
@@ -313,17 +369,26 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
if domain_properties[domain]["mail"]:
# The MX record says where email for the domain should be delivered: Here!
if not has_rec(None, "MX", prefix="10 "):
- records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain, None))
+ records.append((
+ None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"],
+ "Required. Specifies the hostname (and priority) of the machine that handles @%s mail."
+ % domain, None))
# Append a WKD record.
# Skip if the user has set a WKD record already.
- if not has_rec("openpgpkey", "CNAME") and not has_rec("openpgpkey", "A") and not has_rec("openpgpkey", "AAAA"):
+ if not has_rec("openpgpkey", "CNAME") and not has_rec(
+ "openpgpkey", "A") and not has_rec("openpgpkey", "AAAA"):
wkd_records = [
- ("openpgpkey", "A", env["PUBLIC_IP"], "Optional. Specifies that this machine is an authoritative public key source for @%s user-id's." % domain),
- ("openpgpkey", "AAAA", env["PUBLIC_IPV6"], "Optional. Specifies that this machine is an authoritative public key source for @%s user-id's." % domain)
+ ("openpgpkey", "A", env["PUBLIC_IP"],
+ "Optional. Specifies that this machine is an authoritative public key source for @%s user-id's."
+ % domain),
+ ("openpgpkey", "AAAA", env["PUBLIC_IPV6"],
+ "Optional. Specifies that this machine is an authoritative public key source for @%s user-id's."
+ % domain)
]
for qname, rtype, value, explanation in wkd_records:
- if value is None or value.strip() == "": continue # skip IPV6 if not set
+ if value is None or value.strip() == "":
+ continue # skip IPV6 if not set
if not has_rec(qname, rtype):
records.append((qname, rtype, value, explanation, None))
@@ -332,40 +397,60 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
# Skip if the user has set a custom SPF record.
if not has_rec(None, "TXT", prefix="v=spf1 "):
if spf_extra is None:
- records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain, None))
+ records.append((
+ None, "TXT", 'v=spf1 mx -all',
+ "Recommended. Specifies that only the box is permitted to send @%s mail."
+ % domain, None))
else:
- records.append((None, "TXT", f'v=spf1 mx {spf_extra}-all', "Recommended. Specifies that only the box and the server(s) you authorized are permitted to send @%s mail." % domain, None))
+ records.append((
+ None, "TXT", f'v=spf1 mx {spf_extra}-all',
+ "Recommended. Specifies that only the box and the server(s) you authorized are permitted to send @%s mail."
+ % domain, None))
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
# Skip if the user has set a DKIM record already.
- opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
+ opendkim_record_file = os.path.join(env['STORAGE_ROOT'],
+ 'mail/dkim/mail.txt')
with open(opendkim_record_file) as orf:
- m = re.match(r'(\S+)\s+IN\s+TXT\s+\( ((?:"[^"]+"\s+)+)\)', orf.read(), re.S)
+ m = re.match(r'(\S+)\s+IN\s+TXT\s+\( ((?:"[^"]+"\s+)+)\)',
+ orf.read(), re.S)
val = "".join(re.findall(r'"([^"]+)"', m.group(2)))
rname = f"{settings.get('local_dkim_selector', 'mail')}._domainkey"
if not has_rec(rname, "TXT", prefix="v=DKIM1; "):
- records.append((rname, "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain, None))
-
+ records.append((
+ rname, "TXT", val,
+ "Recommended. Provides a way for recipients to verify that this machine sent @%s mail."
+ % domain, None))
+
# Append the DKIM TXT record relative to the SMTP relay, if applicable.
# Skip if manually set by the user.
relay_ds = settings.get("SMTP_RELAY_DKIM_SELECTOR")
rr = settings.get("SMTP_RELAY_DKIM_RR", {})
- if relay_ds is not None and not has_rec(f"{relay_ds}._domainkey", "TXT", prefix="v=DKIM1; ") and rr.get("p") is not None:
+ if relay_ds is not None and not has_rec(
+ f"{relay_ds}._domainkey", "TXT",
+ prefix="v=DKIM1; ") and rr.get("p") is not None:
dkim_rrtxt = ""
- for c, d in (("v", "DKIM1"), ("h", None), ("k", "rsa"), ("n", None), ("s", None), ("t", None)):
+ for c, d in (("v", "DKIM1"), ("h", None), ("k", "rsa"),
+ ("n", None), ("s", None), ("t", None)):
txt = rr.get(c, d)
if txt is None:
continue
else:
dkim_rrtxt += f"{c}={txt}; "
dkim_rrtxt += f"p={rr.get('p')}"
- records.append((f"{relay_ds}._domainkey", "TXT", dkim_rrtxt, "Recommended. Provides a way for recipients to verify that the SMTP relay you set up sent @%s mail." % domain, None))
+ records.append((
+ f"{relay_ds}._domainkey", "TXT", dkim_rrtxt,
+ "Recommended. Provides a way for recipients to verify that the SMTP relay you set up sent @%s mail."
+ % domain, None))
# Append a DMARC record.
# Skip if the user has set a DMARC record already.
if not has_rec("_dmarc", "TXT", prefix="v=DMARC1; "):
- records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Recommended. Specifies that mail that does not originate from the box but claims to be from @%s or which does not have a valid DKIM signature is suspect and should be quarantined by the recipient's mail system." % domain, None))
+ records.append((
+ "_dmarc", "TXT", 'v=DMARC1; p=quarantine',
+ "Recommended. Specifies that mail that does not originate from the box but claims to be from @%s or which does not have a valid DKIM signature is suspect and should be quarantined by the recipient's mail system."
+ % domain, None))
if domain_properties[domain]["user"]:
# Add CardDAV/CalDAV SRV records on the non-primary hostname that points to the primary hostname
@@ -375,7 +460,11 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
for dav in ("card", "cal"):
qname = "_" + dav + "davs._tcp"
if not has_rec(qname, "SRV"):
- records.append((qname, "SRV", "0 0 443 " + env["PRIMARY_HOSTNAME"] + ".", "Recommended. Specifies the hostname of the server that handles CardDAV/CalDAV services for email addresses on this domain.", None))
+ records.append((
+ qname, "SRV",
+ "0 0 443 " + env["PRIMARY_HOSTNAME"] + ".",
+ "Recommended. Specifies the hostname of the server that handles CardDAV/CalDAV services for email addresses on this domain.",
+ None))
# If this is a domain name that there are email addresses configured for, i.e. "something@"
# this domain name, then the domain name is a MTA-STS (https://tools.ietf.org/html/rfc8461)
@@ -394,25 +483,32 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
# yet been provisioned). Since we cannot provision a certificate without A/AAAA records, we
# always set them (by including them in the www domains) --- only the TXT records depend on there
# being valid certificates.
- mta_sts_records = [ ]
+ mta_sts_records = []
if domain_properties[domain]["mail"] \
- and domain_properties[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] \
- and is_domain_cert_signed_and_valid("mta-sts." + domain, env):
+ and domain_properties[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] \
+ and is_domain_cert_signed_and_valid("mta-sts." + domain, env):
# Compute an up-to-32-character hash of the policy file. We'll take a SHA-1 hash of the policy
# file (20 bytes) and encode it as base-64 (28 bytes, using alphanumeric alternate characters
# instead of '+' and '/' which are not allowed in an MTA-STS policy id) but then just take its
# first 20 characters, which is more than sufficient to change whenever the policy file changes
# (and ensures any '=' padding at the end of the base64 encoding is dropped).
with open("/var/lib/mailinabox/mta-sts.txt", "rb") as f:
- mta_sts_policy_id = base64.b64encode(hashlib.sha1(f.read()).digest(), altchars=b"AA").decode("ascii")[0:20]
- mta_sts_records.extend([
- ("_mta-sts", "TXT", "v=STSv1; id=" + mta_sts_policy_id, "Optional. Part of the MTA-STS policy for incoming mail. If set, a MTA-STS policy must also be published.")
- ])
+ mta_sts_policy_id = base64.b64encode(
+ hashlib.sha1(f.read()).digest(),
+ altchars=b"AA").decode("ascii")[0:20]
+ mta_sts_records.extend([(
+ "_mta-sts", "TXT", "v=STSv1; id=" + mta_sts_policy_id,
+ "Optional. Part of the MTA-STS policy for incoming mail. If set, a MTA-STS policy must also be published."
+ )])
# Enable SMTP TLS reporting (https://tools.ietf.org/html/rfc8460) if the user has set a config option.
# Skip the rules below if the user has set a custom _smtp._tls record.
- if env.get("MTA_STS_TLSRPT_RUA") and not has_rec("_smtp._tls", "TXT", prefix="v=TLSRPTv1;"):
- mta_sts_records.append(("_smtp._tls", "TXT", "v=TLSRPTv1; rua=" + env["MTA_STS_TLSRPT_RUA"], "Optional. Enables MTA-STS reporting.", None))
+ if env.get("MTA_STS_TLSRPT_RUA") and not has_rec(
+ "_smtp._tls", "TXT", prefix="v=TLSRPTv1;"):
+ mta_sts_records.append(
+ ("_smtp._tls", "TXT",
+ "v=TLSRPTv1; rua=" + env["MTA_STS_TLSRPT_RUA"],
+ "Optional. Enables MTA-STS reporting.", None))
for qname, rtype, value, explanation in mta_sts_records:
if not has_rec(qname, rtype):
records.append((qname, rtype, value, explanation, None))
@@ -422,52 +518,88 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True)
# non-mail domain and also may include qnames from custom DNS records.
# Do this once at the end of generating a zone.
if is_zone:
- qnames_with_a = set(qname for (qname, rtype, value, explanation, ttl) in records if rtype in ("A", "AAAA"))
- qnames_with_mx = set(qname for (qname, rtype, value, explanation, ttl) in records if rtype == "MX")
+ qnames_with_a = set(qname for (qname, rtype, value, explanation,
+ ttl) in records
+ if rtype in ("A", "AAAA"))
+ qnames_with_mx = set(qname for (qname, rtype, value, explanation,
+ ttl) in records if rtype == "MX")
for qname in qnames_with_a - qnames_with_mx:
# Mark this domain as not sending mail with hard-fail SPF and DMARC records.
- d = (qname+"." if qname else "") + domain
+ d = (qname + "." if qname else "") + domain
if not has_rec(qname, "TXT", prefix="v=spf1 "):
- records.append((qname, "TXT", 'v=spf1 -all', "Recommended. Prevents use of this domain name for outbound mail by specifying that no servers are valid sources for mail from @%s. If you do send email from this domain name you should either override this record such that the SPF rule does allow the originating server, or, take the recommended approach and have the box handle mail for this domain (simply add any receiving alias at this domain name to make this machine treat the domain name as one of its mail domains)." % d, None))
- if not has_rec("_dmarc" + ("."+qname if qname else ""), "TXT", prefix="v=DMARC1; "):
- records.append(("_dmarc" + ("."+qname if qname else ""), "TXT", 'v=DMARC1; p=reject', "Recommended. Prevents use of this domain name for outbound mail by specifying that the SPF rule should be honoured for mail from @%s." % d, None))
+ records.append((
+ qname, "TXT", 'v=spf1 -all',
+ "Recommended. Prevents use of this domain name for outbound mail by specifying that no servers are valid sources for mail from @%s. If you do send email from this domain name you should either override this record such that the SPF rule does allow the originating server, or, take the recommended approach and have the box handle mail for this domain (simply add any receiving alias at this domain name to make this machine treat the domain name as one of its mail domains)."
+ % d, None))
+ if not has_rec("_dmarc" + ("." + qname if qname else ""),
+ "TXT",
+ prefix="v=DMARC1; "):
+ records.append((
+ "_dmarc" + ("." + qname if qname else ""), "TXT",
+ 'v=DMARC1; p=reject',
+ "Recommended. Prevents use of this domain name for outbound mail by specifying that the SPF rule should be honoured for mail from @%s."
+ % d, None))
# And with a null MX record (https://explained-from-first-principles.com/email/#null-mx-record)
if not has_rec(qname, "MX"):
- records.append((qname, "MX", '0 .', "Recommended. Prevents use of this domain name for incoming mail.", None))
+ records.append((
+ qname, "MX", '0 .',
+ "Recommended. Prevents use of this domain name for incoming mail.",
+ None))
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
- records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
+ records.sort(key=lambda rec: list(
+ reversed(rec[0].split(".")) if rec[0] is not None else ""))
return records
+
def is_domain_cert_signed_and_valid(domain, env):
cert = get_ssl_certificates(env).get(domain)
- if not cert: return False # no certificate provisioned
- cert_status = check_certificate(domain, cert['certificate'], cert['private-key'])
+ if not cert:
+ return False # no certificate provisioned
+ cert_status = check_certificate(domain, cert['certificate'],
+ cert['private-key'])
return cert_status[0] == 'OK'
+
########################################################################
+
def build_tlsa_record(env):
# A DANE TLSA record in DNS specifies that connections on a port
# must use TLS and the certificate must match a particular criteria.
@@ -494,7 +626,8 @@ def build_tlsa_record(env):
fn = os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem")
cert = load_pem(load_cert_chain(fn)[0])
- subject_public_key = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
+ subject_public_key = cert.public_key().public_bytes(
+ Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
# We could have also loaded ssl_private_key.pem and called priv_key.public_key().public_bytes(...)
pk_hash = hashlib.sha256(subject_public_key).hexdigest()
@@ -505,6 +638,7 @@ def build_tlsa_record(env):
# 1: Use SHA256.
return "3 1 1 " + pk_hash
+
def build_sshfp_records():
# The SSHFP record is a way for us to embed this server's SSH public
# key fingerprint into the DNS so that remote hosts have an out-of-band
@@ -542,25 +676,31 @@ def build_sshfp_records():
pass
break
- keys = shell("check_output", ["ssh-keyscan", "-t", "rsa,dsa,ecdsa,ed25519", "-p", str(port), "localhost"])
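+ # Ask the local sshd (via ssh-keyscan) for its host keys; each key
+ # becomes one SSHFP record below.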
+ keys = shell("check_output", [
+ "ssh-keyscan", "-t", "rsa,dsa,ecdsa,ed25519", "-p",
+ str(port), "localhost"
+ ])
keys = sorted(keys.split("\n"))
for key in keys:
- if key.strip() == "" or key[0] == "#": continue
+ if key.strip() == "" or key[0] == "#":
+ continue
try:
host, keytype, pubkey = key.split(" ")
yield "%d %d ( %s )" % (
algorithm_number[keytype],
- 2, # specifies we are using SHA-256 on next line
+ 2, # specifies we are using SHA-256 on next line
hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
- )
+ )
except:
# Lots of things can go wrong. Don't let it disturb the DNS
# zone.
pass
+
########################################################################
+
def write_nsd_zone(domain, zonefile, records, env, force):
# On the $ORIGIN line, there's typically a ';' comment at the end explaining
# what the $ORIGIN line does. Any further data after the domain confuses
@@ -581,16 +721,18 @@ $ORIGIN {domain}.
$TTL {ttl} ; default time to live
@ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. (
- __SERIAL__ ; serial number
- 7200 ; Refresh (secondary nameserver update interval)
- 3600 ; Retry (when refresh fails, how often to try again, should be lower than the refresh)
- 1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)
- {ttl} ; Negative TTL (how long negative responses are cached)
- )
+ __SERIAL__ ; serial number
+ 7200 ; Refresh (secondary nameserver update interval)
+ 3600 ; Retry (when refresh fails, how often to try again, should be lower than the refresh)
+ 1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)
+ {ttl} ; Negative TTL (how long negative responses are cached)
+ )
"""
# Replace replacement strings.
- zone = zone.format(domain=domain, primary_domain=env["PRIMARY_HOSTNAME"], ttl=DEFAULT_TTL)
+ zone = zone.format(domain=domain,
+ primary_domain=env["PRIMARY_HOSTNAME"],
+ ttl=DEFAULT_TTL)
# Add records.
for subdomain, querytype, value, explanation, ttl in records:
@@ -605,15 +747,16 @@ $TTL {ttl} ; default time to live
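+ # A single character-string in a TXT record is limited to 255 bytes,
+ # so long values are split into multiple quoted chunks.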
while len(value) > 0:
s = value[0:255]
value = value[255:]
- s = s.replace('\\', '\\\\') # escape backslashes
- s = s.replace('"', '\\"') # escape quotes
- s = '"' + s + '"' # wrap in quotes
+ s = s.replace('\\', '\\\\') # escape backslashes
+ s = s.replace('"', '\\"') # escape quotes
+ s = '"' + s + '"' # wrap in quotes
v2 += s + " "
value = v2
zone += value + "\n"
# Append a stable hash of DNSSEC signing keys in a comment.
- zone += "\n; DNSSEC signing keys hash: {}\n".format(hash_dnssec_keys(domain, env))
+ zone += "\n; DNSSEC signing keys hash: {}\n".format(
+ hash_dnssec_keys(domain, env))
# DNSSEC requires re-signing a zone periodically. That requires
# bumping the serial number even if no other records have changed.
@@ -631,15 +774,18 @@ $TTL {ttl} ; default time to live
# number so we can re-sign it.
with open(zonefile + ".signed") as f:
signed_zone = f.read()
- expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
+ expiration_times = re.findall(
+ r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
if len(expiration_times) == 0:
# weird
force_bump = True
else:
# All of the times should be the same, but if not choose the soonest.
expiration_time = min(expiration_times)
- expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
- if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
+ expiration_time = datetime.datetime.strptime(
+ expiration_time, "%Y%m%d%H%M%S")
+ if expiration_time - datetime.datetime.now() < datetime.timedelta(
+ days=3):
# We're within three days of the expiration, so bump serial & resign.
force_bump = True
@@ -655,7 +801,8 @@ $TTL {ttl} ; default time to live
# Clear out the serial number in the existing zone file for the
# purposes of seeing if anything *else* in the zone has changed.
existing_serial = m.group(1)
- existing_zone = existing_zone.replace(m.group(0), "__SERIAL__ ; serial number")
+ existing_zone = existing_zone.replace(
+ m.group(0), "__SERIAL__ ; serial number")
# If the existing zone is the same as the new zone (modulo the serial number),
# there is no need to update the file. Unless we're forcing a bump.
@@ -675,21 +822,25 @@ $TTL {ttl} ; default time to live
with open(zonefile, "w") as f:
f.write(zone)
- return True # file is updated
+ return True # file is updated
+
def get_dns_zonefile(zone, env):
for domain, fn in get_dns_zones(env):
if zone == domain:
break
else:
- raise ValueError("%s is not a domain name that corresponds to a zone." % zone)
+ raise ValueError(
+ "%s is not a domain name that corresponds to a zone." % zone)
nsd_zonefile = "/etc/nsd/zones/" + fn
with open(nsd_zonefile, "r") as f:
return f.read()
+
########################################################################
+
def write_nsd_conf(zonefiles, additional_records, env):
# Write the list of zones to a configuration file.
nsd_conf_file = "/etc/nsd/nsd.conf.d/zones.conf"
@@ -723,8 +874,10 @@ zone:
f.write(nsdconf)
return True
+
########################################################################
+
def find_dnssec_signing_keys(domain, env):
# For key that we generated (one per algorithm)...
d = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec')
@@ -741,25 +894,30 @@ def find_dnssec_signing_keys(domain, env):
# deleting the key's .conf file, which might result in the key
# being regenerated next upgrade. Keys should be disabled if
# they are not needed to reduce the DNSSEC query response size.
- if "DOMAINS" in keyinfo and domain not in [dd.strip() for dd in keyinfo["DOMAINS"].split(",")]:
+ if "DOMAINS" in keyinfo and domain not in [
+ dd.strip() for dd in keyinfo["DOMAINS"].split(",")
+ ]:
continue
for keytype in ("KSK", "ZSK"):
yield keytype, keyinfo[keytype]
+
def hash_dnssec_keys(domain, env):
# Create a stable (by sorting the items) hash of all of the private keys
# that will be used to sign this domain.
keydata = []
for keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)):
- oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ".private")
+ oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec',
+ keyfn + ".private")
keydata.append(keytype)
keydata.append(keyfn)
with open(oldkeyfn, "r") as fr:
- keydata.append( fr.read() )
+ keydata.append(fr.read())
keydata = "".join(keydata).encode("utf8")
return hashlib.sha1(keydata).hexdigest()
+
def sign_zone(domain, zonefile, env):
# Sign the zone with all of the keys that were generated during
# setup so that the user can choose which to use in their DS record at
@@ -782,36 +940,44 @@ def sign_zone(domain, zonefile, env):
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
- oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext)
+ oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec',
+ keyfn + ext)
with open(oldkeyfn, "r") as fr:
keydata = fr.read()
keydata = keydata.replace("_domain_", domain)
- prev_umask = os.umask(0o77) # ensure written file is not world-readable
+ # ensure written file is not world-readable
+ prev_umask = os.umask(0o77)
try:
with open(newkeyfn + ext, "w") as fw:
fw.write(keydata)
finally:
- os.umask(prev_umask) # other files we write should be world-readable
+ # other files we write should be world-readable
+ os.umask(prev_umask)
# Put the patched key filename base (without extension) into the list of keys we'll sign with.
all_keys.append(newkeyfn)
- if keytype == "KSK": ksk_keys.append(newkeyfn)
+ if keytype == "KSK":
+ ksk_keys.append(newkeyfn)
# Do the signing.
- expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
- shell('check_call', ["/usr/bin/ldns-signzone",
- # expire the zone after 30 days
- "-e", expiry_date,
+ expiry_date = (datetime.datetime.now() +
+ datetime.timedelta(days=30)).strftime("%Y%m%d")
+ shell(
+ 'check_call',
+ [
+ "/usr/bin/ldns-signzone",
+ # expire the zone after 30 days
+ "-e",
+ expiry_date,
- # use NSEC3
- "-n",
+ # use NSEC3
+ "-n",
- # zonefile to sign
- "/etc/nsd/zones/" + zonefile,
- ]
+ # zonefile to sign
+ "/etc/nsd/zones/" + zonefile,
+ ]
# keys to sign with (order doesn't matter -- it'll figure it out)
- + all_keys
- )
+ + all_keys)
# Create a DS record based on the patched-up key files. The DS record is specific to the
# zone being signed, so we can't use the .ds files generated when we created the keys.
@@ -825,11 +991,14 @@ def sign_zone(domain, zonefile, env):
with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
for key in ksk_keys:
for digest_type in ('1', '2', '4'):
- rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
- "-n", # output to stdout
- "-" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384
- key + ".key"
- ])
+ rr_ds = shell(
+ 'check_output',
+ [
+ "/usr/bin/ldns-key2ds",
+ "-n", # output to stdout
+ "-" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384
+ key + ".key"
+ ])
f.write(rr_ds)
# Remove the temporary patched key files.
@@ -837,13 +1006,16 @@ def sign_zone(domain, zonefile, env):
os.unlink(fn + ".private")
os.unlink(fn + ".key")
+
########################################################################
+
def write_opendkim_tables(domains, env):
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
# that we send mail from (zones and all subdomains).
- opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
+ opendkim_key_file = os.path.join(env['STORAGE_ROOT'],
+ 'mail/dkim/mail.private')
config = load_settings(env)
if not os.path.exists(opendkim_key_file):
@@ -859,22 +1031,18 @@ def write_opendkim_tables(domains, env):
# to be From: the domain must be signed with a DKIM key on the same domain.
# So we must have a separate KeyTable entry for each domain.
"SigningTable":
- "".join(
- "*@{domain} {domain}\n".format(domain=domain)
- for domain in domains
- ),
+ "".join("*@{domain} {domain}\n".format(domain=domain)
+ for domain in domains),
# The KeyTable specifies the signing domain, the DKIM selector, and the
# path to the private key to use for signing some mail. Per DMARC, the
# signing domain must match the sender's From: domain.
"KeyTable":
- "".join(
- "{domain} {domain}:{selector}:{key_file}\n".format(
- domain=domain,
- key_file=opendkim_key_file,
- selector = config.get("local_dkim_selector", "mail")
- ) for domain in domains
- ),
+ "".join("{domain} {domain}:{selector}:{key_file}\n".format(
+ domain=domain,
+ key_file=opendkim_key_file,
+ selector=config.get("local_dkim_selector", "mail"))
+ for domain in domains),
}
did_update = False
@@ -894,17 +1062,22 @@ def write_opendkim_tables(domains, env):
# no need to kick the opendkim process.
return did_update
+
########################################################################
+
def get_custom_dns_config(env, only_real_records=False):
try:
- custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
- if not isinstance(custom_dns, dict): raise ValueError() # caught below
+ custom_dns = rtyaml.load(
+ open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
+ if not isinstance(custom_dns, dict):
+ raise ValueError() # caught below
except:
- return [ ]
+ return []
for qname, entry in custom_dns.items():
- if qname == "_secondary_nameserver" and only_real_records: continue # skip fake record
+ if qname == "_secondary_nameserver" and only_real_records:
+ continue # skip fake record
# Short form. Mapping a domain name to a string is short-hand
# for creating A records.
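+ # e.g. "example.com: 1.2.3.4" is shorthand for a single A record.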
@@ -937,15 +1110,19 @@ def get_custom_dns_config(env, only_real_records=False):
# No other type of data is allowed.
raise ValueError()
+
def filter_custom_records(domain, custom_dns_iter):
for qname, rtype, value, ttl in custom_dns_iter:
# We don't count the secondary nameserver config (if present) as a record - that would just be
# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
- if qname == "_secondary_nameserver": continue
+ if qname == "_secondary_nameserver":
+ continue
# Is this record for the domain or one of its subdomains?
# If `domain` is None, return records for all domains.
- if domain is not None and qname != domain and not qname.endswith("." + domain): continue
+ if domain is not None and qname != domain and not qname.endswith(
+ "." + domain):
+ continue
# Turn the fully qualified domain name in the YAML file into
# our short form (None => domain, or a relative QNAME) if
@@ -954,10 +1131,11 @@ def filter_custom_records(domain, custom_dns_iter):
if qname == domain:
qname = None
else:
- qname = qname[0:len(qname)-len("." + domain)]
+ qname = qname[0:len(qname) - len("." + domain)]
yield (qname, rtype, value, ttl)
+
def write_custom_dns_config(config, env):
# We get a list of (qname, rtype, value, ttl) triples. Convert this into a
# nice dictionary format for storage on disk.
@@ -968,11 +1146,14 @@ def write_custom_dns_config(config, env):
# Process the qnames in the order we see them.
for qname in [rec[0] for rec in config]:
- if qname in seen_qnames: continue
+ if qname in seen_qnames:
+ continue
seen_qnames.add(qname)
- records = [(rec[1], rec[2], rec[3]) for rec in config if rec[0] == qname]
- if len(records) == 1 and records[0][0] == "A" and records[0][2] is None:
+ records = [(rec[1], rec[2], rec[3]) for rec in config
+ if rec[0] == qname]
+ if len(
+ records) == 1 and records[0][0] == "A" and records[0][2] is None:
dns[qname] = records[0][1]
else:
dns[qname] = OrderedDict()
@@ -980,10 +1161,14 @@ def write_custom_dns_config(config, env):
# Process the rtypes in the order we see them.
for rtype in [rec[0] for rec in records]:
- if rtype in seen_rtypes: continue
+ if rtype in seen_rtypes:
+ continue
seen_rtypes.add(rtype)
- values = [(rec[1] if rec[2] is None else {"value": rec[1], "ttl": min(max(TTL_MIN, rec[2]), TTL_MAX)}) for rec in records if rec[0] == rtype]
+ values = [(rec[1] if rec[2] is None else {
+ "value": rec[1],
+ "ttl": min(max(TTL_MIN, rec[2]), TTL_MAX)
+ }) for rec in records if rec[0] == rtype]
if len(values) == 1:
values = values[0]
dns[qname][rtype] = values
@@ -993,17 +1178,20 @@ def write_custom_dns_config(config, env):
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
f.write(config_yaml)
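
For orientation, a hypothetical dns/custom.yaml matching the short and long forms this function writes; all names and values below are made up, and ttl values are clamped into [TTL_MIN, TTL_MAX] as shown above:

    # Hypothetical dns/custom.yaml:
    www.example.com: 203.0.113.10   # short form: a bare string means one A record
    example.com:
        TXT: "v=spf1 mx -all"       # long form: rtype -> value
        MX:
            value: 10 mail.example.com.
            ttl: 86400              # stored as {value, ttl} when a ttl is set
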
+
def set_custom_dns_record(qname, rtype, value, action, env, ttl=None):
# validate qname
for zone, fn in get_dns_zones(env):
# It must match a zone apex or be a subdomain of a zone
# that we are otherwise hosting.
- if qname == zone or qname.endswith("."+zone):
+ if qname == zone or qname.endswith("." + zone):
break
else:
# No match.
if qname != "_secondary_nameserver":
- raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
+ raise ValueError(
+ "%s is not a domain name or a subdomain of a domain name managed by this box."
+ % qname)
# validate rtype
rtype = rtype.upper()
@@ -1012,10 +1200,14 @@ def set_custom_dns_record(qname, rtype, value, action, env, ttl=None):
raise ValueError("Invalid name.")
if rtype in ("A", "AAAA"):
- if value != "local": # "local" is a special flag for us
- v = ipaddress.ip_address(value) # raises a ValueError if there's a problem
- if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
- if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
+ if value != "local": # "local" is a special flag for us
+ # raises a ValueError if there's a problem
+ v = ipaddress.ip_address(value)
+ if rtype == "A" and not isinstance(v, ipaddress.IPv4Address):
+ raise ValueError("That's an IPv6 address.")
+ if rtype == "AAAA" and not isinstance(v,
+ ipaddress.IPv6Address):
+ raise ValueError("That's an IPv4 address.")
elif rtype in ("CNAME", "NS"):
if rtype == "NS" and qname == zone:
raise ValueError("NS records can only be set for subdomains.")
@@ -1078,15 +1270,18 @@ def set_custom_dns_record(qname, rtype, value, action, env, ttl=None):
write_custom_dns_config(newconfig, env)
return made_change
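
A hedged usage sketch of the function above; the domain and address are hypothetical, and env is the usual Mail-in-a-Box environment dict:

    # Add an A record for a subdomain of a hosted zone. ValueError is raised
    # if the qname is not a zone apex or a subdomain of one, or if the value
    # fails the per-rtype checks shown above.
    made_change = set_custom_dns_record(
        "app.example.com",  # must equal a zone or end with "." + zone
        "A",                # rtype is upper-cased before validation
        "203.0.113.10",     # parsed with ipaddress.ip_address()
        "set",              # replace existing records for this qname/rtype
        env)
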
+
########################################################################
+
def get_secondary_dns(custom_dns, mode=None):
resolver = dns.resolver.get_default_resolver()
resolver.timeout = 10
values = []
for qname, rtype, value, ttl in custom_dns:
- if qname != '_secondary_nameserver': continue
+ if qname != '_secondary_nameserver':
+ continue
for hostname in value.split(" "):
hostname = hostname.strip()
if mode == None:
@@ -1100,9 +1295,13 @@ def get_secondary_dns(custom_dns, mode=None):
# doesn't.
if not hostname.startswith("xfr:"):
if mode == "xfr":
- response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
+ response = dns.resolver.resolve(hostname + '.',
+ "A",
+ raise_on_no_answer=False)
values.extend(map(str, response))
- response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
+ response = dns.resolver.resolve(hostname + '.',
+ "AAAA",
+ raise_on_no_answer=False)
values.extend(map(str, response))
continue
values.append(hostname)
@@ -1116,6 +1315,7 @@ def get_secondary_dns(custom_dns, mode=None):
return values
+
def set_secondary_dns(hostnames, env):
if len(hostnames) > 0:
# Validate that all hostnames are valid and that all zone-xfer IP addresses are valid.
@@ -1126,23 +1326,31 @@ def set_secondary_dns(hostnames, env):
# Resolve hostname.
try:
response = resolver.resolve(item, "A")
- except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+ except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN,
+ dns.resolver.NoAnswer):
try:
response = resolver.query(item, "AAAA")
- except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
- raise ValueError("Could not resolve the IP address of %s." % item)
+ except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN,
+ dns.resolver.NoAnswer):
+ raise ValueError(
+ "Could not resolve the IP address of %s." % item)
else:
# Validate IP address.
try:
if "/" in item[4:]:
- v = ipaddress.ip_network(item[4:]) # raises a ValueError if there's a problem
+ # raises a ValueError if there's a problem
+ v = ipaddress.ip_network(item[4:])
else:
- v = ipaddress.ip_address(item[4:]) # raises a ValueError if there's a problem
+ # raises a ValueError if there's a problem
+ v = ipaddress.ip_address(item[4:])
except ValueError:
- raise ValueError("'%s' is not an IPv4 or IPv6 address or subnet." % item[4:])
+ raise ValueError(
+ "'%s' is not an IPv4 or IPv6 address or subnet." %
+ item[4:])
# Set.
- set_custom_dns_record("_secondary_nameserver", "A", " ".join(hostnames), "set", env)
+ set_custom_dns_record("_secondary_nameserver", "A",
+ " ".join(hostnames), "set", env)
else:
# Clear.
set_custom_dns_record("_secondary_nameserver", "A", None, "set", env)
@@ -1157,8 +1365,10 @@ def get_custom_dns_records(custom_dns, qname, rtype):
yield value
return None
+
########################################################################
+
def build_recommended_dns(env):
ret = []
for (domain, zonefile, records) in build_zones(env):
@@ -1166,7 +1376,8 @@ def build_recommended_dns(env):
records = [r for r in records if r[3] is not False]
	# put Required at the top, then Recommended, then everything else
- records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
+ records.sort(key=lambda r: 0 if r[3].startswith("Required.") else
+ (1 if r[3].startswith("Recommended.") else 2))
# expand qnames
for i in range(len(records)):
@@ -1186,6 +1397,7 @@ def build_recommended_dns(env):
ret.append((domain, records))
return ret
+
if __name__ == "__main__":
from utils import load_environment
env = load_environment()
@@ -1195,5 +1407,8 @@ if __name__ == "__main__":
for zone, records in build_recommended_dns(env):
for record in records:
print("; " + record['explanation'])
- print(record['qname'], record['rtype'], record['value'], sep="\t")
+ print(record['qname'],
+ record['rtype'],
+ record['value'],
+ sep="\t")
print()
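
The Required/Recommended ordering in build_recommended_dns can be checked in isolation. A doctest-style sketch with hypothetical records (only the explanation text in position 3 matters to the key):

    >>> key = lambda r: 0 if r[3].startswith("Required.") else (
    ...     1 if r[3].startswith("Recommended.") else 2)
    >>> recs = [(None, "TXT", "x", "Optional."),
    ...         (None, "MX", "y", "Required. Mail will not arrive without this."),
    ...         (None, "A", "z", "Recommended. Points the domain at this box.")]
    >>> [r[1] for r in sorted(recs, key=key)]
    ['MX', 'A', 'TXT']
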
diff --git a/management/editconf.py b/management/editconf.py
index 27bbd93..f9d0841 100755
--- a/management/editconf.py
+++ b/management/editconf.py
@@ -24,9 +24,17 @@
# create the new config file in memory
-import sys, re
+import sys
+import re
-def edit_conf(filename, settings, delimiter_re, delimiter, comment_char, folded_lines = False, testing = False):
+
+def edit_conf(filename,
+ settings,
+ delimiter_re,
+ delimiter,
+ comment_char,
+ folded_lines=False,
+ testing=False):
found = set()
buf = ""
input_lines = list(open(filename, "r+"))
@@ -45,25 +53,26 @@ def edit_conf(filename, settings, delimiter_re, delimiter, comment_char, folded_
# Check that this line contain this setting from the command-line arguments.
name, val = settings[i].split("=", 1)
m = re.match(
- "(\s*)"
- + "(" + re.escape(comment_char) + "\s*)?"
- + re.escape(name) + delimiter_re + "(.*?)\s*$",
- line, re.S)
- if not m: continue
+ "(\s*)" + "(" + re.escape(comment_char) + "\s*)?" +
+ re.escape(name) + delimiter_re + "(.*?)\s*$", line, re.S)
+ if not m:
+ continue
indent, is_comment, existing_val = m.groups()
# If this is already the setting, do nothing.
if is_comment is None and existing_val == val:
# It may be that we've already inserted this setting higher
# in the file so check for that first.
- if i in found: break
+ if i in found:
+ break
buf += line
found.add(i)
break
# comment-out the existing line (also comment any folded lines)
if is_comment is None:
- buf += comment_char + line.rstrip().replace("\n", "\n" + comment_char) + "\n"
+ buf += comment_char + line.rstrip().replace(
+ "\n", "\n" + comment_char) + "\n"
else:
# the line is already commented, pass it through
buf += line
@@ -97,11 +106,14 @@ def edit_conf(filename, settings, delimiter_re, delimiter, comment_char, folded_
# Just print the new file to stdout.
print(buf)
+
# Run standalone
if __name__ == "__main__":
# sanity check
if len(sys.argv) < 3:
-		print("usage: python3 editconf.py /etc/file.conf [-s] [-w] [-c <CHARACTER>] [-t] NAME=VAL [NAME=VAL ...]")
+ print(
+			"usage: python3 editconf.py /etc/file.conf [-s] [-w] [-c <CHARACTER>] [-t] NAME=VAL [NAME=VAL ...]"
+ )
sys.exit(1)
# parse command line arguments
@@ -140,4 +152,5 @@ if __name__ == "__main__":
print("Invalid command line: ", subprocess.list2cmdline(sys.argv))
sys.exit(1)
- edit_conf(filename, settings, delimiter_re, delimiter, comment_char, folded_lines, testing)
+ edit_conf(filename, settings, delimiter_re, delimiter, comment_char,
+ folded_lines, testing)
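
A hedged sketch of calling edit_conf directly on a hypothetical NAME=VALUE config file; with testing=True the rewritten file is printed to stdout rather than written back:

    edit_conf("/etc/default/example",   # hypothetical target file
        ["TIMEOUT=30", "ENABLED=yes"],  # settings as NAME=VALUE strings
        delimiter_re=r"\s*=\s*",        # how NAME and value are separated
        delimiter="=",                  # used when appending new settings
        comment_char="#",
        testing=True)
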
diff --git a/management/email_administrator.py b/management/email_administrator.py
index 5701392..75605bc 100755
--- a/management/email_administrator.py
+++ b/management/email_administrator.py
@@ -39,24 +39,26 @@ msg = MIMEMultipart('alternative')
noreply = "noreply-daemon@" + env['PRIMARY_HOSTNAME']
admin_addr = "administrator@" + env['PRIMARY_HOSTNAME']
-msg['From'] = "\"%s\" <%s>" % ("System Management Daemon", "noreply-daemon@" + env['PRIMARY_HOSTNAME'])
+msg['From'] = "\"%s\" <%s>" % ("System Management Daemon",
+ "noreply-daemon@" + env['PRIMARY_HOSTNAME'])
msg['To'] = "administrator@" + env['PRIMARY_HOSTNAME']
msg['Subject'] = "[%s] %s" % (env['PRIMARY_HOSTNAME'], subject)
-content_html = "<html><body><pre>{}</pre></body></html>".format(html.escape(content))
+content_html = "<html><body><pre>{}</pre></body></html>".format(
+	html.escape(content))
msg.attach(MIMEText(create_signature(content.encode()).decode(), 'plain'))
msg.attach(MIMEText(content_html, 'html'))
# In Python 3.6:
-#msg.set_content(content)
+# msg.set_content(content)
#msg.add_alternative(content_html, "html")
# send
smtpclient = smtplib.SMTP('127.0.0.1', 25)
smtpclient.ehlo()
smtpclient.sendmail(
- noreply, # MAIL FROM
- admin_addr, # RCPT TO
- msg.as_string())
+ noreply, # MAIL FROM
+ admin_addr, # RCPT TO
+ msg.as_string())
smtpclient.quit()
diff --git a/management/mail_log.py b/management/mail_log.py
index bdf757c..bdb8afb 100755
--- a/management/mail_log.py
+++ b/management/mail_log.py
@@ -16,32 +16,31 @@ from dateutil.relativedelta import relativedelta
import utils
-
LOG_FILES = (
- '/var/log/mail.log.6.gz',
- '/var/log/mail.log.5.gz',
- '/var/log/mail.log.4.gz',
- '/var/log/mail.log.3.gz',
- '/var/log/mail.log.2.gz',
- '/var/log/mail.log.1',
- '/var/log/mail.log',
+ '/var/log/mail.log.6.gz',
+ '/var/log/mail.log.5.gz',
+ '/var/log/mail.log.4.gz',
+ '/var/log/mail.log.3.gz',
+ '/var/log/mail.log.2.gz',
+ '/var/log/mail.log.1',
+ '/var/log/mail.log',
)
TIME_DELTAS = OrderedDict([
- ('all', datetime.timedelta(weeks=52)),
- ('month', datetime.timedelta(weeks=4)),
- ('2weeks', datetime.timedelta(days=14)),
- ('week', datetime.timedelta(days=7)),
- ('2days', datetime.timedelta(days=2)),
- ('day', datetime.timedelta(days=1)),
- ('12hours', datetime.timedelta(hours=12)),
- ('6hours', datetime.timedelta(hours=6)),
- ('hour', datetime.timedelta(hours=1)),
- ('30min', datetime.timedelta(minutes=30)),
- ('10min', datetime.timedelta(minutes=10)),
- ('5min', datetime.timedelta(minutes=5)),
- ('min', datetime.timedelta(minutes=1)),
- ('today', datetime.datetime.now() - datetime.datetime.now().replace(hour=0, minute=0, second=0))
+ ('all', datetime.timedelta(weeks=52)),
+ ('month', datetime.timedelta(weeks=4)),
+ ('2weeks', datetime.timedelta(days=14)),
+ ('week', datetime.timedelta(days=7)),
+ ('2days', datetime.timedelta(days=2)), ('day', datetime.timedelta(days=1)),
+ ('12hours', datetime.timedelta(hours=12)),
+ ('6hours', datetime.timedelta(hours=6)),
+ ('hour', datetime.timedelta(hours=1)),
+ ('30min', datetime.timedelta(minutes=30)),
+ ('10min', datetime.timedelta(minutes=10)),
+ ('5min', datetime.timedelta(minutes=5)),
+ ('min', datetime.timedelta(minutes=1)),
+ ('today', datetime.datetime.now() -
+ datetime.datetime.now().replace(hour=0, minute=0, second=0))
])
END_DATE = NOW = datetime.datetime.now()
@@ -61,816 +60,867 @@ SCAN_BLOCKED = False # Rejected email
def scan_files(collector):
- """ Scan files until they run out or the earliest date is reached """
+ """ Scan files until they run out or the earliest date is reached """
- stop_scan = False
+ stop_scan = False
- for fn in LOG_FILES:
+ for fn in LOG_FILES:
- tmp_file = None
+ tmp_file = None
- if not os.path.exists(fn):
- continue
- elif fn[-3:] == '.gz':
- tmp_file = tempfile.NamedTemporaryFile()
- shutil.copyfileobj(gzip.open(fn), tmp_file)
+ if not os.path.exists(fn):
+ continue
+ elif fn[-3:] == '.gz':
+ tmp_file = tempfile.NamedTemporaryFile()
+ shutil.copyfileobj(gzip.open(fn), tmp_file)
- if VERBOSE:
- print("Processing file", fn, "...")
- fn = tmp_file.name if tmp_file else fn
-
- for line in readline(fn):
- if scan_mail_log_line(line.strip(), collector) is False:
- if stop_scan:
- return
- stop_scan = True
- else:
- stop_scan = False
+ if VERBOSE:
+ print("Processing file", fn, "...")
+ fn = tmp_file.name if tmp_file else fn
+ for line in readline(fn):
+ if scan_mail_log_line(line.strip(), collector) is False:
+ if stop_scan:
+ return
+ stop_scan = True
+ else:
+ stop_scan = False
def scan_mail_log(env):
- """ Scan the system's mail log files and collect interesting data
+ """ Scan the system's mail log files and collect interesting data
- This function scans the 2 most recent mail log files in /var/log/.
+ This function scans the 2 most recent mail log files in /var/log/.
- Args:
- env (dict): Dictionary containing MiaB settings
+ Args:
+ env (dict): Dictionary containing MiaB settings
- """
+ """
- collector = {
- "scan_count": 0, # Number of lines scanned
- "parse_count": 0, # Number of lines parsed (i.e. that had their contents examined)
- "scan_time": time.time(), # The time in seconds the scan took
- "sent_mail": OrderedDict(), # Data about email sent by users
- "received_mail": OrderedDict(), # Data about email received by users
- "logins": OrderedDict(), # Data about login activity
- "postgrey": {}, # Data about greylisting of email addresses
- "rejected": OrderedDict(), # Emails that were blocked
- "known_addresses": None, # Addresses handled by the Miab installation
- "other-services": set(),
- }
+ collector = {
+ "scan_count": 0, # Number of lines scanned
+ "parse_count":
+ 0, # Number of lines parsed (i.e. that had their contents examined)
+ "scan_time": time.time(), # The time in seconds the scan took
+ "sent_mail": OrderedDict(), # Data about email sent by users
+ "received_mail": OrderedDict(), # Data about email received by users
+ "logins": OrderedDict(), # Data about login activity
+ "postgrey": {}, # Data about greylisting of email addresses
+ "rejected": OrderedDict(), # Emails that were blocked
+ "known_addresses": None, # Addresses handled by the Miab installation
+ "other-services": set(),
+ }
- try:
- import mailconfig
- collector["known_addresses"] = (set(mailconfig.get_mail_users(env)) |
- set(alias[0] for alias in mailconfig.get_mail_aliases(env)))
- except ImportError:
- pass
+ try:
+ import mailconfig
+ collector["known_addresses"] = (
+ set(mailconfig.get_mail_users(env))
+ | set(alias[0] for alias in mailconfig.get_mail_aliases(env)))
+ except ImportError:
+ pass
- print("Scanning logs from {:%Y-%m-%d %H:%M:%S} to {:%Y-%m-%d %H:%M:%S}".format(
- START_DATE, END_DATE)
- )
+ print("Scanning logs from {:%Y-%m-%d %H:%M:%S} to {:%Y-%m-%d %H:%M:%S}".
+ format(START_DATE, END_DATE))
- # Scan the lines in the log files until the date goes out of range
- scan_files(collector)
+ # Scan the lines in the log files until the date goes out of range
+ scan_files(collector)
- if not collector["scan_count"]:
- print("No log lines scanned...")
- return
+ if not collector["scan_count"]:
+ print("No log lines scanned...")
+ return
- collector["scan_time"] = time.time() - collector["scan_time"]
+ collector["scan_time"] = time.time() - collector["scan_time"]
- print("{scan_count} Log lines scanned, {parse_count} lines parsed in {scan_time:.2f} "
- "seconds\n".format(**collector))
+ print(
+ "{scan_count} Log lines scanned, {parse_count} lines parsed in {scan_time:.2f} "
+ "seconds\n".format(**collector))
- # Print Sent Mail report
+ # Print Sent Mail report
- if collector["sent_mail"]:
- msg = "Sent email"
- print_header(msg)
+ if collector["sent_mail"]:
+ msg = "Sent email"
+ print_header(msg)
- data = OrderedDict(sorted(collector["sent_mail"].items(), key=email_sort))
+ data = OrderedDict(
+ sorted(collector["sent_mail"].items(), key=email_sort))
- print_user_table(
- data.keys(),
- data=[
- ("sent", [u["sent_count"] for u in data.values()]),
- ("hosts", [len(u["hosts"]) for u in data.values()]),
- ],
- sub_data=[
- ("sending hosts", [u["hosts"] for u in data.values()]),
- ],
- activity=[
- ("sent", [u["activity-by-hour"] for u in data.values()]),
- ],
- earliest=[u["earliest"] for u in data.values()],
- latest=[u["latest"] for u in data.values()],
- )
+ print_user_table(
+ data.keys(),
+ data=[
+ ("sent", [u["sent_count"] for u in data.values()]),
+ ("hosts", [len(u["hosts"]) for u in data.values()]),
+ ],
+ sub_data=[
+ ("sending hosts", [u["hosts"] for u in data.values()]),
+ ],
+ activity=[
+ ("sent", [u["activity-by-hour"] for u in data.values()]),
+ ],
+ earliest=[u["earliest"] for u in data.values()],
+ latest=[u["latest"] for u in data.values()],
+ )
- accum = defaultdict(int)
- data = collector["sent_mail"].values()
+ accum = defaultdict(int)
+ data = collector["sent_mail"].values()
- for h in range(24):
- accum[h] = sum(d["activity-by-hour"][h] for d in data)
+ for h in range(24):
+ accum[h] = sum(d["activity-by-hour"][h] for d in data)
- print_time_table(
- ["sent"],
- [accum]
- )
+ print_time_table(["sent"], [accum])
- # Print Received Mail report
+ # Print Received Mail report
- if collector["received_mail"]:
- msg = "Received email"
- print_header(msg)
+ if collector["received_mail"]:
+ msg = "Received email"
+ print_header(msg)
- data = OrderedDict(sorted(collector["received_mail"].items(), key=email_sort))
+ data = OrderedDict(
+ sorted(collector["received_mail"].items(), key=email_sort))
- print_user_table(
- data.keys(),
- data=[
- ("received", [u["received_count"] for u in data.values()]),
- ],
- activity=[
- ("sent", [u["activity-by-hour"] for u in data.values()]),
- ],
- earliest=[u["earliest"] for u in data.values()],
- latest=[u["latest"] for u in data.values()],
- )
+ print_user_table(
+ data.keys(),
+ data=[
+ ("received", [u["received_count"] for u in data.values()]),
+ ],
+ activity=[
+ ("sent", [u["activity-by-hour"] for u in data.values()]),
+ ],
+ earliest=[u["earliest"] for u in data.values()],
+ latest=[u["latest"] for u in data.values()],
+ )
- accum = defaultdict(int)
- for h in range(24):
- accum[h] = sum(d["activity-by-hour"][h] for d in data.values())
+ accum = defaultdict(int)
+ for h in range(24):
+ accum[h] = sum(d["activity-by-hour"][h] for d in data.values())
- print_time_table(
- ["received"],
- [accum]
- )
+ print_time_table(["received"], [accum])
- # Print login report
+ # Print login report
- if collector["logins"]:
- msg = "User logins per hour"
- print_header(msg)
+ if collector["logins"]:
+ msg = "User logins per hour"
+ print_header(msg)
- data = OrderedDict(sorted(collector["logins"].items(), key=email_sort))
+ data = OrderedDict(sorted(collector["logins"].items(), key=email_sort))
- # Get a list of all of the protocols seen in the logs in reverse count order.
- all_protocols = defaultdict(int)
- for u in data.values():
- for protocol_name, count in u["totals_by_protocol"].items():
- all_protocols[protocol_name] += count
- all_protocols = [k for k, v in sorted(all_protocols.items(), key=lambda kv : -kv[1])]
+ # Get a list of all of the protocols seen in the logs in reverse count order.
+ all_protocols = defaultdict(int)
+ for u in data.values():
+ for protocol_name, count in u["totals_by_protocol"].items():
+ all_protocols[protocol_name] += count
+ all_protocols = [
+ k for k, v in sorted(all_protocols.items(), key=lambda kv: -kv[1])
+ ]
- print_user_table(
- data.keys(),
- data=[
- (protocol_name, [
- round(u["totals_by_protocol"][protocol_name] / (u["latest"]-u["earliest"]).total_seconds() * 60*60, 1)
- if (u["latest"]-u["earliest"]).total_seconds() > 0
- else 0 # prevent division by zero
- for u in data.values()])
- for protocol_name in all_protocols
- ],
- sub_data=[
- ("Protocol and Source", [[
- "{} {}: {} times".format(protocol_name, host, count)
- for (protocol_name, host), count
- in sorted(u["totals_by_protocol_and_host"].items(), key=lambda kv:-kv[1])
- ] for u in data.values()])
- ],
- activity=[
- (protocol_name, [u["activity-by-hour"][protocol_name] for u in data.values()])
- for protocol_name in all_protocols
- ],
- earliest=[u["earliest"] for u in data.values()],
- latest=[u["latest"] for u in data.values()],
- numstr=lambda n : str(round(n, 1)),
- )
+ print_user_table(
+ data.keys(),
+ data=[
+ (
+ protocol_name,
+ [
+ round(
+ u["totals_by_protocol"][protocol_name] /
+ (u["latest"] - u["earliest"]).total_seconds() *
+ 60 * 60, 1) if
+ (u["latest"] - u["earliest"]).total_seconds() > 0 else
+ 0 # prevent division by zero
+ for u in data.values()
+ ]) for protocol_name in all_protocols
+ ],
+ sub_data=[("Protocol and Source", [[
+ "{} {}: {} times".format(protocol_name, host, count)
+ for (protocol_name, host), count in sorted(
+ u["totals_by_protocol_and_host"].items(),
+ key=lambda kv: -kv[1])
+ ] for u in data.values()])],
+ activity=[
+ (protocol_name,
+ [u["activity-by-hour"][protocol_name] for u in data.values()])
+ for protocol_name in all_protocols
+ ],
+ earliest=[u["earliest"] for u in data.values()],
+ latest=[u["latest"] for u in data.values()],
+ numstr=lambda n: str(round(n, 1)),
+ )
- accum = { protocol_name: defaultdict(int) for protocol_name in all_protocols }
- for h in range(24):
- for protocol_name in all_protocols:
- accum[protocol_name][h] = sum(d["activity-by-hour"][protocol_name][h] for d in data.values())
+ accum = {
+ protocol_name: defaultdict(int)
+ for protocol_name in all_protocols
+ }
+ for h in range(24):
+ for protocol_name in all_protocols:
+ accum[protocol_name][h] = sum(
+ d["activity-by-hour"][protocol_name][h]
+ for d in data.values())
- print_time_table(
- all_protocols,
- [accum[protocol_name] for protocol_name in all_protocols]
- )
+ print_time_table(
+ all_protocols,
+ [accum[protocol_name] for protocol_name in all_protocols])
- if collector["postgrey"]:
- msg = "Greylisted Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}"
- print_header(msg.format(START_DATE, END_DATE))
+ if collector["postgrey"]:
+ msg = "Greylisted Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}"
+ print_header(msg.format(START_DATE, END_DATE))
- print(textwrap.fill(
- "The following mail was greylisted, meaning the emails were temporarily rejected. "
- "Legitimate senders must try again after three minutes.",
- width=80, initial_indent=" ", subsequent_indent=" "
- ), end='\n\n')
+ print(textwrap.fill(
+ "The following mail was greylisted, meaning the emails were temporarily rejected. "
+ "Legitimate senders must try again after three minutes.",
+ width=80,
+ initial_indent=" ",
+ subsequent_indent=" "),
+ end='\n\n')
- data = OrderedDict(sorted(collector["postgrey"].items(), key=email_sort))
- users = []
- received = []
- senders = []
- sender_clients = []
- delivered_dates = []
+ data = OrderedDict(
+ sorted(collector["postgrey"].items(), key=email_sort))
+ users = []
+ received = []
+ senders = []
+ sender_clients = []
+ delivered_dates = []
- for recipient in data:
- sorted_recipients = sorted(data[recipient].items(), key=lambda kv: kv[1][0] or kv[1][1])
- for (client_address, sender), (first_date, delivered_date) in sorted_recipients:
- if first_date:
- users.append(recipient)
- received.append(first_date)
- senders.append(sender)
- delivered_dates.append(delivered_date)
- sender_clients.append(client_address)
+ for recipient in data:
+ sorted_recipients = sorted(data[recipient].items(),
+ key=lambda kv: kv[1][0] or kv[1][1])
+ for (client_address,
+ sender), (first_date, delivered_date) in sorted_recipients:
+ if first_date:
+ users.append(recipient)
+ received.append(first_date)
+ senders.append(sender)
+ delivered_dates.append(delivered_date)
+ sender_clients.append(client_address)
- print_user_table(
- users,
- data=[
- ("received", received),
- ("sender", senders),
- ("delivered", [str(d) or "no retry yet" for d in delivered_dates]),
- ("sending host", sender_clients)
- ],
- delimit=True,
- )
+ print_user_table(
+ users,
+ data=[("received", received), ("sender", senders),
+ ("delivered",
+ [str(d) or "no retry yet" for d in delivered_dates]),
+ ("sending host", sender_clients)],
+ delimit=True,
+ )
- if collector["rejected"]:
- msg = "Blocked Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}"
- print_header(msg.format(START_DATE, END_DATE))
+ if collector["rejected"]:
+ msg = "Blocked Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}"
+ print_header(msg.format(START_DATE, END_DATE))
- data = OrderedDict(sorted(collector["rejected"].items(), key=email_sort))
+ data = OrderedDict(
+ sorted(collector["rejected"].items(), key=email_sort))
- rejects = []
+ rejects = []
- if VERBOSE:
- for user_data in data.values():
- user_rejects = []
- for date, sender, message in user_data["blocked"]:
- if len(sender) > 64:
- sender = sender[:32] + "…" + sender[-32:]
- user_rejects.append("%s - %s " % (date, sender))
- user_rejects.append(" %s" % message)
- rejects.append(user_rejects)
+ if VERBOSE:
+ for user_data in data.values():
+ user_rejects = []
+ for date, sender, message in user_data["blocked"]:
+ if len(sender) > 64:
+ sender = sender[:32] + "…" + sender[-32:]
+ user_rejects.append("%s - %s " % (date, sender))
+ user_rejects.append(" %s" % message)
+ rejects.append(user_rejects)
- print_user_table(
- data.keys(),
- data=[
- ("blocked", [len(u["blocked"]) for u in data.values()]),
- ],
- sub_data=[
- ("blocked emails", rejects),
- ],
- earliest=[u["earliest"] for u in data.values()],
- latest=[u["latest"] for u in data.values()],
- )
+ print_user_table(
+ data.keys(),
+ data=[
+ ("blocked", [len(u["blocked"]) for u in data.values()]),
+ ],
+ sub_data=[
+ ("blocked emails", rejects),
+ ],
+ earliest=[u["earliest"] for u in data.values()],
+ latest=[u["latest"] for u in data.values()],
+ )
- if collector["other-services"] and VERBOSE and False:
- print_header("Other services")
-		print("The following unknown services were found in the log file.")
- print(" ", *sorted(list(collector["other-services"])), sep='\n│ ')
+ if collector["other-services"] and VERBOSE and False:
+ print_header("Other services")
+		print("The following unknown services were found in the log file.")
+ print(" ", *sorted(list(collector["other-services"])), sep='\n│ ')
def scan_mail_log_line(line, collector):
- """ Scan a log line and extract interesting data """
+ """ Scan a log line and extract interesting data """
- m = re.match(r"(\w+[\s]+\d+ \d+:\d+:\d+) ([\w]+ )?([\w\-/]+)[^:]*: (.*)", line)
+ m = re.match(r"(\w+[\s]+\d+ \d+:\d+:\d+) ([\w]+ )?([\w\-/]+)[^:]*: (.*)",
+ line)
- if not m:
- return True
+ if not m:
+ return True
- date, system, service, log = m.groups()
- collector["scan_count"] += 1
+ date, system, service, log = m.groups()
+ collector["scan_count"] += 1
- # print()
- # print("date:", date)
- # print("host:", system)
- # print("service:", service)
- # print("log:", log)
+ # print()
+ # print("date:", date)
+ # print("host:", system)
+ # print("service:", service)
+ # print("log:", log)
-	# Replaced the dateutil parser with a less clever way of parsing that is roughly 4 times faster.
- # date = dateutil.parser.parse(date)
-
- # strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided.
- # See https://bugs.python.org/issue26460
- date = datetime.datetime.strptime(str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S')
- # if log date in future, step back a year
- if date > NOW:
- date = date.replace(year = NOW.year - 1)
- #print("date:", date)
+	# Replaced the dateutil parser with a less clever way of parsing that is roughly 4 times faster.
+ # date = dateutil.parser.parse(date)
- # Check if the found date is within the time span we are scanning
- if date > END_DATE:
- # Don't process, and halt
- return False
- elif date < START_DATE:
- # Don't process, but continue
- return True
+ # strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided.
+ # See https://bugs.python.org/issue26460
+ date = datetime.datetime.strptime(
+ str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S')
+ # if log date in future, step back a year
+ if date > NOW:
+ date = date.replace(year=NOW.year - 1)
+ #print("date:", date)
- if service == "postfix/submission/smtpd":
- if SCAN_OUT:
- scan_postfix_submission_line(date, log, collector)
- elif service == "postfix/lmtp":
- if SCAN_IN:
- scan_postfix_lmtp_line(date, log, collector)
- elif service.endswith("-login"):
- if SCAN_DOVECOT_LOGIN:
- scan_dovecot_login_line(date, log, collector, service[:4])
- elif service == "postgrey":
- if SCAN_GREY:
- scan_postgrey_line(date, log, collector)
- elif service == "postfix/smtpd":
- if SCAN_BLOCKED:
- scan_postfix_smtpd_line(date, log, collector)
- elif service in ("postfix/qmgr", "postfix/pickup", "postfix/cleanup", "postfix/scache",
- "spampd", "postfix/anvil", "postfix/master", "opendkim", "postfix/lmtp",
- "postfix/tlsmgr", "anvil"):
- # nothing to look at
- return True
- else:
- collector["other-services"].add(service)
- return True
+ # Check if the found date is within the time span we are scanning
+ if date > END_DATE:
+ # Don't process, and halt
+ return False
+ elif date < START_DATE:
+ # Don't process, but continue
+ return True
- collector["parse_count"] += 1
- return True
+ if service == "postfix/submission/smtpd":
+ if SCAN_OUT:
+ scan_postfix_submission_line(date, log, collector)
+ elif service == "postfix/lmtp":
+ if SCAN_IN:
+ scan_postfix_lmtp_line(date, log, collector)
+ elif service.endswith("-login"):
+ if SCAN_DOVECOT_LOGIN:
+ scan_dovecot_login_line(date, log, collector, service[:4])
+ elif service == "postgrey":
+ if SCAN_GREY:
+ scan_postgrey_line(date, log, collector)
+ elif service == "postfix/smtpd":
+ if SCAN_BLOCKED:
+ scan_postfix_smtpd_line(date, log, collector)
+ elif service in ("postfix/qmgr", "postfix/pickup", "postfix/cleanup",
+ "postfix/scache", "spampd", "postfix/anvil",
+ "postfix/master", "opendkim", "postfix/lmtp",
+ "postfix/tlsmgr", "anvil"):
+ # nothing to look at
+ return True
+ else:
+ collector["other-services"].add(service)
+ return True
+
+ collector["parse_count"] += 1
+ return True
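
The strptime year trick above can be sketched on its own: syslog timestamps carry no year, so the current year is assumed and then rolled back by one when the result lands in the future. The fixed "now" below is hypothetical:

    >>> import datetime
    >>> NOW = datetime.datetime(2016, 1, 2, 3, 4, 5)  # hypothetical clock
    >>> d = datetime.datetime.strptime(
    ...     str(NOW.year) + ' ' + 'Dec 31 23:59:59', '%Y %b %d %H:%M:%S')
    >>> if d > NOW:  # a December entry read in January is from last year
    ...     d = d.replace(year=NOW.year - 1)
    >>> d
    datetime.datetime(2015, 12, 31, 23, 59, 59)
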
def scan_postgrey_line(date, log, collector):
- """ Scan a postgrey log line and extract interesting data """
+ """ Scan a postgrey log line and extract interesting data """
- m = re.match("action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), "
- "client_address=(.*), sender=(.*), recipient=(.*)",
- log)
+ m = re.match(
+ "action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), "
+ "client_address=(.*), sender=(.*), recipient=(.*)", log)
- if m:
+ if m:
- action, reason, client_name, client_address, sender, user = m.groups()
+ action, reason, client_name, client_address, sender, user = m.groups()
- if user_match(user):
+ if user_match(user):
-			# Might be useful to group services that send a lot of mail from different
-			# servers on subdomains like 1.domain.com
+			# Might be useful to group services that send a lot of mail from different
+			# servers on subdomains like 1.domain.com
- # if '.' in client_name:
- # addr = client_name.split('.')
- # if len(addr) > 2:
- # client_name = '.'.join(addr[1:])
+ # if '.' in client_name:
+ # addr = client_name.split('.')
+ # if len(addr) > 2:
+ # client_name = '.'.join(addr[1:])
- key = (client_address if client_name == 'unknown' else client_name, sender)
+ key = (client_address if client_name == 'unknown' else client_name,
+ sender)
- rep = collector["postgrey"].setdefault(user, {})
+ rep = collector["postgrey"].setdefault(user, {})
- if action == "greylist" and reason == "new":
- rep[key] = (date, rep[key][1] if key in rep else None)
- elif action == "pass":
- rep[key] = (rep[key][0] if key in rep else None, date)
+ if action == "greylist" and reason == "new":
+ rep[key] = (date, rep[key][1] if key in rep else None)
+ elif action == "pass":
+ rep[key] = (rep[key][0] if key in rep else None, date)
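
The (first_date, delivered_date) tuples maintained above pair the initial greylisting with the eventual successful retry. A compact doctest-style sketch with hypothetical dates:

    >>> rep = {}
    >>> key = ("mx.example.net", "sender@example.org")  # hypothetical
    >>> # action=greylist, reason=new: record when the mail was first seen
    >>> rep[key] = ("Jan  1 10:00:00", rep[key][1] if key in rep else None)
    >>> # action=pass: record when the retried mail was finally delivered
    >>> rep[key] = (rep[key][0] if key in rep else None, "Jan  1 10:07:12")
    >>> rep[key]
    ('Jan  1 10:00:00', 'Jan  1 10:07:12')
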
def scan_postfix_smtpd_line(date, log, collector):
- """ Scan a postfix smtpd log line and extract interesting data """
+ """ Scan a postfix smtpd log line and extract interesting data """
- # Check if the incoming mail was rejected
+ # Check if the incoming mail was rejected
- m = re.match("NOQUEUE: reject: RCPT from .*?: (.*?); from=<(.*?)> to=<(.*?)>", log)
+ m = re.match(
+ "NOQUEUE: reject: RCPT from .*?: (.*?); from=<(.*?)> to=<(.*?)>", log)
- if m:
- message, sender, user = m.groups()
+ if m:
+ message, sender, user = m.groups()
- # skip this, if reported in the greylisting report
- if "Recipient address rejected: Greylisted" in message:
- return
+ # skip this, if reported in the greylisting report
+ if "Recipient address rejected: Greylisted" in message:
+ return
- # only log mail to known recipients
- if user_match(user):
- if collector["known_addresses"] is None or user in collector["known_addresses"]:
- data = collector["rejected"].get(
- user,
- {
- "blocked": [],
- "earliest": None,
- "latest": None,
- }
- )
- # simplify this one
- m = re.search(
- r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message
- )
- if m:
- message = "ip blocked: " + m.group(2)
- else:
- # simplify this one too
- m = re.search(
- r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)", message
- )
- if m:
- message = "domain blocked: " + m.group(2)
+ # only log mail to known recipients
+ if user_match(user):
+ if collector["known_addresses"] is None or user in collector[
+ "known_addresses"]:
+ data = collector["rejected"].get(user, {
+ "blocked": [],
+ "earliest": None,
+ "latest": None,
+ })
+ # simplify this one
+ m = re.search(
+ r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)",
+ message)
+ if m:
+ message = "ip blocked: " + m.group(2)
+ else:
+ # simplify this one too
+ m = re.search(
+ r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)",
+ message)
+ if m:
+ message = "domain blocked: " + m.group(2)
- if data["earliest"] is None:
- data["earliest"] = date
- data["latest"] = date
- data["blocked"].append((date, sender, message))
+ if data["earliest"] is None:
+ data["earliest"] = date
+ data["latest"] = date
+ data["blocked"].append((date, sender, message))
- collector["rejected"][user] = data
+ collector["rejected"][user] = data
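
The first of the two re.search simplifications above turns a verbose Spamhaus rejection into a short label. A doctest-style sketch with a hypothetical message:

    >>> import re
    >>> message = ("Client host [203.0.113.9] blocked using zen.spamhaus.org; "
    ...            "https://www.spamhaus.org/query/ip/203.0.113.9")  # hypothetical
    >>> m = re.search(
    ...     r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message)
    >>> "ip blocked: " + m.group(2)
    'ip blocked: https://www.spamhaus.org/query/ip/203.0.113.9'
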
def scan_dovecot_login_line(date, log, collector, protocol_name):
- """ Scan a dovecot login log line and extract interesting data """
+ """ Scan a dovecot login log line and extract interesting data """
- m = re.match("Info: Login: user=<(.*?)>, method=PLAIN, rip=(.*?),", log)
+ m = re.match("Info: Login: user=<(.*?)>, method=PLAIN, rip=(.*?),", log)
- if m:
-		# TODO: CHECK THIS
- user, host = m.groups()
+ if m:
+		# TODO: CHECK THIS
+ user, host = m.groups()
- if user_match(user):
- add_login(user, date, protocol_name, host, collector)
+ if user_match(user):
+ add_login(user, date, protocol_name, host, collector)
def add_login(user, date, protocol_name, host, collector):
- # Get the user data, or create it if the user is new
- data = collector["logins"].get(
- user,
- {
- "earliest": None,
- "latest": None,
- "totals_by_protocol": defaultdict(int),
- "totals_by_protocol_and_host": defaultdict(int),
- "activity-by-hour": defaultdict(lambda : defaultdict(int)),
- }
- )
+ # Get the user data, or create it if the user is new
+ data = collector["logins"].get(
+ user, {
+ "earliest": None,
+ "latest": None,
+ "totals_by_protocol": defaultdict(int),
+ "totals_by_protocol_and_host": defaultdict(int),
+ "activity-by-hour": defaultdict(lambda: defaultdict(int)),
+ })
- if data["earliest"] is None:
- data["earliest"] = date
- data["latest"] = date
+ if data["earliest"] is None:
+ data["earliest"] = date
+ data["latest"] = date
- data["totals_by_protocol"][protocol_name] += 1
- data["totals_by_protocol_and_host"][(protocol_name, host)] += 1
+ data["totals_by_protocol"][protocol_name] += 1
+ data["totals_by_protocol_and_host"][(protocol_name, host)] += 1
- if host not in ("127.0.0.1", "::1") or True:
- data["activity-by-hour"][protocol_name][date.hour] += 1
+ if host not in ("127.0.0.1", "::1") or True:
+ data["activity-by-hour"][protocol_name][date.hour] += 1
- collector["logins"][user] = data
+ collector["logins"][user] = data
def scan_postfix_lmtp_line(date, log, collector):
- """ Scan a postfix lmtp log line and extract interesting data
+ """ Scan a postfix lmtp log line and extract interesting data
- It is assumed that every log of postfix/lmtp indicates an email that was successfully
- received by Postfix.
+ It is assumed that every log of postfix/lmtp indicates an email that was successfully
+ received by Postfix.
- """
+ """
- m = re.match("([A-Z0-9]+): to=<(\S+)>, .* Saved", log)
+ m = re.match("([A-Z0-9]+): to=<(\S+)>, .* Saved", log)
- if m:
- _, user = m.groups()
+ if m:
+ _, user = m.groups()
- if user_match(user):
- # Get the user data, or create it if the user is new
- data = collector["received_mail"].get(
- user,
- {
- "received_count": 0,
- "earliest": None,
- "latest": None,
- "activity-by-hour": defaultdict(int),
- }
- )
+ if user_match(user):
+ # Get the user data, or create it if the user is new
+ data = collector["received_mail"].get(
+ user, {
+ "received_count": 0,
+ "earliest": None,
+ "latest": None,
+ "activity-by-hour": defaultdict(int),
+ })
- data["received_count"] += 1
- data["activity-by-hour"][date.hour] += 1
+ data["received_count"] += 1
+ data["activity-by-hour"][date.hour] += 1
- if data["earliest"] is None:
- data["earliest"] = date
- data["latest"] = date
+ if data["earliest"] is None:
+ data["earliest"] = date
+ data["latest"] = date
- collector["received_mail"][user] = data
+ collector["received_mail"][user] = data
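
A doctest-style sketch of the "Saved" regex above against a hypothetical postfix/lmtp log fragment:

    >>> import re
    >>> log = ("9A1B2C3D4E: to=<user@example.com>, relay=127.0.0.1[127.0.0.1]:10025,"
    ...        " status=sent (250 2.0.0 <user@example.com> Saved)")  # hypothetical
    >>> re.match("([A-Z0-9]+): to=<(\S+)>, .* Saved", log).groups()
    ('9A1B2C3D4E', 'user@example.com')
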
def scan_postfix_submission_line(date, log, collector):
- """ Scan a postfix submission log line and extract interesting data
+ """ Scan a postfix submission log line and extract interesting data
- Lines containing a sasl_method with the values PLAIN or LOGIN are assumed to indicate a sent
- email.
+ Lines containing a sasl_method with the values PLAIN or LOGIN are assumed to indicate a sent
+ email.
- """
+ """
- # Match both the 'plain' and 'login' sasl methods, since both authentication methods are
- # allowed by Dovecot. Exclude trailing comma after the username when additional fields
+ # Match both the 'plain' and 'login' sasl methods, since both authentication methods are
+ # allowed by Dovecot. Exclude trailing comma after the username when additional fields
# follow after.
-	m = re.match("([A-Z0-9]+): client=(\S+), sasl_method=(PLAIN|LOGIN), sasl_username=(\S+)(?<!,)", log)
-	for h in range(24):
-		max_len = max(len(str(d[h])) for d in data)
-		base = "{:>%d} " % max(2, max_len)
+ for h in range(24):
+ max_len = max(len(str(d[h])) for d in data)
+ base = "{:>%d} " % max(2, max_len)
- for i, d in enumerate(data):
- lines[i] += base.format(d[h])
+ for i, d in enumerate(data):
+ lines[i] += base.format(d[h])
- lines.insert(0, "┬ totals by time of day:")
- lines.append("└" + (len(lines[-1]) - 2) * "─")
+ lines.insert(0, "┬ totals by time of day:")
+ lines.append("└" + (len(lines[-1]) - 2) * "─")
- if do_print:
- print("\n".join(lines))
- else:
- return lines
+ if do_print:
+ print("\n".join(lines))
+ else:
+ return lines
-def print_user_table(users, data=None, sub_data=None, activity=None, latest=None, earliest=None,
- delimit=False, numstr=str):
- str_temp = "{:<32} "
- lines = []
- data = data or []
+def print_user_table(users,
+ data=None,
+ sub_data=None,
+ activity=None,
+ latest=None,
+ earliest=None,
+ delimit=False,
+ numstr=str):
+ str_temp = "{:<32} "
+ lines = []
+ data = data or []
- col_widths = len(data) * [0]
- col_left = len(data) * [False]
- vert_pos = 0
+ col_widths = len(data) * [0]
+ col_left = len(data) * [False]
+ vert_pos = 0
- do_accum = all(isinstance(n, (int, float)) for _, d in data for n in d)
- data_accum = len(data) * ([0] if do_accum else [" "])
+ do_accum = all(isinstance(n, (int, float)) for _, d in data for n in d)
+ data_accum = len(data) * ([0] if do_accum else [" "])
- last_user = None
+ last_user = None
- for row, user in enumerate(users):
+ for row, user in enumerate(users):
- if delimit:
- if last_user and last_user != user:
- lines.append(len(lines[-1]) * "…")
- last_user = user
+ if delimit:
+ if last_user and last_user != user:
+ lines.append(len(lines[-1]) * "…")
+ last_user = user
- line = "{:<32} ".format(user[:31] + "…" if len(user) > 32 else user)
+ line = "{:<32} ".format(user[:31] + "…" if len(user) > 32 else user)
- for col, (l, d) in enumerate(data):
- if isinstance(d[row], str):
- col_str = str_temp.format(d[row][:31] + "…" if len(d[row]) > 32 else d[row])
- col_left[col] = True
- elif isinstance(d[row], datetime.datetime):
- col_str = "{:<20}".format(str(d[row]))
- col_left[col] = True
- else:
- temp = "{:>%s}" % max(5, len(l) + 1, len(str(d[row])) + 1)
- col_str = temp.format(str(d[row]))
- col_widths[col] = max(col_widths[col], len(col_str))
- line += col_str
+ for col, (l, d) in enumerate(data):
+ if isinstance(d[row], str):
+ col_str = str_temp.format(d[row][:31] +
+ "…" if len(d[row]) > 32 else d[row])
+ col_left[col] = True
+ elif isinstance(d[row], datetime.datetime):
+ col_str = "{:<20}".format(str(d[row]))
+ col_left[col] = True
+ else:
+ temp = "{:>%s}" % max(5, len(l) + 1, len(str(d[row])) + 1)
+ col_str = temp.format(str(d[row]))
+ col_widths[col] = max(col_widths[col], len(col_str))
+ line += col_str
- if do_accum:
- data_accum[col] += d[row]
+ if do_accum:
+ data_accum[col] += d[row]
- try:
- if None not in [latest, earliest]:
- vert_pos = len(line)
- e = earliest[row]
- l = latest[row]
- timespan = relativedelta(l, e)
- if timespan.months:
- temp = " │ {:0.1f} months"
- line += temp.format(timespan.months + timespan.days / 30.0)
- elif timespan.days:
- temp = " │ {:0.1f} days"
- line += temp.format(timespan.days + timespan.hours / 24.0)
- elif (e.hour, e.minute) == (l.hour, l.minute):
- temp = " │ {:%H:%M}"
- line += temp.format(e)
- else:
- temp = " │ {:%H:%M} - {:%H:%M}"
- line += temp.format(e, l)
+ try:
+ if None not in [latest, earliest]:
+ vert_pos = len(line)
+ e = earliest[row]
+ l = latest[row]
+ timespan = relativedelta(l, e)
+ if timespan.months:
+ temp = " │ {:0.1f} months"
+ line += temp.format(timespan.months + timespan.days / 30.0)
+ elif timespan.days:
+ temp = " │ {:0.1f} days"
+ line += temp.format(timespan.days + timespan.hours / 24.0)
+ elif (e.hour, e.minute) == (l.hour, l.minute):
+ temp = " │ {:%H:%M}"
+ line += temp.format(e)
+ else:
+ temp = " │ {:%H:%M} - {:%H:%M}"
+ line += temp.format(e, l)
- except KeyError:
- pass
+ except KeyError:
+ pass
- lines.append(line.rstrip())
+ lines.append(line.rstrip())
- try:
- if VERBOSE:
- if sub_data is not None:
- for l, d in sub_data:
- if d[row]:
- lines.append("┬")
- lines.append("│ %s" % l)
- lines.append("├─%s─" % (len(l) * "─"))
- lines.append("│")
- max_len = 0
- for v in list(d[row]):
- lines.append("│ %s" % v)
- max_len = max(max_len, len(v))
- lines.append("└" + (max_len + 1) * "─")
+ try:
+ if VERBOSE:
+ if sub_data is not None:
+ for l, d in sub_data:
+ if d[row]:
+ lines.append("┬")
+ lines.append("│ %s" % l)
+ lines.append("├─%s─" % (len(l) * "─"))
+ lines.append("│")
+ max_len = 0
+ for v in list(d[row]):
+ lines.append("│ %s" % v)
+ max_len = max(max_len, len(v))
+ lines.append("└" + (max_len + 1) * "─")
- if activity is not None:
- lines.extend(print_time_table(
- [label for label, _ in activity],
- [data[row] for _, data in activity],
- do_print=False
- ))
+ if activity is not None:
+ lines.extend(
+ print_time_table([label for label, _ in activity],
+ [data[row] for _, data in activity],
+ do_print=False))
- except KeyError:
- pass
+ except KeyError:
+ pass
- header = str_temp.format("")
+ header = str_temp.format("")
- for col, (l, _) in enumerate(data):
- if col_left[col]:
- header += l.ljust(max(5, len(l) + 1, col_widths[col]))
- else:
- header += l.rjust(max(5, len(l) + 1, col_widths[col]))
+ for col, (l, _) in enumerate(data):
+ if col_left[col]:
+ header += l.ljust(max(5, len(l) + 1, col_widths[col]))
+ else:
+ header += l.rjust(max(5, len(l) + 1, col_widths[col]))
- if None not in (latest, earliest):
- header += " │ timespan "
+ if None not in (latest, earliest):
+ header += " │ timespan "
- lines.insert(0, header.rstrip())
+ lines.insert(0, header.rstrip())
- table_width = max(len(l) for l in lines)
- t_line = table_width * "─"
- b_line = table_width * "─"
+ table_width = max(len(l) for l in lines)
+ t_line = table_width * "─"
+ b_line = table_width * "─"
- if vert_pos:
- t_line = t_line[:vert_pos + 1] + "┼" + t_line[vert_pos + 2:]
- b_line = b_line[:vert_pos + 1] + ("┬" if VERBOSE else "┼") + b_line[vert_pos + 2:]
+ if vert_pos:
+ t_line = t_line[:vert_pos + 1] + "┼" + t_line[vert_pos + 2:]
+ b_line = b_line[:vert_pos +
+ 1] + ("┬" if VERBOSE else "┼") + b_line[vert_pos + 2:]
- lines.insert(1, t_line)
- lines.append(b_line)
+ lines.insert(1, t_line)
+ lines.append(b_line)
- # Print totals
+ # Print totals
- data_accum = [numstr(a) for a in data_accum]
- footer = str_temp.format("Totals:" if do_accum else " ")
- for row, (l, _) in enumerate(data):
- temp = "{:>%d}" % max(5, len(l) + 1)
- footer += temp.format(data_accum[row])
+ data_accum = [numstr(a) for a in data_accum]
+ footer = str_temp.format("Totals:" if do_accum else " ")
+ for row, (l, _) in enumerate(data):
+ temp = "{:>%d}" % max(5, len(l) + 1)
+ footer += temp.format(data_accum[row])
- try:
- if None not in [latest, earliest]:
- max_l = max(latest)
- min_e = min(earliest)
- timespan = relativedelta(max_l, min_e)
- if timespan.days:
- temp = " │ {:0.2f} days"
- footer += temp.format(timespan.days + timespan.hours / 24.0)
- elif (min_e.hour, min_e.minute) == (max_l.hour, max_l.minute):
- temp = " │ {:%H:%M}"
- footer += temp.format(min_e)
- else:
- temp = " │ {:%H:%M} - {:%H:%M}"
- footer += temp.format(min_e, max_l)
+ try:
+ if None not in [latest, earliest]:
+ max_l = max(latest)
+ min_e = min(earliest)
+ timespan = relativedelta(max_l, min_e)
+ if timespan.days:
+ temp = " │ {:0.2f} days"
+ footer += temp.format(timespan.days + timespan.hours / 24.0)
+ elif (min_e.hour, min_e.minute) == (max_l.hour, max_l.minute):
+ temp = " │ {:%H:%M}"
+ footer += temp.format(min_e)
+ else:
+ temp = " │ {:%H:%M} - {:%H:%M}"
+ footer += temp.format(min_e, max_l)
- except KeyError:
- pass
+ except KeyError:
+ pass
- lines.append(footer)
+ lines.append(footer)
- print("\n".join(lines))
+ print("\n".join(lines))
def print_header(msg):
- print('\n' + msg)
- print("═" * len(msg), '\n')
+ print('\n' + msg)
+ print("═" * len(msg), '\n')
if __name__ == "__main__":
- try:
- env_vars = utils.load_environment()
- except FileNotFoundError:
- env_vars = {}
+ try:
+ env_vars = utils.load_environment()
+ except FileNotFoundError:
+ env_vars = {}
- parser = argparse.ArgumentParser(
- description="Scan the mail log files for interesting data. By default, this script "
- "shows today's incoming and outgoing mail statistics. This script was ("
- "re)written for the Mail-in-a-box email server."
- "https://github.com/mail-in-a-box/mailinabox",
- add_help=False
- )
+ parser = argparse.ArgumentParser(
+ description=
+ "Scan the mail log files for interesting data. By default, this script "
+ "shows today's incoming and outgoing mail statistics. This script was ("
+ "re)written for the Mail-in-a-box email server."
+ "https://github.com/mail-in-a-box/mailinabox",
+ add_help=False)
- # Switches to determine what to parse and what to ignore
+ # Switches to determine what to parse and what to ignore
- parser.add_argument("-r", "--received", help="Scan for received emails.",
- action="store_true")
- parser.add_argument("-s", "--sent", help="Scan for sent emails.",
- action="store_true")
- parser.add_argument("-l", "--logins", help="Scan for user logins to IMAP/POP3.",
- action="store_true")
- parser.add_argument("-g", "--grey", help="Scan for greylisted emails.",
- action="store_true")
- parser.add_argument("-b", "--blocked", help="Scan for blocked emails.",
- action="store_true")
+ parser.add_argument("-r",
+ "--received",
+ help="Scan for received emails.",
+ action="store_true")
+ parser.add_argument("-s",
+ "--sent",
+ help="Scan for sent emails.",
+ action="store_true")
+ parser.add_argument("-l",
+ "--logins",
+ help="Scan for user logins to IMAP/POP3.",
+ action="store_true")
+ parser.add_argument("-g",
+ "--grey",
+ help="Scan for greylisted emails.",
+ action="store_true")
+ parser.add_argument("-b",
+ "--blocked",
+ help="Scan for blocked emails.",
+ action="store_true")
- parser.add_argument("-t", "--timespan", choices=TIME_DELTAS.keys(), default='today',
-		metavar='<time span>',
-
-Enabling two-factor authentication does not protect access to your email
-
-
-Enabling two-factor authentication on this page only limits access to this control panel. Remember that most websites allow you to
-reset your password by checking your email, so anyone with access to your email can typically take over
-your other accounts. Additionally, if your email address or any alias that forwards to your email
-address is a typical domain control validation address (e.g. admin@, administrator@, postmaster@, hostmaster@,
-webmaster@, abuse@), extra care should be taken to protect the account. Always use a strong password,
-and ensure every administrator account for this control panel does the same.
-
+
+ Enabling two-factor authentication does not protect access to your email
+
+
+ Enabling two-factor authentication on this page only limits access to this control panel. Remember that most
+ websites allow you to
+ reset your password by checking your email, so anyone with access to your email can typically take over
+ your other accounts. Additionally, if your email address or any alias that forwards to your email
+ address is a typical domain control validation address (e.g. admin@, administrator@, postmaster@, hostmaster@,
+ webmaster@, abuse@), extra care should be taken to protect the account. Always use a strong password,
+ and ensure every administrator account for this control panel does the same.
+
-
Loading...
+
Loading...
+ When you click Enable Two-Factor Authentication, you will be logged out of the control panel and will
+ have to log in
+ again, now using your two-factor authentication app.
+
- You can upload your public key/keychain here. Keys must be submitted in ASCII-armored format.
-
- If you're using gpg, you can export your public key by following this example:
-
+
Import Key
+
+ You can upload your public key/keychain here. Keys must be submitted in ASCII-armored format.
+
+ If you're using gpg, you can export your public key by following this example:
+
# Get all the keys in the ring
$ gpg --list-keys
/home/you/.gnupg/pubring.kbx
@@ -86,201 +86,202 @@ $ gpg --export --armor 52661092E5CD9EEFD7796B19E85F540C9318B69F
copy and paste this block in the area below
-----END PGP PUBLIC KEY BLOCK-----
A TLS (formerly called SSL) certificate is a cryptographic file that proves to anyone connecting to a web address that the connection is secure between you and the owner of that address.
+
A TLS (formerly called SSL) certificate is a cryptographic file that proves to anyone connecting to a web address
+ that the connection is secure between you and the owner of that address.
-
You need a TLS certificate for this box’s hostname ({{hostname}}) and every other domain name and subdomain that this box is hosting a website for (see the list below).
+
You need a TLS certificate for this box’s hostname ({{hostname}}) and every other domain name and subdomain
+ that this box is hosting a website for (see the list below).
-
Provision certificates
+
Provision certificates
-
-
A TLS certificate can be automatically provisioned from Let’s Encrypt, a free TLS certificate provider, for:
-
+
+
A TLS certificate can be automatically provisioned from Let’s Encrypt, a free TLS certificate provider, for:
+
Certificate status
-
Certificates expire after a period of time. All certificates will be automatically renewed through Let’s Encrypt 14 days prior to expiration.
+
Certificates expire after a period of time. All certificates will be automatically renewed
+ through Let’s Encrypt 14 days prior to expiration.
Domain
-
Certificate Status
Domain
+
Certificate Status
Install certificate
-
If you don't want to use our automatic Let's Encrypt integration, you can give any other certificate provider a try. You can generate the needed CSR below.
+
If you don't want to use our automatic Let's Encrypt integration, you can give any other certificate provider a try.
+ You can generate the needed CSR below.
-
A multi-domain or wildcard certificate will be automatically applied to any domains it is valid for besides the one you choose above.
+
A multi-domain or wildcard certificate will be automatically applied to any domains it is valid for besides
+ the one you choose above.
-
This is required by some TLS certificate providers. You may just pick any if you know your TLS certificate provider doesn't require it.
+
This is required by some TLS certificate providers. You may just pick any if you know your TLS certificate
+ provider doesn't require it.
-
You will need to provide the certificate provider this Certificate Signing Request (CSR):
+
You will need to provide the certificate provider this Certificate Signing Request (CSR):
- The CSR is safe to share. It can only be used in combination with a secret key stored on this machine.
+
+
+ The CSR is safe to share. It can only be used in combination with a secret key stored on this machine.
-
The certificate provider will then provide you with a TLS/SSL certificate. They may also provide you with an intermediate chain. Paste each separately into the boxes below:
+
The certificate provider will then provide you with a TLS/SSL certificate. They may also provide you with an
+ intermediate chain. Paste each separately into the boxes below:
The box makes an incremental backup each night. By default the backup is stored on the machine itself, but you can also store it on S3-compatible services like Amazon Web Services (AWS).
+
The box makes an incremental backup each night. By default the backup is stored on the machine itself, but you can
+ also store it on S3-compatible services like Amazon Web Services (AWS).
Configuration
Available backups
-
The backup location currently contains the backups listed below. The total size of the backups is currently .
+
The backup location currently contains the backups listed below. The total size of the backups is currently .
To restore the account, create a new account with this
+ email address. Or to permanently delete the mailbox, delete the directory on the machine.
Mail user API (advanced)
@@ -134,36 +157,57 @@
Verbs
-Verb  Action
-GET   (none)              Returns a list of existing mail users. Adding ?format=json to the URL will give JSON-encoded results.
-POST  /add                Adds a new mail user. Required POST-body parameters are email and password. Optional parameters: privilege=admin and quota
-POST  /remove             Removes a mail user. Required POST-by parameter is email.
-POST  /privileges/add     Used to make a mail user an admin. Required POST-body parameters are email and privilege=admin.
-POST  /privileges/remove  Used to remove the admin privilege from a mail user. Required POST-body parameter is email.
-GET   /quota              Get the quota for a mail user. Required POST-body parameters are email and will return JSON result
-POST  /quota              Set the quota for a mail user. Required POST-body parameters are email and quota.
+Verb  Action
+GET   (none)              Returns a list of existing mail users. Adding ?format=json to the URL will give JSON-encoded results.
+POST  /add                Adds a new mail user. Required POST-body parameters are email and password. Optional parameters: privilege=admin and quota.
+POST  /remove             Removes a mail user. Required POST-body parameter is email.
+POST  /privileges/add     Used to make a mail user an admin. Required POST-body parameters are email and privilege=admin.
+POST  /privileges/remove  Used to remove the admin privilege from a mail user. Required POST-body parameter is email.
+GET   /quota              Gets the quota for a mail user. Required parameter is email; returns a JSON result.
+POST  /quota              Sets the quota for a mail user. Required POST-body parameters are email and quota.
Examples:
-Try these examples. For simplicity the examples omit the --user me@mydomain.com:yourpassword command line argument which you must fill in with your administrative email address and password.
+Try these examples. For simplicity the examples omit the --user me@mydomain.com:yourpassword command
+ line argument, which you must fill in with your administrative email address and password.
# Gives a JSON-encoded list of all mail users
curl -X GET https://{{hostname}}/admin/mail/users?format=json
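The quota endpoints in the table above should work the same way; a sketch (the email address and quota value format are assumptions, and passing the GET parameter in the query string is inferred from the other examples):

# Gets a user's quota as JSON
curl -X GET "https://{{hostname}}/admin/mail/users/quota?email=user@mydomain.com"

# Sets a user's quota (the value format is an assumption; check what your box accepts)
curl -X POST -d "email=user@mydomain.com" -d "quota=1G" https://{{hostname}}/admin/mail/users/quota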
@@ -182,228 +226,228 @@ curl -X POST -d "email=new_user@mydomain.com" https://{{hostname}}/admin/mail/us
-This machine is serving a simple, static website at https://{{hostname}} and at all domain names that you set up an email user or alias for.
+This machine is serving a simple, static website at https://{{hostname}} and at
+ all domain names that you set up an email user or alias for.
Uploading web files
-You can replace the default website with your own HTML pages and other static files. This control panel won’t help you design a website, but once you have .html files you can upload them following these instructions:
+You can replace the default website with your own HTML pages and other static files. This control panel won’t
+ help you design a website, but once you have .html files you can upload them following these instructions:
-Ensure that any domains you are publishing a website for have no problems on the Status Checks page.
+Ensure that any domains you are publishing a website for have no problems on the Status Checks page.
-On your personal computer, install an SSH file transfer program such as FileZilla or scp.
+On your personal computer, install an SSH file transfer program such as FileZilla or scp.
-Log in to this machine with the file transfer program. The server is {{hostname}}, the protocol is SSH or SFTP, and use the SSH login credentials that you used when you originally created this machine at your cloud host provider. This is not what you use to log in either for email or this control panel. Your SSH credentials probably involves a private key file.
+Log in to this machine with the file transfer program. The server is {{hostname}}, the protocol
+ is SSH or SFTP, and use the SSH login credentials that you used when you originally created this
+ machine at your cloud host provider. This is not what you use to log in for either email or this
+ control panel. Your SSH credentials probably involve a private key file.
-Upload your .html or other files to the directory {{storage_root}}/www/default on this machine. They will appear directly and immediately on the web.
+Upload your .html or other files to the directory {{storage_root}}/www/default on this machine.
+ They will appear directly and immediately on the web.
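For example, with scp (the file name and key path here are hypothetical; use the SSH credentials from the login step):

# Copy a local page to the default website directory on the box
scp -i ~/.ssh/id_rsa index.html username@{{hostname}}:{{storage_root}}/www/default/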
-The websites set up on this machine are listed in the table below with where to put the files for each website.
+The websites set up on this machine are listed in the table below with where to put the files for each website.
-Site
-Directory for Files
+Site
+Directory for Files
-To add a domain to this table, create a dummy mail user or alias on the domain first and see the setup guide for adding nameserver records to the new domain at your registrar (but not glue records).
+To add a domain to this table, create a dummy mail user or
+ alias on the domain first and see the setup guide for adding nameserver records
+ to the new domain at your registrar (but not glue records).