Compare commits


1 commit

Author:  beenull
SHA1:    4d68d1f08e
Message: Update setup/bootstrap.sh
Date:    2024-10-19 12:35:36 +00:00
42 changed files with 265 additions and 580 deletions

View file

@ -1,90 +1,6 @@
CHANGELOG
=========
Version 67 (December 22, 2023)
------------------------------
* Guard against a newly published vulnerability called SMTP Smuggling. See https://sec-consult.com/blog/detail/smtp-smuggling-spoofing-e-mails-worldwide/.
Version 66 (December 17, 2023)
------------------------------
* Some users reported an error installing Mail-in-a-Box related to the virtualenv command. This is hopefully fixed.
* Roundcube is updated to 1.6.5 fixing a security vulnerability.
* For Mail-in-a-Box developers, a new setup variable is added to pull the source code from a different repository.
Version 65 (October 27, 2023)
-----------------------------
* Roundcube updated to 1.6.4 fixing a security vulnerability.
* zpush.sh updated to version 2.7.1.
* Fixed a typo in the control panel.
Version 64 (September 2, 2023)
------------------------------
* Fixed broken installation when upgrading from Mail-in-a-Box version 56 (Nextcloud 22) and earlier because of an upstream packaging issue.
* Fixed backups to work with the latest duplicity package which was not backwards compatible.
* Fixed setting B2 as a backup target with a slash in the application key.
* Turned off OpenDMARC diagnostic reports sent in response to incoming mail.
* Fixed some crashes when using an unreleased version of Mail-in-a-Box.
* Added z-push administration scripts.
Version 63 (July 27, 2023)
--------------------------
* Nextcloud updated to 25.0.7.
Version 62 (May 20, 2023)
-------------------------
Package updates:
* Nextcloud updated to 23.0.12 (and its apps also updated).
* Roundcube updated to 1.6.1.
* Z-Push updated to 2.7.0, which is compatible with Ubuntu 22.04, so it works again.
Mail:
* Roundcube's password change page is now working again.
Control panel:
* Allow setting the backup location's S3 region name for non-AWS S3-compatible backup hosts.
* Control panel pages can be opened in a new tab/window and bookmarked and browser history navigation now works.
* Add a Copy button to put the rsync backup public key on clipboard.
* Allow secondary DNS xfr: items added in the control panel to be hostnames too.
* Fixed issue where sshkeygen fails when IPv6 is disabled.
* Fixed issue opening munin reports.
* Fixed report formatting in status emails sent to the administrator.
Version 61.1 (January 28, 2023)
-------------------------------
* Fixed rsync backups not working with the default port.
* Reverted "Improve error messages in the management tools when external command-line tools are run." because of the possibility of user secrets being included in error messages.
* Fix for TLS certificate SHA fingerprint not being displayed during setup.
Version 61 (January 21, 2023)
-----------------------------
System:
* fail2ban didn't start after setup.
Mail:
* Disable Roundcube password plugin since it was corrupting the user database.
Control panel:
* Fix changing existing backup settings when the rsync type is used.
* Allow setting a custom port for rsync backups.
* Fixes to DNS lookups during status checks when there are timeouts, enforce timeouts better.
* A new check is added to ensure fail2ban is running.
* Fixed a color.
* Improve error messages in the management tools when external command-line tools are run.
Version 60.1 (October 30, 2022)
-------------------------------

Vagrantfile
View file

@ -4,6 +4,10 @@
ip = 2
machines = [
{
'iso' => "debian/buster64",
'host' => "buster"
},
{
'iso' => "generic/ubuntu2004",
'host' => "focal"
@ -16,23 +20,19 @@ machines = [
'iso' => "generic/ubuntu2204",
'host' => "jammy"
},
{
'iso' => "debian/bookworm64",
'host' => "bookworm"
}
]
Vagrant.configure("2") do |config|
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--cpus", 1, "--memory", 1024]
vb.customize ["modifyvm", :id, "--cpus", 1, "--memory", 768]
end
config.vm.provider :libvirt do |v|
v.memory = 1024
v.memory = 768
v.cpus = 1
v.nested = true
end
config.vm.provider :kvm do |kvm|
kvm.memory_size = '1024m'
kvm.memory_size = '768m'
end
# Network config: Since it's a mail server, the machine must be connected
@ -49,8 +49,6 @@ Vagrant.configure("2") do |config|
m.vm.network "private_network", ip: "192.168.168.#{ip+n}"
m.vm.provision "shell", :inline => <<-SH
apt-get update
apt-get install git -y # Just in case git isn't installed
git config --global --add safe.directory /vagrant
# Set environment variables so that the setup script does

View file

@ -33,8 +33,6 @@
fastcgi_split_path_info ^/mail(/.*)()$;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME /usr/local/lib/roundcubemail/$fastcgi_script_name;
# ensure roundcube session IDs aren't leaked to other parts of the server
fastcgi_param PHP_VALUE "session.cookie_path=/mail/";
fastcgi_pass php-default;
# Outgoing mail also goes through this endpoint, so increase the maximum
@ -100,10 +98,3 @@
rewrite ^/.well-known/host-meta.json /cloud/public.php?service=host-meta-json last;
rewrite ^/.well-known/carddav /cloud/remote.php/carddav/ redirect;
rewrite ^/.well-known/caldav /cloud/remote.php/caldav/ redirect;
# This addresses those service discovery issues mentioned in:
# https://docs.nextcloud.com/server/23/admin_manual/issues/general_troubleshooting.html#service-discovery
rewrite ^/.well-known/webfinger /cloud/index.php/.well-known/webfinger redirect;
rewrite ^/.well-known/nodeinfo /cloud/index.php/.well-known/nodeinfo redirect;
# ADDITIONAL DIRECTIVES HERE

View file

@ -74,11 +74,10 @@ def backup_status(env):
"/usr/local/bin/duplicity",
"collection-status",
"--archive-dir", backup_cache_dir,
"--gpg-options", "'--cipher-algo=AES256'",
"--gpg-options", "--cipher-algo=AES256",
"--log-fd", "1",
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config)
],
get_duplicity_target_url(config),
] + get_duplicity_additional_args(env),
get_duplicity_env_vars(env),
trap=True)
if code != 0:
@ -240,10 +239,10 @@ def get_duplicity_target_url(config):
# the target URL must be the bucket name. The hostname is passed
# via get_duplicity_additional_args. Move the first part of the
# path (the bucket name) into the hostname URL component, and leave
# the rest for the path. (The S3 region name is also stored in the
# hostname part of the URL, in the username portion, which we also
# have to drop here).
target[1], target[2] = target[2].lstrip('/').split('/', 1)
# the rest for the path.
target_bucket = target[2].lstrip('/').split('/', 1)
target[1] = target_bucket[0]
target[2] = target_bucket[1] if len(target_bucket) > 1 else ''
target = urlunsplit(target)
@ -259,32 +258,16 @@ def get_duplicity_additional_args(env):
port = 22
if get_target_type(config) == 'rsync':
# Extract a port number for the ssh transport. Duplicity accepts the
# optional port number syntax in the target, but it doesn't appear to act
# on it, so we set the ssh port explicitly via the duplicity options.
from urllib.parse import urlsplit
try:
port = urlsplit(config["target"]).port
except ValueError:
port = 22
if port is None:
port = 22
return [
f"--ssh-options='-i /root/.ssh/id_rsa_miab -p {port}'",
f"--rsync-options='-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"'",
f"--ssh-options= -i /root/.ssh/id_rsa_miab -p {port}",
f"--rsync-options= -e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"",
]
elif get_target_type(config) == 's3':
# See note about hostname in get_duplicity_target_url.
# The region name, which is required by some non-AWS endpoints,
# is saved inside the username portion of the URL.
from urllib.parse import urlsplit, urlunsplit
target = urlsplit(config["target"])
endpoint_url = urlunsplit(("https", target.hostname, '', '', ''))
args = ["--s3-endpoint-url", endpoint_url]
if target.username: # region name is stuffed here
args += ["--s3-region-name", target.username]
return args
endpoint_url = urlunsplit(("https", target.netloc, '', '', ''))
return ["--s3-endpoint-url", endpoint_url]
return []
@ -379,12 +362,11 @@ def perform_backup(full_backup, user_initiated=False):
"--archive-dir", backup_cache_dir,
"--exclude", backup_root,
"--volsize", "250",
"--gpg-options", "'--cipher-algo=AES256'",
"--allow-source-mismatch"
] + get_duplicity_additional_args(env) + [
"--gpg-options", "--cipher-algo=AES256",
env["STORAGE_ROOT"],
get_duplicity_target_url(config),
],
"--allow-source-mismatch"
] + get_duplicity_additional_args(env),
get_duplicity_env_vars(env))
finally:
# Start services again.
@ -402,9 +384,8 @@ def perform_backup(full_backup, user_initiated=False):
"--verbosity", "error",
"--archive-dir", backup_cache_dir,
"--force",
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config)
],
] + get_duplicity_additional_args(env),
get_duplicity_env_vars(env))
# From duplicity's manual:
@ -418,9 +399,8 @@ def perform_backup(full_backup, user_initiated=False):
"--verbosity", "error",
"--archive-dir", backup_cache_dir,
"--force",
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config)
],
] + get_duplicity_additional_args(env),
get_duplicity_env_vars(env))
# Change ownership of backups to the user-data user, so that the after-backup
@ -465,10 +445,9 @@ def run_duplicity_verification():
"--compare-data",
"--archive-dir", backup_cache_dir,
"--exclude", backup_root,
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config),
env["STORAGE_ROOT"],
], get_duplicity_env_vars(env))
] + get_duplicity_additional_args(env), get_duplicity_env_vars(env))
def run_duplicity_restore(args):
env = load_environment()
@ -478,23 +457,9 @@ def run_duplicity_restore(args):
"/usr/local/bin/duplicity",
"restore",
"--archive-dir", backup_cache_dir,
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config)
] + args,
get_duplicity_env_vars(env))
def print_duplicity_command():
import shlex
env = load_environment()
config = get_backup_config(env)
backup_cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache')
for k, v in get_duplicity_env_vars(env).items():
print(f"export {k}={shlex.quote(v)}")
print("duplicity", "{command}", shlex.join([
"--archive-dir", backup_cache_dir,
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config)
]))
get_duplicity_target_url(config),
] + get_duplicity_additional_args(env) + args,
get_duplicity_env_vars(env))
def list_target_files(config):
import urllib.parse
@ -511,17 +476,6 @@ def list_target_files(config):
rsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)')
rsync_target = '{host}:{path}'
# Strip off any trailing port specifier because it's not valid in rsync's
# DEST syntax. Explicitly set the port number for the ssh transport.
user_host, *_ = target.netloc.rsplit(':', 1)
try:
port = target.port
except ValueError:
port = 22
if port is None:
port = 22
target_path = target.path
if not target_path.endswith('/'):
target_path = target_path + '/'
@ -604,7 +558,8 @@ def list_target_files(config):
# Extract information from target
b2_application_keyid = target.netloc[:target.netloc.index(':')]
b2_application_key = urllib.parse.unquote(target.netloc[target.netloc.index(':') + 1:target.netloc.index('@')])
b2_application_key = target.netloc[target.netloc.index(':') +
1:target.netloc.index('@')]
b2_bucket = target.netloc[target.netloc.index('@') + 1:]
try:
@ -662,9 +617,10 @@ def get_backup_config(env, for_save=False, for_ui=False):
# Merge in anything written to custom.yaml.
try:
with open(os.path.join(backup_root, 'custom.yaml'), 'r') as f:
custom_config = rtyaml.load(f)
if not isinstance(custom_config, dict): raise ValueError() # caught below
custom_config = rtyaml.load(
open(os.path.join(backup_root, 'custom.yaml')))
if not isinstance(custom_config, dict):
raise ValueError() # caught below
config.update(custom_config)
except:
pass
@ -688,8 +644,7 @@ def get_backup_config(env, for_save=False, for_ui=False):
config["target"] = "file://" + config["file_target_directory"]
ssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub')
if os.path.exists(ssh_pub_key):
with open(ssh_pub_key, 'r') as f:
config["ssh_pub_key"] = f.read()
config["ssh_pub_key"] = open(ssh_pub_key, 'r').read()
return config
@ -724,9 +679,6 @@ if __name__ == "__main__":
# to duplicity. The restore path should be specified.
run_duplicity_restore(sys.argv[2:])
elif sys.argv[-1] == "--duplicity-command":
print_duplicity_command()
else:
# Perform a backup. Add --full to force a full backup rather than
# possibly performing an incremental backup.
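
The comments removed above describe the fork's S3 target convention: the region name rides in the username portion of the URL, and the bucket is shifted into the hostname component before the URL is handed to duplicity. A minimal standalone sketch of that parsing, using only the standard library (the sample URL and function name are illustrative, not taken from the repository):

# Parse an "s3://region@endpoint/bucket/prefix" backup target the way the
# removed hunks describe. The sample URL below is made up.
from urllib.parse import urlsplit, urlunsplit

def parse_s3_target(target):
    parts = urlsplit(target)
    endpoint_url = urlunsplit(("https", parts.hostname, "", "", ""))
    region = parts.username  # None if no region was stored in the URL
    bucket, _, prefix = parts.path.lstrip("/").partition("/")
    return endpoint_url, region, bucket, prefix

print(parse_s3_target("s3://eu-central-1@s3.example.com/my-bucket/backups"))
# -> ('https://s3.example.com', 'eu-central-1', 'my-bucket', 'backups')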

View file

@ -61,8 +61,7 @@ def read_password():
def setup_key_auth(mgmt_uri):
with open('/var/lib/mailinabox/api.key', 'r') as f:
key = f.read().strip()
key = open('/var/lib/mailinabox/api.key').read().strip()
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm='Mail-in-a-Box Management Server',
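
For context, the hunk above registers the management API key with urllib's basic-auth handler. A self-contained sketch of the same pattern; the key path, URI, and the key-as-username mapping are assumptions based on the visible lines, not confirmed by the diff:

# Hypothetical sketch of key-based basic auth against the management API.
import urllib.request

def make_opener(key_path, mgmt_uri):
    with open(key_path, "r") as f:
        key = f.read().strip()
    auth_handler = urllib.request.HTTPBasicAuthHandler()
    # Assumption: the API key is sent as the username with an empty password.
    auth_handler.add_password(realm="Mail-in-a-Box Management Server",
                              uri=mgmt_uri, user=key, passwd="")
    return urllib.request.build_opener(auth_handler)

# Usage (hypothetical paths/URLs):
# opener = make_opener("/var/lib/mailinabox/api.key", "http://127.0.0.1:10222/")
# print(opener.open("http://127.0.0.1:10222/mail/users").read())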

View file

@ -1230,7 +1230,7 @@ def munin_cgi(filename):
support infrastructure like spawn-fcgi.
"""
COMMAND = 'su munin --preserve-environment --shell=/bin/bash -c /usr/lib/munin/cgi/munin-cgi-graph'
COMMAND = 'su - munin --preserve-environment --shell=/bin/bash -c /usr/lib/munin/cgi/munin-cgi-graph'
# su changes user, we use the munin user here
# --preserve-environment retains the environment, which is where Popen's `env` data is
# --shell=/bin/bash ensures the shell used is bash

View file

@ -1081,9 +1081,10 @@ def write_opendkim_tables(domains, env):
def get_custom_dns_config(env, only_real_records=False):
try:
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), 'r') as f:
custom_dns = rtyaml.load(f)
if not isinstance(custom_dns, dict): raise ValueError() # caught below
custom_dns = rtyaml.load(
open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
if not isinstance(custom_dns, dict):
raise ValueError() # caught below
except:
return []
@ -1289,7 +1290,6 @@ def set_custom_dns_record(qname, rtype, value, action, env, ttl=None):
def get_secondary_dns(custom_dns, mode=None):
resolver = dns.resolver.get_default_resolver()
resolver.timeout = 10
resolver.lifetime = 10
values = []
for qname, rtype, value, ttl in custom_dns:
@ -1302,34 +1302,26 @@ def get_secondary_dns(custom_dns, mode=None):
values.append(hostname)
continue
# If the entry starts with "xfr:" only include it in the zone transfer settings.
if hostname.startswith("xfr:"):
if mode != "xfr": continue
hostname = hostname[4:]
# If it is a hostname, before including it in zone xfr lines,
# resolve to an IP address.
# This is a hostname. Before including in zone xfr lines,
# resolve to an IP address. Otherwise just return the hostname.
# It may not resolve to IPv6, so don't throw an exception if it
# doesn't. Skip the entry if there is a DNS error.
if mode == "xfr":
try:
ipaddress.ip_interface(hostname) # test if it's an IP address or CIDR notation
values.append(hostname)
except ValueError:
try:
response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
values.extend(map(str, response))
except dns.exception.DNSException:
pass
try:
response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
values.extend(map(str, response))
except dns.exception.DNSException:
pass
else:
# doesn't.
if not hostname.startswith("xfr:"):
if mode == "xfr":
response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
values.extend(map(str, response))
response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
values.extend(map(str, response))
continue
values.append(hostname)
# This is a zone-xfer-only IP address. Do not return if
# we're querying for NS record hostnames. Only return if
# we're querying for zone xfer IP addresses - return the
# IP address.
elif mode == "xfr":
values.append(hostname[4:])
return values
@ -1338,17 +1330,15 @@ def set_secondary_dns(hostnames, env):
# Validate that all hostnames are valid and that all zone-xfer IP addresses are valid.
resolver = dns.resolver.get_default_resolver()
resolver.timeout = 5
resolver.lifetime = 5
for item in hostnames:
if not item.startswith("xfr:"):
# Resolve hostname.
try:
response = resolver.resolve(item, "A")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
try:
response = resolver.resolve(item, "AAAA")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
raise ValueError("Could not resolve the IP address of %s." % item)
else:
# Validate IP address.
@ -1388,7 +1378,7 @@ def get_custom_dns_records(custom_dns, qname, rtype):
def build_recommended_dns(env):
ret = []
for (domain, zonefile, records) in build_zones(env):
# remove records that we don't display
# remove records that we don't dislay
records = [r for r in records if r[3] is not False]
# put Required at the top, then Recommended, then everything else
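
Earlier in this file's diff, the removed branch accepts a literal IP address or CIDR block for zone transfers and otherwise resolves the hostname to both A and AAAA records, skipping lookups that fail. A standalone sketch of that logic (dnspython assumed; the function name is mine):

# Resolve a secondary-DNS "xfr:" entry: keep a literal IP/CIDR as-is,
# otherwise look up A and AAAA records and ignore lookups that fail.
import ipaddress
import dns.resolver
import dns.exception

def xfr_addresses(hostname):
    try:
        ipaddress.ip_interface(hostname)      # already an IP address or CIDR block
        return [hostname]
    except ValueError:
        pass
    values = []
    for rtype in ("A", "AAAA"):
        try:
            answer = dns.resolver.resolve(hostname + ".", rtype, raise_on_no_answer=False)
            values.extend(map(str, answer))
        except dns.exception.DNSException:
            pass                              # e.g. NXDOMAIN or timeout: skip this lookup
    return values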

View file

@ -38,8 +38,7 @@ def edit_conf(filename,
erase_setting=False):
found = set()
buf = ""
with open(filename, "r") as f:
input_lines = list(f)
input_lines = list(open(filename, "r+"))
while len(input_lines) > 0:
line = input_lines.pop(0)

View file

@ -28,7 +28,7 @@ content = sys.stdin.read().strip()
# If there's nothing coming in, just exit.
if content == "":
sys.exit(0)
sys.exit(0)
# create MIME message
msg = MIMEMultipart('alternative')
@ -44,7 +44,8 @@ msg['From'] = "\"%s\" <%s>" % ("System Management Daemon",
msg['To'] = "administrator@" + env['PRIMARY_HOSTNAME']
msg['Subject'] = "[%s] %s" % (env['PRIMARY_HOSTNAME'], subject)
content_html = '<html><body><pre style="overflow-x: scroll; white-space: pre;">{}</pre></body></html>'.format(html.escape(content))
content_html = "<html><body><pre>{}</pre></body></html>".format(
html.escape(content))
msg.attach(MIMEText(create_signature(content.encode()).decode(), 'plain'))
msg.attach(MIMEText(content_html, 'html'))
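
The hunk above builds the administrator notification as a multipart/alternative message, with the HTML part wrapping the escaped report in a horizontally scrollable <pre>. A trimmed sketch of that construction (the PGP-signature attachment from the original is omitted; the hostname, subject, and sender address are placeholders):

# Sketch of the multipart/alternative status email shown in the hunk above.
import html
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def build_admin_mail(hostname, subject, content):
    msg = MIMEMultipart("alternative")
    # Sender address is an assumption for illustration.
    msg["From"] = '"%s" <%s>' % ("System Management Daemon", "administrator@" + hostname)
    msg["To"] = "administrator@" + hostname
    msg["Subject"] = "[%s] %s" % (hostname, subject)
    content_html = '<html><body><pre style="overflow-x: scroll; white-space: pre;">{}</pre></body></html>'.format(html.escape(content))
    msg.attach(MIMEText(content, "plain"))
    msg.attach(MIMEText(content_html, "html"))
    return msg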

View file

@ -68,12 +68,11 @@ def scan_files(collector):
tmp_file = None
if not os.path.exists(fn):
continue
elif fn[-3:] == '.gz':
tmp_file = tempfile.NamedTemporaryFile()
with gzip.open(fn, 'rb') as f:
shutil.copyfileobj(f, tmp_file)
if not os.path.exists(fn):
continue
elif fn[-3:] == '.gz':
tmp_file = tempfile.NamedTemporaryFile()
shutil.copyfileobj(gzip.open(fn), tmp_file)
if VERBOSE:
print("Processing file", fn, "...")
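
The removed lines above decompress rotated .gz logs into a temporary file before scanning. A minimal sketch of that step (the helper name is illustrative):

# Open a log file, transparently unpacking .gz rotations into a temp file
# so the rest of the scanner can read it like a plain file.
import gzip
import shutil
import tempfile

def open_maybe_gzipped(fn):
    if fn.endswith(".gz"):
        tmp = tempfile.NamedTemporaryFile()       # deleted when closed
        with gzip.open(fn, "rb") as f:
            shutil.copyfileobj(f, tmp)
        tmp.seek(0)
        return tmp
    return open(fn, "rb")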

View file

@ -611,8 +611,7 @@ def check_certificate(domain,
# Second, check that the certificate matches the private key.
if ssl_private_key is not None:
try:
with open(ssl_private_key, 'rb') as f:
priv_key = load_pem(f.read())
priv_key = load_pem(open(ssl_private_key, 'rb').read())
except ValueError as e:
return ("The private key file %s is not a private key file: %s" %
(ssl_private_key, str(e)), None)

View file

@ -185,12 +185,6 @@ def run_services_checks(env, output, pool):
fatal = fatal or fatal2
output2.playback(output)
# Check fail2ban.
code, ret = shell('check_output', ["fail2ban-client", "status"], capture_stderr=True, trap=True)
if code != 0:
output.print_error("fail2ban is not running.")
all_running = False
if all_running:
output.print_ok("All system services are running.")
@ -327,8 +321,7 @@ def check_ssh_password(env, output):
# the configuration file.
if not os.path.exists("/etc/ssh/sshd_config"):
return
with open("/etc/ssh/sshd_config", "r") as f:
sshd = f.read()
sshd = open("/etc/ssh/sshd_config").read()
if re.search("\nPasswordAuthentication\s+yes", sshd) \
or not re.search("\nPasswordAuthentication\s+no", sshd):
output.print_error(
@ -533,9 +526,9 @@ def run_network_checks(env, output):
if zen is None:
output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
elif zen == "[timeout]":
output.print_warning("Connection to zen.spamhaus.org timed out. We could not determine whether your server's IP address is blacklisted. Please try again later.")
elif zen == "[Not Set]":
output.print_warning("Could not connect to zen.spamhaus.org. We could not determine whether your server's IP address is blacklisted. Please try again later.")
output.print_warning(
"Connection to zen.spamhaus.org timed out. We could not determine whether your server's IP address is blacklisted. Please try again later."
)
else:
output.print_error(
"""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
@ -941,8 +934,10 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
for ns in custom_secondary_ns:
# We must first resolve the nameserver to an IP address so we can query it.
ns_ips = query_dns(ns, "A")
if not ns_ips or ns_ips in {'[Not Set]', '[timeout]'}:
output.print_error("Secondary nameserver %s is not valid (it doesn't resolve to an IP address)." % ns)
if not ns_ips:
output.print_error(
"Secondary nameserver %s is not valid (it doesn't resolve to an IP address)."
% ns)
continue
# Choose the first IP if nameserver returns multiple
ns_ip = ns_ips.split('; ')[0]
@ -1013,9 +1008,13 @@ def check_dnssec(domain,
# Some registrars may want the public key so they can compute the digest. The DS
# record that we suggest using is for the KSK (and that's how the DS records were generated).
# We'll also give the nice name for the key algorithm.
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
with open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), 'r') as f:
dnsssec_pubkey = f.read().split("\t")[3].split(" ")[3]
dnssec_keys = load_env_vars_from_file(
os.path.join(env['STORAGE_ROOT'],
'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
dnsssec_pubkey = open(
os.path.join(env['STORAGE_ROOT'],
'dns/dnssec/' + dnssec_keys['KSK'] +
'.key')).read().split("\t")[3].split(" ")[3]
expected_ds_records[(ds_keytag, ds_alg, ds_digalg, ds_digest)] = {
"record": rr_ds,
@ -1211,9 +1210,9 @@ def check_mail_domain(domain, env, output):
if dbl is None:
output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
elif dbl == "[timeout]":
output.print_warning("Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
elif dbl == "[Not Set]":
output.print_warning("Could not connect to dbl.spamhaus.org. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
output.print_warning(
"Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {} is blacklisted. Please try again later."
.format(domain))
else:
output.print_error(
"""This domain is listed in the Spamhaus Domain Block List (code %s),
@ -1266,17 +1265,12 @@ def query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False):
# running bind server), or if the 'at' argument is specified, use that host
# as the nameserver.
resolver = dns.resolver.get_default_resolver()
# Make sure at is not a string that cannot be used as a nameserver
if at and at not in {'[Not set]', '[timeout]'}:
if at:
resolver = dns.resolver.Resolver()
resolver.nameservers = [at]
# Set a timeout so that a non-responsive server doesn't hold us back.
resolver.timeout = 5
# The number of seconds to spend trying to get an answer to the question. If the
# lifetime expires a dns.exception.Timeout exception will be raised.
resolver.lifetime = 5
# Do the query.
try:
@ -1409,7 +1403,7 @@ def list_apt_updates(apt_update=True):
def what_version_is_this(env):
# This function runs `git describe --always --abbrev=0` on the Mail-in-a-Box installation directory.
# This function runs `git describe --abbrev=0` on the Mail-in-a-Box installation directory.
# Git may not be installed and Mail-in-a-Box may not have been cloned from github,
# so this function may raise all sorts of exceptions.
miab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@ -1479,8 +1473,7 @@ def run_and_output_changes(env, pool):
# Load previously saved status checks.
cache_fn = "/var/cache/mailinabox/status_checks.json"
if os.path.exists(cache_fn):
with open(cache_fn, 'r') as f:
prev = json.load(f)
prev = json.load(open(cache_fn))
# Group the serial output into categories by the headings.
def group_by_heading(lines):
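
Among the removed lines above, query_dns pins the resolver's timeout and lifetime so a non-responsive nameserver cannot stall the status checks, and it refuses to use sentinel strings as a nameserver address. A rough sketch of that guarded lookup (dnspython assumed; this is a simplified stand-in, not the project's full implementation):

# Guarded DNS lookup with explicit timeout/lifetime, mirroring the removed
# lines above. Sentinel strings match those used elsewhere in this diff.
import dns.resolver
import dns.exception

def query_dns(qname, rtype, at=None, nxdomain="[Not Set]"):
    resolver = dns.resolver.get_default_resolver()
    # Don't treat placeholder strings as a nameserver address.
    if at and at not in {"[Not Set]", "[timeout]"}:
        resolver = dns.resolver.Resolver()
        resolver.nameservers = [at]
    resolver.timeout = 5      # per-try timeout for a non-responsive server
    resolver.lifetime = 5     # total budget; dns.exception.Timeout is raised after this
    try:
        answer = resolver.resolve(qname, rtype)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers, dns.resolver.NoAnswer):
        return nxdomain
    except dns.exception.Timeout:
        return "[timeout]"
    return "; ".join(sorted(str(r) for r in answer))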

View file

@ -18,7 +18,8 @@
<h3>Add a mail alias</h3>
<p>Aliases are email forwarders. An alias can forward email to a <a href="#users">mail user</a> or to any email address.</p>
<p>Aliases are email forwarders. An alias can forward email to a <a href="#" onclick="return show_panel('users')">mail
user</a> or to any email address.</p>
<p>To use an alias or any address besides your own login username in outbound mail, the sending user must be included as
a permitted sender for the alias.</p>

View file

@ -94,7 +94,8 @@
<h3>Using a secondary nameserver</h3>
<p>If your TLD requires you to have two separate nameservers, you can either set up <a href="#external_dns">external DNS</a> and ignore the DNS server on this box entirely, or
<p>If your TLD requires you to have two separate nameservers, you can either set up <a href="#"
onclick="return show_panel('external_dns')">external DNS</a> and ignore the DNS server on this box entirely, or
use the DNS server on this box but add a secondary (aka &ldquo;slave&rdquo;) nameserver.</p>
<p>If you choose to use a secondary nameserver, you must find a secondary nameserver service provider. Your domain name
registrar or virtual cloud provider may provide this service for you. Once you set up the secondary nameserver

View file

@ -72,7 +72,8 @@
</tr>
</table>
<p>In addition to setting up your email, you&rsquo;ll also need to set up <a href="#sync_guide">contacts and calendar synchronization</a> separately.</p>
<p>In addition to setting up your email, you&rsquo;ll also need to set up <a href="#sync_guide"
onclick="return show_panel(this);">contacts and calendar synchronization</a> separately.</p>
<p>As an alternative to IMAP you can also use the POP protocol: choose POP as the protocol, port 995, and
SSL or TLS security in your mail client. The SMTP settings and usernames and passwords remain the same.

View file

@ -1,7 +1,7 @@
<div>
<h2>Manage Password</h2>
<p>Here you can change your account password. The new password is then valid for both this panel and your email.</p>
<p>If you have client emails configured, you'll then need to update the configuration with the new password. See the <a href="#mail-guide">Mail Guide</a> for more information about this.</p>
<p>If you have client emails configured, you'll then need to update the configuration with the new password. See the <a href="#mail-guide" onclick="return show_panel(this);">Mail Guide</a> for more information about this.</p>
<form class="form-horizontal" role="form" onsubmit="set_password_self(); return false;">
<div class="col-lg-10 col-xl-8 mb-3">

View file

@ -29,15 +29,17 @@
</tr>
</table>
<p>Log in settings are the same as with <a href="#mail-guide">mail</a>: your
complete email address and your mail password.</p>
<p>Log in settings are the same as with <a href="#mail-guide" onclick="return show_panel(this);">mail</a>:
your
complete email address and your mail password.</p>
</div>
<div class="col-lg-6">
<h4>On your mobile device</h4>
<p>If you set up your <a href="#mail-guide">mail</a> using Exchange/ActiveSync,
your contacts and calendar may already appear on your device.</p>
<p>If you set up your <a href="#mail-guide" onclick="return show_panel(this);">mail</a> using
Exchange/ActiveSync,
your contacts and calendar may already appear on your device.</p>
<p>Otherwise, here are some apps that can synchronize your contacts and calendar to your Android phone.</p>
<table class="table">

View file

@ -6,7 +6,8 @@
<h2>Backup Status</h2>
<p>The box makes an incremental backup each night. You can store the backup on any Amazon Web Services S3-compatible service, or other options.</p>
<p>The box makes an incremental backup each night. By default the backup is stored on the machine itself, but you can
also store it on S3-compatible services like Amazon Web Services (AWS).</p>
<h3>Configuration</h3>
@ -79,9 +80,6 @@
of target user on the backup server specified above. That way you'll enable secure and
passwordless authentication between your Mail-in-a-Box server and your backup server.
</div>
<div id="copy_pub_key_div" class="col-sm">
<button type="button" class="btn btn-small" onclick="copy_pub_key_to_clipboard()">Copy</button>
</div>
</div>
<!-- S3 BACKUP -->
<div class="col-lg-10 col-xl-8 mb-3 backup-target-s3">
@ -106,16 +104,11 @@
<input type="text" placeholder="Endpoint" class="form-control" id="backup-target-s3-host">
</div>
</div>
<div class="form-group backup-target-s3">
<div class="input-group">
<label for="backup-target-s3-region-name" class="input-group-text">S3 Region Name <small>(if required)</small></label>
<input type="text" placeholder="region.name" class="form-control" class="form-control" id="backup-target-s3-region-name">
</div>
</div>
<div class="col-lg-10 col-xl-8 mb-3 backup-target-s3">
<div class="input-group">
<label for="backup-target-s3-path" class="input-group-text">S3 Bucket &amp; Path</label>
<input type="text" placeholder="your-bucket-name/backup-directory" class="form-control" id="backup-target-s3-path">
<label for="backup-target-s3-path" class="input-group-text">S3 Path</label>
<input type="text" placeholder="your-bucket-name/backup-directory" class="form-control"
id="backup-target-s3-path">
</div>
</div>
<div class="col-lg-10 col-xl-8 mb-3 backup-target-s3">
@ -298,18 +291,30 @@
} else if (r.target == "off") {
$("#backup-target-type").val("off");
} else if (r.target.substring(0, 8) == "rsync://") {
const spec = url_split(r.target);
$("#backup-target-type").val(spec.scheme);
$("#backup-target-rsync-user").val(spec.user);
$("#backup-target-rsync-host").val(spec.host);
$("#backup-target-rsync-path").val(spec.path);
$("#backup-target-type").val("rsync");
let uri = r.target.substring(8)
let i = uri.indexOf("/")
let path = [uri.slice(0, i), uri.slice(i + 1)];
let host_parts = path.shift().split('@');
$("#backup-target-rsync-user").val(host_parts[0]);
$("#backup-target-rsync-host").val(host_parts[1]);
$("#backup-target-rsync-path").val(path[0]);
$("#backup-target-rsync-port").val(r.target_rsync_port)
} else if (r.target.substring(0, 5) == "s3://") {
const spec = url_split(r.target);
$("#backup-target-type").val("s3");
$("#backup-target-s3-host-select").val(spec.host);
$("#backup-target-s3-host").val(spec.host);
$("#backup-target-s3-region-name").val(spec.user); // stuffing the region name in the username
$("#backup-target-s3-path").val(spec.path);
var hostpath = r.target.substring(5).split('/');
var host = hostpath.shift();
let s3_options = $("#backup-target-s3-host-select option").map(function() {return this.value}).get()
$("#backup-target-s3-host-select").val("other")
for (let h of s3_options) {
console.log(h)
if (h == host) {
$("#backup-target-s3-host-select").val(host)
break
}
}
$("#backup-target-s3-host").val(host);
$("#backup-target-s3-path").val(hostpath.join('/'));
} else if (r.target.substring(0, 5) == "b2://") {
$("#backup-target-type").val("b2");
var targetPath = r.target.substring(5);
@ -317,11 +322,11 @@
var b2_applicationkey = targetPath.split(':')[1].split('@')[0];
var b2_bucket = targetPath.split('@')[1];
$("#backup-target-b2-user").val(b2_application_keyid);
$("#backup-target-b2-pass").val(decodeURIComponent(b2_applicationkey));
$("#backup-target-b2-pass").val(b2_applicationkey);
$("#backup-target-b2-bucket").val(b2_bucket);
}
toggle_form()
})
})
}
function set_custom_backup() {
@ -330,20 +335,17 @@
var target_pass = $("#backup-target-pass").val();
let target_port = $("#backup-target-rsync-port").val();
let target;
var target;
if (target_type == "local" || target_type == "off")
target = target_type;
else if (target_type == "s3")
target = "s3://"
+ ($("#backup-target-s3-region-name").val() ? ($("#backup-target-s3-region-name").val() + "@") : "")
+ $("#backup-target-s3-host").val()
+ "/" + $("#backup-target-s3-path").val();
target = "s3://" + $("#backup-target-s3-host").val() + "/" + $("#backup-target-s3-path").val();
else if (target_type == "rsync") {
target = "rsync://" + $("#backup-target-rsync-user").val() + "@" + $("#backup-target-rsync-host").val()
+ "/" + $("#backup-target-rsync-path").val();
+ "/" + $("#backup-target-rsync-path").val();
target_user = '';
} else if (target_type == "b2") {
target = 'b2://' + $('#backup-target-b2-user').val() + ':' + encodeURIComponent($('#backup-target-b2-pass').val())
target = 'b2://' + $('#backup-target-b2-user').val() + ':' + $('#backup-target-b2-pass').val()
+ '@' + $('#backup-target-b2-bucket').val()
target_user = '';
target_pass = '';
@ -411,41 +413,4 @@
})
}
// Return a two-element array of the substring preceding and the substring following
// the first occurrence of separator in string. Return [undefined, string] if the
// separator does not appear in string.
const split1_rest = (string, separator) => {
const index = string.indexOf(separator);
return (index >= 0) ? [string.substring(0, index), string.substring(index + separator.length)] : [undefined, string];
};
// Note: The manifest JS URL class does not work in some security-conscious
// settings, e.g. Brave browser, so we roll our own that handles only what we need.
//
// Use greedy separator parsing to get parts of a MIAB backup target url.
// Note: path will not include a leading forward slash '/'
const url_split = url => {
const [ scheme, scheme_rest ] = split1_rest(url, '://');
const [ user, user_rest ] = split1_rest(scheme_rest, '@');
const [ host, path ] = split1_rest(user_rest, '/');
return {
scheme,
user,
host,
path,
}
};
// Hide Copy button if not in a modern clipboard-supporting environment.
// Using document API because jQuery is not necessarily available in this script scope.
if (!(navigator && navigator.clipboard && navigator.clipboard.writeText)) {
document.getElementById('copy_pub_key_div').hidden = true;
}
function copy_pub_key_to_clipboard() {
const ssh_pub_key = $("#ssh-pub-key").val();
navigator.clipboard.writeText(ssh_pub_key);
}
</script>

View file

@ -1,21 +1,21 @@
<h2>System Status Checks</h2>
<style>
.system-checks-table .message {
#system-checks .message {
display: inline;
}
.system-checks-table .icon {
#system-checks .icon {
min-width: 2em;
}
.system-checks-table .heading {
#system-checks .heading {
font-weight: bold;
font-size: 180%;
padding-top: 1.75em;
}
.system-checks-table .heading.first {
#system-checks .heading.first {
border-top: none;
padding-top: 0;
}
@ -98,18 +98,12 @@
Mail-in-a-Box.)</small></p>
</div>
<div class="system-checks-table" id="system-checks-summary">
</div>
<br>
<div class="system-checks-table" id="system-checks">
<div id="system-checks">
</div>
<script>
function show_system_status() {
$('#system-checks').html("")
$('#system-checks-summary').html("")
api(
"/system/privacy",
@ -137,12 +131,6 @@
"POST",
{},
function (r) {
let count_by_status = {
ok: 0,
error: 0,
warning: 0
}
for (let i = 0; i < r.length; i++) {
let n = $("<div class='col-12'><div class='icon'></div><p class='message status-text' style='margin: 0'/>");
if (i == 0) n.addClass('first')
@ -157,7 +145,6 @@
if (r[i].type == "ok") n.find(".icon").addClass("fa-check")
if (r[i].type == "error") n.find(".icon").addClass("fa-times")
if (r[i].type == "warning") n.find(".icon").addClass("fa-exclamation-triangle")
count_by_status[r[i].type]++
n.find('p.status-text').text(r[i].text)
@ -181,6 +168,7 @@
}
for (var j = 0; j < r[i].extra.length; j++) {
var m = $("<div/>").text(r[i].extra[j].text)
if (r[i].extra[j].monospace)
m.addClass("pre");
@ -192,17 +180,8 @@
$('#system-checks').append($("<hr>"));
}
}
})
let summary = $('#system-checks-summary')
if (count_by_status['error'] + count_by_status['warning'] == 0) {
summary.append($("<div class='col-12 status-ok'><div class='icon fas fa-check'></div><p class='message status-text' style='margin: 0'><b class='message status-text'/></p>")).find("p b").text(`All ${count_by_status['ok']} checks OK. No problems found!`);
} else {
summary.append($("<div class='col-12 status-ok'><div class='icon fas fa-check'></div><p class='message status-text' style='margin: 0'/>")).find(".status-ok p").text(`${count_by_status['ok']} OK`);
summary.append($("<div class='col-12 status-warning'><div class='icon fas fa-exclamation-triangle'></div><p class='message status-text' style='margin: 0'/>")).find(".status-warning p").text(`${count_by_status['warning']} Warnings`);
summary.append($("<div class='col-12 status-error'><div class='icon fas fa-times'></div><p class='message status-text' style='margin: 0'/>")).find(".status-error p").text(`${count_by_status['error']} Errors`);
}
}
)
}
var current_privacy_setting = null;

View file

@ -68,10 +68,11 @@
<ul style="margin-top: 1em; padding-left: 1.5em; font-size: 90%;">
<li>Passwords must be at least eight characters. If you're out of ideas, you can <a href="#"
onclick="return generate_random_password()">generate a random password</a>.</li>
<li>Use <a href="#aliases">aliases</a> to create email addresses that forward to
<li>Use <a href="#" onclick="return show_panel('aliases')">aliases</a> to create email addresses that forward to
existing accounts.</li>
<li>Administrators get access to this control panel.</li>
<li>User accounts cannot contain any international (non-ASCII) characters, but <a href="#aliases">aliases</a> can.</li>
<li>User accounts cannot contain any international (non-ASCII) characters, but <a href="#"
onclick="return show_panel('aliases');">aliases</a> can.</li>
<li>Quotas may not contain any spaces, commas or decimal points. Suffixes of G (gigabytes) and M (megabytes) are
allowed. For unlimited storage enter 0 (zero)</li>
</ul>
@ -79,13 +80,6 @@
<button type="submit" class="btn btn-primary">Add User</button>
</form>
<ul style="margin-top: 1em; padding-left: 1.5em; font-size: 90%;">
<li>Passwords must be at least eight characters consisting of English letters and numbers only. For best results, <a href="#" onclick="return generate_random_password()">generate a random password</a>.</li>
<li>Use <a href="#aliases">aliases</a> to create email addresses that forward to existing accounts.</li>
<li>Administrators get access to this control panel.</li>
<li>User accounts cannot contain any international (non-ASCII) characters, but <a href="#aliases">aliases</a> can.</li>
</ul>
<h3>Existing mail users</h3>
<table id="user_table" class="table col-12">
<caption></caption>

View file

@ -13,7 +13,9 @@
</p>
<ol>
<li>Ensure that any domains you are publishing a website for have no problems on the <a href="#system_status">Status Checks</a> page.</li>
<li>Ensure that any domains you are publishing a website for have no problems on the <a href="#system_status"
onclick="return show_panel(this);">Status Checks</a> page.</li>
<li>On your personal computer, install an SSH file transfer program such as <a
href="https://filezilla-project.org/">FileZilla</a> or <a
href="http://linuxcommand.org/man_pages/scp1.html">scp</a>.</li>
@ -41,11 +43,10 @@
</tbody>
</table>
<p>
To add a domain to this table, create a dummy <a href="#users">mail user</a> or <a href="#aliases">alias</a> on the domain first and see
the <a href="https://mailinabox.email/guide.html#domain-name-configuration">setup guide</a>
for adding nameserver records to the new domain at your registrar (but <i>not</i> glue records).
</p>
<p>To add a domain to this table, create a dummy <a href="#users" onclick="return show_panel(this);">mail user</a> or
<a href="#aliases" onclick="return show_panel(this);">alias</a> on the domain first and see the <a
href="https://mailinabox.email/guide.html#domain-name-configuration">setup guide</a> for adding nameserver records
to the new domain at your registrar (but <i>not</i> glue records).</p>
</ol>

View file

@ -13,13 +13,13 @@ def load_environment():
def load_env_vars_from_file(fn):
# Load settings from a KEY=VALUE file.
import collections
env = collections.OrderedDict()
with open(fn, 'r') as f:
for line in f:
env.setdefault(*line.strip().split("=", 1))
return env
# Load settings from a KEY=VALUE file.
import collections
env = collections.OrderedDict()
for line in open(fn):
env.setdefault(*line.strip().split("=", 1))
return env
def save_environment(env):
with open("/etc/mailinabox.conf", "w") as f:
@ -38,15 +38,16 @@ def write_settings(config, env):
def load_settings(env):
import rtyaml
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
try:
with open(fn, "r") as f:
config = rtyaml.load(f)
if not isinstance(config, dict): raise ValueError() # caught below
return config
except:
return { }
import rtyaml
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
try:
config = rtyaml.load(open(fn, "r"))
if not isinstance(config, dict):
raise ValueError() # caught below
return config
except:
return {}
# UTILITIES
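
The settings loader shown above reads settings.yaml defensively: parse the YAML, insist the result is a mapping, and fall back to an empty dict on any failure. A sketch of that pattern with a context manager (rtyaml assumed, as used throughout the management code):

# Defensive YAML settings loader, mirroring the pattern in the hunk above.
import os
import rtyaml

def load_settings(env):
    fn = os.path.join(env["STORAGE_ROOT"], "settings.yaml")
    try:
        with open(fn, "r") as f:
            config = rtyaml.load(f)
        if not isinstance(config, dict):
            raise ValueError()  # treated like any other load failure
        return config
    except Exception:
        return {}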

View file

@ -81,8 +81,7 @@ def get_web_domains_with_root_overrides(env):
root_overrides = {}
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
if os.path.exists(nginx_conf_custom_fn):
with open(nginx_conf_custom_fn, 'r') as f:
custom_settings = rtyaml.load(f)
custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
for domain, settings in custom_settings.items():
for type, value in [('redirect', settings.get('redirects',
{}).get('/')),
@ -137,18 +136,15 @@ def do_web_update(env):
# Pre-load what SSL certificates we will use for each domain.
ssl_certificates = get_ssl_certificates(env)
# Helper for reading config files and templates
def read_conf(conf_fn):
with open(os.path.join(os.path.dirname(__file__), "../conf", conf_fn), "r") as f:
return f.read()
# Build an nginx configuration file.
nginx_conf = read_conf("nginx-top.conf")
nginx_conf = open(
os.path.join(os.path.dirname(__file__),
"../conf/nginx-top.conf")).read()
nginx_conf = re.sub("{{phpver}}", get_php_version(), nginx_conf)
# Add upstream additions
nginx_upstream_include = os.path.join(env["STORAGE_ROOT"], "www", ".upstream.conf")
nginx_upstream_include = os.path.join(env["STORAGE_ROOT"], "www",
".upstream.conf")
if not os.path.exists(nginx_upstream_include):
with open(nginx_upstream_include, "a+") as f:
f.writelines([
@ -161,11 +157,18 @@ def do_web_update(env):
nginx_conf += "\ninclude %s;\n" % (nginx_upstream_include)
# Load the templates.
template0 = read_conf("nginx.conf")
template1 = read_conf("nginx-alldomains.conf")
template2 = read_conf("nginx-primaryonly.conf")
template0 = open(
os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
template1 = open(
os.path.join(os.path.dirname(__file__),
"../conf/nginx-alldomains.conf")).read()
template2 = open(
os.path.join(os.path.dirname(__file__),
"../conf/nginx-primaryonly.conf")).read()
template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n"
template4 = read_conf("nginx-openpgpkey.conf")
template4 = open(
os.path.join(os.path.dirname(__file__),
"../conf/nginx-openpgpkey.conf")).read()
# Add the PRIMARY_HOST configuration first so it becomes nginx's default server.
nginx_conf += make_domain_config(env['PRIMARY_HOSTNAME'],
@ -237,8 +240,11 @@ def make_domain_config(domain, templates, ssl_certificates, env):
def hashfile(filepath):
import hashlib
sha1 = hashlib.sha1()
with open(filepath, 'rb') as f:
f = open(filepath, 'rb')
try:
sha1.update(f.read())
finally:
f.close()
return sha1.hexdigest()
nginx_conf_extra += "\t# ssl files sha1: %s / %s\n" % (hashfile(
@ -248,8 +254,7 @@ def make_domain_config(domain, templates, ssl_certificates, env):
hsts = "yes"
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
if os.path.exists(nginx_conf_custom_fn):
with open(nginx_conf_custom_fn, 'r') as f:
yaml = rtyaml.load(f)
yaml = rtyaml.load(open(nginx_conf_custom_fn))
if domain in yaml:
yaml = yaml[domain]
@ -296,9 +301,9 @@ def make_domain_config(domain, templates, ssl_certificates, env):
# Add the HSTS header.
if hsts == "yes":
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=31536000\" always;\n"
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=15768000\" always;\n"
elif hsts == "preload":
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=31536000; includeSubDomains; preload\" always;\n"
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=15768000; includeSubDomains; preload\" always;\n"
# Add in any user customizations in the includes/ folder.
nginx_conf_custom_include = os.path.join(
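
The hashfile helper in the hunk above fingerprints the certificate files so the generated nginx configuration records which ssl files it was built from. A compact sketch using a context manager (the example file names in the trailing comment are hypothetical):

# SHA-1 fingerprint of a file, as used for the "ssl files sha1" nginx comment.
import hashlib

def hashfile(filepath):
    sha1 = hashlib.sha1()
    with open(filepath, "rb") as f:
        sha1.update(f.read())
    return sha1.hexdigest()

# e.g. "\t# ssl files sha1: %s / %s\n" % (hashfile(ssl_key_path), hashfile(ssl_cert_path))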

View file

@ -1,7 +1,7 @@
Mail-in-a-Box Security Guide
============================
Mail-in-a-Box turns a fresh Ubuntu 22.04 LTS 64-bit machine into a mail server appliance by installing and configuring various components.
Mail-in-a-Box turns a fresh Ubuntu 18.04 LTS 64-bit machine into a mail server appliance by installing and configuring various components.
This page documents the security posture of Mail-in-a-Box. The term “box” is used below to mean a configured Mail-in-a-Box.

View file

@ -19,7 +19,6 @@ if [ ! -f /usr/bin/lsb_release ]; then
echo "This script must be run on a system running one of the following OS-es:"
echo "* Debian 10 (buster)"
echo "* Debian 11 (bullseye)"
echo "* Debian 12 (bookworm)"
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
exit 1
@ -34,7 +33,6 @@ if [ -z "$TAG" ]; then
# Make sure we're running on the correct operating system
OS=$(lsb_release -d | sed 's/.*:\s*//')
if [ "$OS" == "Debian GNU/Linux 11 (bullseye)" ] ||
[ "$OS" == "Debian GNU/Linux 12 (bookworm)" ] ||
[ "$(echo $OS | grep -o 'Ubuntu 20.04')" == "Ubuntu 20.04" ] ||
[ "$(echo $OS | grep -o 'Ubuntu 22.04')" == "Ubuntu 22.04" ]
then
@ -43,7 +41,6 @@ if [ -z "$TAG" ]; then
echo "We are going to install the last version of Power Mail-in-a-Box supporting Debian 10 (buster)."
echo "IF THIS IS A NEW INSTALLATION, STOP NOW, AND USE A SUPPORTED DISTRIBUTION INSTEAD (ONE OF THESE):"
echo "* Debian 11 (bullseye)"
echo "* Debian 12 (bookworm)"
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
echo
@ -71,7 +68,6 @@ if [ -z "$TAG" ]; then
else
echo "This script must be run on a system running one of the following OS-es:"
echo "* Debian 11 (bullseye)"
echo "* Debian 12 (bookworm)"
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
exit 1
@ -87,14 +83,10 @@ if [ ! -d $HOME/mailinabox ]; then
echo
fi
if [ "$SOURCE" == "" ]; then
SOURCE=https://github.com/ddavness/power-mailinabox
fi
echo Downloading Mail-in-a-Box $TAG. . .
git clone \
-b $TAG --depth 1 \
$SOURCE \
https://git.nibbletools.com/beenull/power-mailinabox \
$HOME/mailinabox \
< /dev/null 2> /dev/null

View file

@ -63,7 +63,7 @@ chmod go-rwx $STORAGE_ROOT/mail/dkim
management/editconf.py /etc/opendmarc.conf -s \
"Syslog=true" \
"Socket=inet:8893@[127.0.0.1]" \
"FailureReports=false"
"FailureReports=true"
# SPFIgnoreResults causes the filter to ignore any SPF results in the header
# of the message. This is useful if you want the filter to perform SPF checks
@ -82,11 +82,11 @@ management/editconf.py /etc/opendmarc.conf -s \
management/editconf.py /etc/opendmarc.conf -s \
"SPFSelfValidate=true"
# Disables generation of failure reports for sending domains that publish a
# Enables generation of failure reports for sending domains that publish a
# "none" policy.
management/editconf.py /etc/opendmarc.conf -s \
"FailureReportsOnNone=false"
"FailureReportsOnNone=true"
# AlwaysAddARHeader Adds an "Authentication-Results:" header field even to
# unsigned messages from domains with no "signs all" policy. The reported DKIM

View file

@ -234,7 +234,6 @@ export OS_DEBIAN_10=1
export OS_UBUNTU_2004=2
export OS_DEBIAN_11=3
export OS_UBUNTU_2204=4
export OS_DEBIAN_12=5
function get_os_code {
# A lot of if-statements here - dirty code looking tasting today
@ -248,9 +247,6 @@ function get_os_code {
elif [[ $VER == "11" ]]; then
echo $OS_DEBIAN_11
return 0
elif [[ $VER == "12" ]]; then
echo $OS_DEBIAN_12
return 0
fi
elif [[ $ID == "Ubuntu" ]]; then
if [[ $VER == "20.04" ]]; then

View file

@ -205,13 +205,13 @@ chmod -R o-rwx /etc/dovecot
# Ensure mailbox files have a directory that exists and are owned by the mail user.
mkdir -p $STORAGE_ROOT/mail/mailboxes
chown -R mail:mail $STORAGE_ROOT/mail/mailboxes
chown -R mail.mail $STORAGE_ROOT/mail/mailboxes
# Same for the sieve scripts.
mkdir -p $STORAGE_ROOT/mail/sieve
mkdir -p $STORAGE_ROOT/mail/sieve/global_before
mkdir -p $STORAGE_ROOT/mail/sieve/global_after
chown -R mail:mail $STORAGE_ROOT/mail/sieve
chown -R mail.mail $STORAGE_ROOT/mail/sieve
# Allow the IMAP/POP ports in the firewall.
ufw_allow imaps

View file

@ -69,11 +69,6 @@ management/editconf.py /etc/postfix/main.cf \
maximal_queue_lifetime=2d \
bounce_queue_lifetime=1d
# Guard against SMTP smuggling
# This short-term workaround is recommended at https://www.postfix.org/smtp-smuggling.html
management/editconf.py /etc/postfix/main.cf \
smtpd_data_restrictions=reject_unauth_pipelining
# ### Outgoing Mail
# Enable the 'submission' ports 465 and 587 and tweak their settings.

View file

@ -32,12 +32,6 @@ inst_dir=/usr/local/lib/mailinabox
mkdir -p $inst_dir
venv=$inst_dir/env
if [ ! -d $venv ]; then
# A bug specific to Ubuntu 22.04 and Python 3.10 requires
# forcing a virtualenv directory layout option (see #2335
# and https://github.com/pypa/virtualenv/pull/2415). In
# our issue, reportedly installing python3-distutils didn't
# fix the problem.)
export DEB_PYTHON_INSTALL_LAYOUT='deb'
hide_output virtualenv -ppython3 $venv
elif [ ! -f $venv/.oscode ]; then
echo "Re-creating Python environment..."

View file

@ -34,8 +34,8 @@ contact.admin.always_send warning critical
EOF
# The Debian installer touches these files and chowns them to www-data:adm for use with spawn-fcgi
chown munin /var/log/munin/munin-cgi-html.log
chown munin /var/log/munin/munin-cgi-graph.log
chown munin. /var/log/munin/munin-cgi-html.log
chown munin. /var/log/munin/munin-cgi-graph.log
# ensure munin-node knows the name of this machine
# and reduce logging level to warning

View file

@ -21,8 +21,8 @@ echo "Installing Nextcloud (contacts/calendar)..."
# we automatically install intermediate versions as needed.
# * The hash is the SHA1 hash of the ZIP package, which you can find by just running this script and
# copying it from the error message when it doesn't match what is below.
nextcloud_ver=25.0.13
nextcloud_hash=eaba90f0fedefade9b05ef40844df98d361259b7
nextcloud_ver=24.0.7
nextcloud_hash=7fb1afeb3c212bf5530c3d234b23bf314b47655a
# Nextcloud apps
# --------------
@ -33,18 +33,12 @@ nextcloud_hash=eaba90f0fedefade9b05ef40844df98d361259b7
# https://github.com/nextcloud-releases/user_external
# * The hash is the SHA1 hash of the ZIP package, which you can find by just running this script and
# copying it from the error message when it doesn't match what is below.
# Check here: https://apps.nextcloud.com/apps/contacts
contacts_ver=5.5.3
contacts_hash=b234ab410480a4106176a28f39c9b27f471d0473
# Always ensure the versions are supported, see https://apps.nextcloud.com/apps/calendar
calendar_ver=4.6.8
calendar_hash=b01187e58a18a35774ed6fa97c1d336454208ddd
# And https://apps.nextcloud.com/apps/user_external
user_external_ver=3.4.0
user_external_hash=7f9d8f4dd6adb85a0e3d7622d85eeb7bfe53f3b4
contacts_ver=4.2.2
contacts_hash=cbab9a7acdc11a9e2779c20b850bb21faec1c80f
calendar_ver=3.5.2
calendar_hash=dcf2cba6933dc8805ca4b4d04ed7b993ff4652a1
user_external_ver=3.0.0
user_external_hash=0df781b261f55bbde73d8c92da3f99397000972f
# Clear prior packages and install dependencies from apt.
@ -145,7 +139,7 @@ InstallNextcloud() {
# Make sure permissions are correct or the upgrade step won't run.
# $STORAGE_ROOT/owncloud may not yet exist, so use -f to suppress
# that error.
chown -f -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud || /bin/true
chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud || /bin/true
# If this isn't a new installation, immediately run the upgrade script.
# Then check for success (0=ok and 3=no upgrade needed, both are success).
@ -163,7 +157,6 @@ InstallNextcloud() {
# Add missing indices. NextCloud didn't include this in the normal upgrade because it might take some time.
sudo -u www-data php /usr/local/lib/owncloud/occ db:add-missing-indices
sudo -u www-data php /usr/local/lib/owncloud/occ db:add-missing-primary-keys
# Run conversion to BigInt identifiers, this process may take some time on large tables.
sudo -u www-data php /usr/local/lib/owncloud/occ db:convert-filecache-bigint --no-interaction
@ -227,12 +220,6 @@ if [ ! -d /usr/local/lib/owncloud/ ] || [[ ! ${CURRENT_NEXTCLOUD_VER} =~ ^$nextc
if [ ! -z ${CURRENT_NEXTCLOUD_VER} ]; then
# Database migrations from ownCloud are no longer possible because ownCloud cannot be run under
# PHP 7.
if [ -e $STORAGE_ROOT/owncloud/config.php ]; then
# Remove the read-onlyness of the config, which is needed for migrations, especially for v24
sed -i -e '/config_is_read_only/d' $STORAGE_ROOT/owncloud/config.php
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^[89] ]]; then
echo "Upgrades from Mail-in-a-Box prior to v0.28 (dated July 30, 2018) with Nextcloud < 13.0.6 (you have ownCloud 8 or 9) are not supported. Upgrade to Mail-in-a-Box version v0.30 first. Setup will continue, but skip the Nextcloud migration."
return 0
@ -276,23 +263,18 @@ if [ ! -d /usr/local/lib/owncloud/ ] || [[ ! ${CURRENT_NEXTCLOUD_VER} =~ ^$nextc
CURRENT_NEXTCLOUD_VER="20.0.14"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^20 ]]; then
InstallNextcloud 21.0.9 cf8785107c3c079a1f450743558f4f13c85f37a8 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.3.3 2bbb534d95fe1e0a7368ca3a7c10d6374705a6c1 2.1.0 6e5afe7f36f398f864bfdce9cad72200e70322aa
InstallNextcloud 21.0.9 cf8785107c3c079a1f450743558f4f13c85f37a8 4.1.0 38653b507bd7d953816bbc5e8bea7855867eb1cd 3.2.2 54e9a836adc739be4a2a9301b8d6d2e9d88e02f4 2.1.0 6e5afe7f36f398f864bfdce9cad72200e70322aa
CURRENT_NEXTCLOUD_VER="21.0.9"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^21 ]]; then
InstallNextcloud 22.2.6 9d39741f051a8da42ff7df46ceef2653a1dc70d9 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.5.9 cb3b4df6c9aa99bfc055ab56ba4b7fdcd8e4629d 3.1.0 22cabc88b6fc9c26dad3b46be1a652979c9fcf15
InstallNextcloud 22.2.6 9d39741f051a8da42ff7df46ceef2653a1dc70d9 4.1.0 38653b507bd7d953816bbc5e8bea7855867eb1cd 3.2.2 54e9a836adc739be4a2a9301b8d6d2e9d88e02f4 3.0.0 0df781b261f55bbde73d8c92da3f99397000972f
CURRENT_NEXTCLOUD_VER="22.2.6"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^22 ]]; then
InstallNextcloud 23.0.4 87afec0bf90b3c66289e6fedd851867bc5a58f01 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.5.9 cb3b4df6c9aa99bfc055ab56ba4b7fdcd8e4629d 3.1.0 22cabc88b6fc9c26dad3b46be1a652979c9fcf15
InstallNextcloud 23.0.4 87afec0bf90b3c66289e6fedd851867bc5a58f01 4.1.0 38653b507bd7d953816bbc5e8bea7855867eb1cd 3.2.2 54e9a836adc739be4a2a9301b8d6d2e9d88e02f4 3.0.0 0df781b261f55bbde73d8c92da3f99397000972f
CURRENT_NEXTCLOUD_VER="23.0.4"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^23 ]]; then
InstallNextcloud 24.0.12 7aa5d61632c1ccf4ca3ff00fb6b295d318c05599 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.5.9 cb3b4df6c9aa99bfc055ab56ba4b7fdcd8e4629d 3.1.0 22cabc88b6fc9c26dad3b46be1a652979c9fcf15
CURRENT_NEXTCLOUD_VER="24.0.12"
fi
fi
InstallNextcloud $nextcloud_ver $nextcloud_hash $contacts_ver $contacts_hash $calendar_ver $calendar_hash $user_external_ver $user_external_hash
@ -362,7 +344,7 @@ EOF
EOF
# Set permissions
chown -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
chown -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
# Execute Nextcloud's setup step, which creates the Nextcloud sqlite database.
# It also wipes it if it exists. And it updates config.php with database
@ -386,12 +368,12 @@ php <<EOF > $CONFIG_TEMP && mv $CONFIG_TEMP $STORAGE_ROOT/owncloud/config.php;
<?php
include("$STORAGE_ROOT/owncloud/config.php");
\$CONFIG['config_is_read_only'] = false;
\$CONFIG['config_is_read_only'] = true;
\$CONFIG['trusted_domains'] = array('$PRIMARY_HOSTNAME');
\$CONFIG['memcache.local'] = '\OC\Memcache\APCu';
\$CONFIG['overwrite.cli.url'] = 'https://${PRIMARY_HOSTNAME}/cloud';
\$CONFIG['overwrite.cli.url'] = '/cloud';
\$CONFIG['mail_from_address'] = 'administrator'; # just the local part, matches our master administrator address
\$CONFIG['logtimezone'] = '$TIMEZONE';
@ -413,7 +395,7 @@ var_export(\$CONFIG);
echo ";";
?>
EOF
chown www-data:www-data $STORAGE_ROOT/owncloud/config.php
chown www-data.www-data $STORAGE_ROOT/owncloud/config.php
# Enable/disable apps. Note that this must be done after the Nextcloud setup.
# The firstrunwizard gave Josh all sorts of problems, so disabling that.
@ -455,45 +437,20 @@ management/editconf.py /etc/php/$(php_version)/cli/conf.d/10-opcache.ini -c ';'
opcache.save_comments=1 \
opcache.revalidate_freq=1
# Migrate users_external data from <0.6.0 to version 3.0.0
# (see https://github.com/nextcloud/user_external).
# Migrate users_external data from <0.6.0 to version 3.0.0 (see https://github.com/nextcloud/user_external).
# This version was probably in use in Mail-in-a-Box v0.41 (February 26, 2019) and earlier.
# We moved to v0.6.3 in 193763f8. Ignore errors - maybe there are duplicated users with the
# correct backend already.
sqlite3 $STORAGE_ROOT/owncloud/owncloud.db "UPDATE oc_users_external SET backend='127.0.0.1';" || /bin/true
# Set up a general cron job for Nextcloud.
# Also add another job for Calendar updates, per advice in the Nextcloud docs
# https://docs.nextcloud.com/server/24/admin_manual/groupware/calendar.html#background-jobs
# Set up a cron job for Nextcloud.
cat > /etc/cron.d/mailinabox-nextcloud << EOF;
#!/bin/bash
# Mail-in-a-Box
*/5 * * * * root sudo -u www-data php -f /usr/local/lib/owncloud/cron.php
*/5 * * * * root sudo -u www-data php -f /usr/local/lib/owncloud/occ dav:send-event-reminders
EOF
chmod +x /etc/cron.d/mailinabox-nextcloud
# We also need to change the sending mode from background-job to occ.
# Or else the reminders will just be sent as soon as possible when the background jobs run.
hide_output sudo -u www-data php -f /usr/local/lib/owncloud/occ config:app:set dav sendEventRemindersMode --value occ
# Now set the config to read-only.
# Do this only at the very bottom when no further occ commands are needed.
sed -i'' "s/'config_is_read_only'\s*=>\s*false/'config_is_read_only' => true/" $STORAGE_ROOT/owncloud/config.php
# Rotate the nextcloud.log file
cat > /etc/logrotate.d/nextcloud <<EOF
# Nextcloud logs
$STORAGE_ROOT/owncloud/nextcloud.log {
size 10M
create 640 www-data www-data
rotate 30
copytruncate
missingok
compress
}
EOF
# There's nothing much of interest that a user could do as an admin for Nextcloud,
# and there's a lot they could mess up, so we don't make any users admins of Nextcloud.
# But if we wanted to, we would do this:
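The command this comment alludes to is not shown in the hunk; granting admin rights would presumably go through occ's group management, roughly as below. This is a sketch, not the script's actual elided line, and the mailbox address is hypothetical:

    # Hypothetical example: make me@example.com a Nextcloud admin.
    sudo -u www-data php /usr/local/lib/owncloud/occ group:adduser admin me@example.com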


@ -13,8 +13,8 @@ fi
case $(get_os_code) in
$OS_UNSUPPORTED)
echo "This version of Power Mail-in-a-Box only supports being installed on one of these operating systems:"
# echo "* Debian 10 (buster)"
echo "* Debian 11 (bullseye)"
echo "* Debian 12 (bookworm)"
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
echo


@ -9,19 +9,13 @@ if [ -z "${NONINTERACTIVE:-}" ]; then
if [ ! -f /usr/bin/dialog ] || [ ! -f /usr/bin/python3 ] || [ ! -f /usr/bin/pip3 ]; then
echo Installing packages needed for setup...
apt-get -q -q update
apt_get_quiet install dialog file python3 python3-pip || exit 1
apt_get_quiet install dialog file python3 python3-pip || exit 1
fi
# Installing email_validator is repeated in setup/management.sh, but in setup/management.sh
# we install it inside a virtualenv. In this script, we don't have the virtualenv yet
# so we install the python package globally
# On Debian 12, this package must be installed via apt-get
if [ "$(get_os_code)" -eq "${OS_DEBIAN_12}" ]; then
apt_get_quiet install python3-email-validator
else
hide_output pip3 install "email_validator>=1.0.0"
fi
# so we install the python package globally.
hide_output pip3 install "email_validator>=1.0.0" || exit 1
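For contrast with the global install above, the virtualenv-based install mentioned in the comment (performed later by setup/management.sh) would look roughly like this; the venv path is an assumption for illustration only:

    # Sketch of the virtualenv variant referred to above (path is assumed):
    python3 -m venv /usr/local/lib/mailinabox/env
    /usr/local/lib/mailinabox/env/bin/pip install "email_validator>=1.0.0"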
message_box "Mail-in-a-Box Installation" \
"Hello and thanks for deploying a (Power) Mail-in-a-Box!


@ -23,12 +23,8 @@ echo "Installing SpamAssassin..."
apt_install spampd razor pyzor dovecot-antispam libmail-dkim-perl
# Allow spamassassin to download new rules.
# On Debian 12, this file is named /etc/default/spamd
if [ -f /etc/default/spamassassin ]; then
management/editconf.py /etc/default/spamassassin CRON=1
else
management/editconf.py /etc/default/spamd CRON=1
fi
management/editconf.py /etc/default/spamassassin \
CRON=1
# Configure pyzor, which is a client to a live database of hashes of
# spam emails. Set the pyzor configuration directory to something sane.


@ -90,7 +90,7 @@ f=$STORAGE_ROOT
while [[ $f != / ]]; do chmod a+rx "$f"; f=$(dirname "$f"); done;
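As a concrete illustration of the loop above: with the default STORAGE_ROOT of /home/user-data (an assumption; yours may differ) it amounts to:

    chmod a+rx /home/user-data
    chmod a+rx /home
    # the loop exits before it would chmod / itself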
if [ ! -f $STORAGE_ROOT/mailinabox.version ]; then
setup/migrate.py --current > $STORAGE_ROOT/mailinabox.version
chown $STORAGE_USER:$STORAGE_USER $STORAGE_ROOT/mailinabox.version
chown $STORAGE_USER.$STORAGE_USER $STORAGE_ROOT/mailinabox.version
fi
chmod 751 $STORAGE_ROOT
@ -185,7 +185,7 @@ if management/status_checks.py --check-primary-hostname; then
echo "If you have a DNS problem put the box's IP address in the URL"
echo "(https://$PUBLIC_IP/admin) but then check the TLS fingerprint:"
openssl x509 -in $STORAGE_ROOT/ssl/ssl_certificate.pem -noout -fingerprint -sha256\
| sed "s/SHA256 Fingerprint=//i"
| sed "s/SHA256 Fingerprint=//"
else
echo https://$PUBLIC_IP/admin
echo
@ -193,7 +193,7 @@ else
echo the certificate fingerprint matches:
echo
openssl x509 -in $STORAGE_ROOT/ssl/ssl_certificate.pem -noout -fingerprint -sha256\
| sed "s/SHA256 Fingerprint=//i"
| sed "s/SHA256 Fingerprint=//"
echo
echo Then you can confirm the security exception and continue.
echo


@ -1,5 +1,3 @@
#!/usr/bin/env bash
source /etc/mailinabox.conf
source setup/functions.sh # load our functions
@ -127,7 +125,7 @@ apt_get_quiet autoremove
echo Installing system packages...
apt_install python3 python3-dev python3-pip python3-setuptools \
netcat-openbsd wget curl git sudo coreutils bc file \
pollinate xxd openssh-client unzip \
pollinate openssh-client unzip \
unattended-upgrades cron ntp fail2ban rsyslog
# ### Suppress Upgrade Prompts
@ -222,11 +220,10 @@ dd if=/dev/random of=/dev/urandom bs=1 count=32 2> /dev/null
# is really any good on virtualized systems, we'll also seed from Ubuntu's
# pollinate servers:
rm -rf /var/cache/pollinate/*
if ! sudo -u pollinate pollinate -q -r --strict 2> /dev/null; then
if ! pollinate -q -r --strict 2> /dev/null; then
# In case pollinate is ill-configured (e.g. the server is example.com), try a server we know works.
# Even if this fails, don't bail; carry on.
sudo -u pollinate pollinate -q -r -s entropy.ubuntu.com 2> /dev/null
pollinate -q -r -s entropy.ubuntu.com 2> /dev/null
fi
# Between these two, we really ought to be all set.
@ -348,12 +345,6 @@ fi
# which is where bind9 will be running. Obviously don't do this before
# installing bind9 or else apt won't be able to resolve a server to
# download bind9 from.
# On Debian 12, this service needs to be installed first
if [ "$(get_os_code)" -eq "${OS_DEBIAN_12}" ]; then
apt_get_quiet install systemd-resolved
fi
rm -f /etc/resolv.conf
management/editconf.py /etc/systemd/resolved.conf DNSStubListener=no
echo "nameserver 127.0.0.1" > /etc/resolv.conf
@ -381,5 +372,3 @@ cp -f conf/fail2ban/filter.d/* /etc/fail2ban/filter.d/
# scripts will ensure the files exist and then fail2ban is given another
# restart at the very end of setup.
restart_service fail2ban
systemctl enable fail2ban


@ -22,9 +22,8 @@ source /etc/mailinabox.conf # load global vars
echo "Installing Roundcube (webmail)..."
apt_install \
dbconfig-common \
php-cli php-sqlite3 php-intl php-json php-common php-curl php-ldap php-imap\
php-gd php-pspell libjs-jquery libjs-jquery-mousewheel libmagic1 php-mbstring php-gnupg \
sqlite3
php-cli php-sqlite3 php-intl php-json php-common php-curl php-ldap \
php-gd php-pspell libjs-jquery libjs-jquery-mousewheel libmagic1 php-mbstring php-gnupg
# Install Roundcube from source if it is not already present or if it is out of date.
# Combine the Roundcube version number with the commit hash of plugins to track
@ -36,9 +35,9 @@ apt_install \
# https://github.com/mstilkerich/rcmcarddav/releases
# The easiest way to get the package hashes is to run this script and get the hash from
# the error message.
VERSION=1.6.5
HASH=326fcc206cddc00355e98d1e40fd0bcd9baec69f
PERSISTENT_LOGIN_VERSION=bde7b6840c7d91de627ea14e81cf4133cbb3c07a # version 5.3
VERSION=1.6.0
HASH=fd84b4fac74419bb73e7a3bcae1978d5589c52de
PERSISTENT_LOGIN_VERSION=version-5.3.0
HTML5_NOTIFIER_VERSION=68d9ca194212e15b3c7225eb6085dbcf02fd13d7 # version 0.6.4+
CARDDAV_VERSION=4.4.4
CARDDAV_HASH=743fd6925b775f821aa8860982d2bdeec05f5d7b
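Besides reading the hash out of the script's error message as the comment suggests, you can compute it directly from the release tarball. A sketch, assuming the 40-character hashes above are SHA-1 (their length suggests so) and that the usual Roundcube release URL applies:

    wget https://github.com/roundcube/roundcubemail/releases/download/1.6.0/roundcubemail-1.6.0-complete.tar.gz
    sha1sum roundcubemail-1.6.0-complete.tar.gz    # compare against HASH above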
@ -142,6 +141,8 @@ cat > $RCM_CONFIG <<EOF;
\$config['password_charset'] = 'UTF-8';
\$config['junk_mbox'] = 'Spam';
/* Ensure Roundcube session IDs aren't leaked to other parts of the server */
\$config['session_path'] = '/mail/';
/* prevent CSRF, requires php 7.3+ */
\$config['session_samesite'] = 'Strict';
\$config['quota_zero_as_unlimited'] = true;
@ -199,7 +200,7 @@ EOF
# Create writable directories.
mkdir -p /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
chown -R www-data:www-data /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
chown -R www-data.www-data /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
# Ensure the log file monitored by fail2ban exists, or else fail2ban can't start.
sudo -u www-data touch /var/log/roundcubemail/errors.log
@ -222,14 +223,14 @@ usermod -a -G dovecot www-data
# set permissions so that PHP can use users.sqlite
# could use dovecot instead of www-data, but not sure it matters
chown root:www-data $STORAGE_ROOT/mail
chown root.www-data $STORAGE_ROOT/mail
chmod 775 $STORAGE_ROOT/mail
chown root:www-data $STORAGE_ROOT/mail/users.sqlite
chown root.www-data $STORAGE_ROOT/mail/users.sqlite
chmod 664 $STORAGE_ROOT/mail/users.sqlite
# Fix Carddav permissions:
chown -f -R root:www-data ${RCM_PLUGIN_DIR}/carddav
# root:www-data need all permissions, others only read
chown -f -R root.www-data ${RCM_PLUGIN_DIR}/carddav
# root.www-data need all permissions, others only read
chmod -R 774 ${RCM_PLUGIN_DIR}/carddav
# Run Roundcube database migration script (database is created if it does not exist)
@ -237,16 +238,6 @@ php ${RCM_DIR}/bin/updatedb.sh --dir ${RCM_DIR}/SQL --package roundcube
chown www-data:www-data $STORAGE_ROOT/mail/roundcube/roundcube.sqlite
chmod 664 $STORAGE_ROOT/mail/roundcube/roundcube.sqlite
# Patch the Roundcube code to eliminate an issue that causes postfix to reject our sqlite
# user database (see https://github.com/mail-in-a-box/mailinabox/issues/2185)
sed -i.miabold 's/^[^#]\+.\+PRAGMA journal_mode = WAL.\+$/#&/' \
/usr/local/lib/roundcubemail/program/lib/Roundcube/db/sqlite.php
# Because Roundcube wants to set the PRAGMA we just deleted from the source, we apply it here
# to the roundcube database (see https://github.com/roundcube/roundcubemail/issues/8035)
# Database should exist, created by migration script
sqlite3 $STORAGE_ROOT/mail/roundcube/roundcube.sqlite 'PRAGMA journal_mode=WAL;'
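To verify the pragma stuck (WAL mode is persisted in the database file), the same query with no value prints the current mode:

    sqlite3 $STORAGE_ROOT/mail/roundcube/roundcube.sqlite 'PRAGMA journal_mode;'   # prints: wal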
# Enable PHP modules.
phpenmod -v php mcrypt imap
restart_service php$(php_version)-fpm


@ -22,8 +22,8 @@ apt_install \
phpenmod -v php imap
# Copy Z-Push into place.
VERSION=2.7.1
TARGETHASH=f15c566b1ad50de24f3f08f505f0c3d8155c2d0d
VERSION=2.6.2
TARGETHASH=f0e8091a8030e5b851f5ba1f9f0e1a05b8762d80
needs_update=0 #NODOC
if [ ! -f /usr/local/lib/z-push/version ]; then
needs_update=1 #NODOC
@ -41,15 +41,7 @@ if [ $needs_update == 1 ]; then
mv /tmp/z-push/*/src /usr/local/lib/z-push
rm -rf /tmp/z-push.zip /tmp/z-push
# Create admin and top scripts with PHP_VER
rm -f /usr/sbin/z-push-{admin,top}
echo '#!/bin/bash' > /usr/sbin/z-push-admin
echo php /usr/local/lib/z-push/z-push-admin.php '"$@"' >> /usr/sbin/z-push-admin
chmod 755 /usr/sbin/z-push-admin
echo '#!/bin/bash' > /usr/sbin/z-push-top
echo php /usr/local/lib/z-push/z-push-top.php '"$@"' >> /usr/sbin/z-push-top
chmod 755 /usr/sbin/z-push-top
echo $VERSION > /usr/local/lib/z-push/version
fi
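Once those wrapper scripts exist, day-to-day use is just the two commands, for example (fixstates is one of z-push-admin's standard actions, but treat the exact flags as an assumption):

    z-push-top                  # live view of active ActiveSync connections
    z-push-admin -a fixstates   # repair device state data, e.g. after an upgrade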


@ -40,8 +40,8 @@ cp "$1/owncloud.db" $STORAGE_ROOT/owncloud/
cp "$1/config.php" $STORAGE_ROOT/owncloud/
ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php
chown -f -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
chown www-data:www-data $STORAGE_ROOT/owncloud/config.php
chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
chown www-data.www-data $STORAGE_ROOT/owncloud/config.php
sudo -u www-data php /usr/local/lib/owncloud/occ maintenance:mode --off
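For context, the restore script takes a single argument: a directory containing the backed-up owncloud.db and config.php. A hypothetical invocation (the script name and backup path are placeholders, not taken from the repository):

    sudo bash owncloud-restore.sh /path/to/backup/owncloud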


@ -17,8 +17,13 @@ accesses = set()
# Scan the current and rotated access logs.
for fn in glob.glob("/var/log/nginx/access.log*"):
# Gunzip if necessary.
if fn.endswith(".gz"):
f = gzip.open(fn)
else:
f = open(fn, "rb")
# Loop through the lines in the access log.
with (gzip.open if fn.endswith(".gz") else open)(fn, "rb") as f:
with f:
for line in f:
# Find lines that are GETs on the bootstrap script by either curl or wget.
# (Note that we purposely skip ...?ping=1 requests, which are the admin panel querying us for updates.)
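The matching expression itself lives outside this hunk. Just to give a feel for what such a filter does, a stand-alone shell equivalent might be (the request path and the pattern are guesses, not the script's real regex):

    zcat -f /var/log/nginx/access.log* \
      | grep '"GET /setup.sh' \
      | grep -Ei 'curl|wget' \
      | grep -v 'ping=1'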
@ -38,8 +43,7 @@ for date, ip in accesses:
# Since logs are rotated, store the statistics permanently in a JSON file.
# Load in the stats from an existing file.
if os.path.exists(outfn):
with open(outfn, "r") as f:
existing_data = json.load(f)
existing_data = json.load(open(outfn))
for date, count in existing_data:
if date not in by_date:
by_date[date] = count


@ -124,14 +124,13 @@ def generate_documentation():
""")
parser = Source.parser()
with open("setup/start.sh", "r") as start_file:
for line in start_file:
try:
fn = parser.parse_string(line).filename()
except:
continue
if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
continue
for line in open("setup/start.sh"):
try:
fn = parser.parse_string(line).filename()
except:
continue
if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
continue
import sys
print(fn, file=sys.stderr)
@ -402,8 +401,7 @@ class BashScript(Grammar):
@staticmethod
def parse(fn):
if fn in ("setup/functions.sh", "/etc/mailinabox.conf"): return ""
with open(fn, "r") as f:
string = f.read()
string = open(fn).read()
# tokenize
string = re.sub(".* #NODOC\n", "", string)