Compare commits: main...current-de (70 commits)

Commit SHA1s: c4b6811ef1, f06996d5be, 27be43fff8, 0a263ee037, 4cf1185c6c, 17df17aa34, d1ff6bff21, 0408dd163a, 7f17792b0a, db9622b0af, 7646095b94, faf23f150c, 8e4e9add78, fa8c7ddef5, 6d6ce25e03, 371f5bc1b2, 0314554207, 46d55f7866, 2bbc317873, 28f929dc13, e419b62034, a966913963, 08defb12be, 7be687e601, 62efe985f1, df44056bae, 3148c621d2, 81866de229, 674ce92e92, c034b0f789, cd45d08409, 98628622c7, 8b19d15735, 93380b243f, fb0a3b0489, 3bc9d07aeb, 51ed030917, e828d63a85, 0ee0784bde, 6d43d24552, 963fb9f2e6, c9584148a0, 9a33f9c5ff, 95530affbf, f72be0be7c, 8aa98b25b5, 3c15081673, 01d8e9f3b4, aa176b09b0, 9d16118d34, 88260bb610, ab5959e890, 6f94412204, c77d1697a7, 31bbef3401, 7af713592a, 4408cb1fba, 5e3e4a2161, 61d1ea1ea7, b3743a31e9, 26709a3c1d, 20ec6c2080, 7a79153afe, a2565227f2, 02b34ce699, 820a39b865, 57047d96e9, 1587248762, 0fc5105da5, c29593b5ef
42 changed files with 581 additions and 266 deletions
CHANGELOG.md (84 lines changed)

@@ -1,6 +1,90 @@
CHANGELOG
=========

Version 67 (December 22, 2023)
------------------------------

* Guard against a newly published vulnerability called SMTP Smuggling. See https://sec-consult.com/blog/detail/smtp-smuggling-spoofing-e-mails-worldwide/.

Version 66 (December 17, 2023)
------------------------------

* Some users reported an error installing Mail-in-a-Box related to the virtualenv command. This is hopefully fixed.
* Roundcube is updated to 1.6.5 fixing a security vulnerability.
* For Mail-in-a-Box developers, a new setup variable is added to pull the source code from a different repository.

Version 65 (October 27, 2023)
-----------------------------

* Roundcube updated to 1.6.4 fixing a security vulnerability.
* zpush.sh updated to version 2.7.1.
* Fixed a typo in the control panel.

Version 64 (September 2, 2023)
------------------------------

* Fixed broken installation when upgrading from Mail-in-a-Box version 56 (Nextcloud 22) and earlier because of an upstream packaging issue.
* Fixed backups to work with the latest duplicity package which was not backwards compatible.
* Fixed setting B2 as a backup target with a slash in the application key.
* Turned off OpenDMARC diagnostic reports sent in response to incoming mail.
* Fixed some crashes when using an unrelased version of Mail-in-a-Box.
* Added z-push administration scripts.

Version 63 (July 27, 2023)
--------------------------

* Nextcloud updated to 25.0.7.

Version 62 (May 20, 2023)
-------------------------

Package updates:

* Nextcloud updated to 23.0.12 (and its apps also updated).
* Roundcube updated to 1.6.1.
* Z-Push to 2.7.0, which has compatibility for Ubuntu 22.04, so it works again.

Mail:

* Roundcube's password change page is now working again.

Control panel:

* Allow setting the backup location's S3 region name for non-AWS S3-compatible backup hosts.
* Control panel pages can be opened in a new tab/window and bookmarked and browser history navigation now works.
* Add a Copy button to put the rsync backup public key on clipboard.
* Allow secondary DNS xfr: items added in the control panel to be hostnames too.
* Fixed issue where sshkeygen fails when IPv6 is disabled.
* Fixed issue opening munin reports.
* Fixed report formatting in status emails sent to the administrator.

Version 61.1 (January 28, 2023)
-------------------------------

* Fixed rsync backups not working with the default port.
* Reverted "Improve error messages in the management tools when external command-line tools are run." because of the possibility of user secrets being included in error messages.
* Fix for TLS certificate SHA fingerprint not being displayed during setup.

Version 61 (January 21, 2023)
-----------------------------

System:

* fail2ban didn't start after setup.

Mail:

* Disable Roundcube password plugin since it was corrupting the user database.

Control panel:

* Fix changing existing backup settings when the rsync type is used.
* Allow setting a custom port for rsync backups.
* Fixes to DNS lookups during status checks when there are timeouts, enforce timeouts better.
* A new check is added to ensure fail2ban is running.
* Fixed a color.
* Improve error messages in the management tools when external command-line tools are run.

Version 60.1 (October 30, 2022)
-------------------------------
Vagrantfile (vendored, 16 lines changed)

@@ -4,10 +4,6 @@
ip = 2
machines = [
{
'iso' => "debian/buster64",
'host' => "buster"
},
{
'iso' => "generic/ubuntu2004",
'host' => "focal"
@@ -20,19 +16,23 @@ machines = [
'iso' => "generic/ubuntu2204",
'host' => "jammy"
},
{
'iso' => "debian/bookworm64",
'host' => "bookworm"
}
]

Vagrant.configure("2") do |config|
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--cpus", 1, "--memory", 768]
vb.customize ["modifyvm", :id, "--cpus", 1, "--memory", 1024]
end
config.vm.provider :libvirt do |v|
v.memory = 768
v.memory = 1024
v.cpus = 1
v.nested = true
end
config.vm.provider :kvm do |kvm|
kvm.memory_size = '768m'
kvm.memory_size = '1024m'
end

# Network config: Since it's a mail server, the machine must be connected
@@ -49,6 +49,8 @@ Vagrant.configure("2") do |config|
m.vm.network "private_network", ip: "192.168.168.#{ip+n}"

m.vm.provision "shell", :inline => <<-SH
apt-get update
apt-get install git -y # Just in case git isn't installed
git config --global --add safe.directory /vagrant

# Set environment variables so that the setup script does
@@ -33,6 +33,8 @@
fastcgi_split_path_info ^/mail(/.*)()$;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME /usr/local/lib/roundcubemail/$fastcgi_script_name;
# ensure roudcube session id's aren't leaked to other parts of the server
fastcgi_param PHP_VALUE "session.cookie_path=/mail/";
fastcgi_pass php-default;

# Outgoing mail also goes through this endpoint, so increase the maximum
@@ -98,3 +100,10 @@
rewrite ^/.well-known/host-meta.json /cloud/public.php?service=host-meta-json last;
rewrite ^/.well-known/carddav /cloud/remote.php/carddav/ redirect;
rewrite ^/.well-known/caldav /cloud/remote.php/caldav/ redirect;

# This addresses those service discovery issues mentioned in:
# https://docs.nextcloud.com/server/23/admin_manual/issues/general_troubleshooting.html#service-discovery
rewrite ^/.well-known/webfinger /cloud/index.php/.well-known/webfinger redirect;
rewrite ^/.well-known/nodeinfo /cloud/index.php/.well-known/nodeinfo redirect;

# ADDITIONAL DIRECTIVES HERE
@@ -74,10 +74,11 @@ def backup_status(env):
"/usr/local/bin/duplicity",
"collection-status",
"--archive-dir", backup_cache_dir,
"--gpg-options", "--cipher-algo=AES256",
"--gpg-options", "'--cipher-algo=AES256'",
"--log-fd", "1",
get_duplicity_target_url(config),
] + get_duplicity_additional_args(env),
] + get_duplicity_additional_args(env) + [
get_duplicity_target_url(config)
],
get_duplicity_env_vars(env),
trap=True)
if code != 0:
@@ -239,10 +240,10 @@ def get_duplicity_target_url(config):
# the target URL must be the bucket name. The hostname is passed
# via get_duplicity_additional_args. Move the first part of the
# path (the bucket name) into the hostname URL component, and leave
# the rest for the path.
target_bucket = target[2].lstrip('/').split('/', 1)
target[1] = target_bucket[0]
target[2] = target_bucket[1] if len(target_bucket) > 1 else ''
# the rest for the path. (The S3 region name is also stored in the
# hostname part of the URL, in the username portion, which we also
# have to drop here).
target[1], target[2] = target[2].lstrip('/').split('/', 1)

target = urlunsplit(target)
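The hunk above reworks how the S3 target URL is assembled for duplicity: the bucket name is promoted into the hostname component and the region name (carried in the username portion of the netloc) is dropped. A minimal stand-alone sketch of that reshaping, using made-up endpoint, region, and bucket names rather than the project's own function:

```python
# Illustrative sketch only (not the project's code); endpoint, region and
# bucket names are invented. The bucket (first path segment) becomes the URL's
# host component and the "region@endpoint" netloc is dropped, as described above.
from urllib.parse import urlsplit, urlunsplit

def rewrite_s3_target(url):
    parts = list(urlsplit(url))  # [scheme, netloc, path, query, fragment]
    bucket_and_path = parts[2].lstrip('/').split('/', 1)
    parts[1] = bucket_and_path[0]
    parts[2] = bucket_and_path[1] if len(bucket_and_path) > 1 else ''
    return urlunsplit(parts)

print(rewrite_s3_target("s3://eu-central-1@s3.example.com/my-bucket/box-backups"))
# prints: s3://my-bucket/box-backups
```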
@@ -258,16 +259,32 @@ def get_duplicity_additional_args(env):
port = 22

if get_target_type(config) == 'rsync':
# Extract a port number for the ssh transport. Duplicity accepts the
# optional port number syntax in the target, but it doesn't appear to act
# on it, so we set the ssh port explicitly via the duplicity options.
from urllib.parse import urlsplit
try:
port = urlsplit(config["target"]).port
except ValueError:
port = 22
if port is None:
port = 22

return [
f"--ssh-options= -i /root/.ssh/id_rsa_miab -p {port}",
f"--rsync-options= -e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"",
f"--ssh-options='-i /root/.ssh/id_rsa_miab -p {port}'",
f"--rsync-options='-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"'",
]
elif get_target_type(config) == 's3':
# See note about hostname in get_duplicity_target_url.
# The region name, which is required by some non-AWS endpoints,
# is saved inside the username portion of the URL.
from urllib.parse import urlsplit, urlunsplit
target = urlsplit(config["target"])
endpoint_url = urlunsplit(("https", target.netloc, '', '', ''))
return ["--s3-endpoint-url", endpoint_url]
endpoint_url = urlunsplit(("https", target.hostname, '', '', ''))
args = ["--s3-endpoint-url", endpoint_url]
if target.username: # region name is stuffed here
args += ["--s3-region-name", target.username]
return args

return []
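For the rsync branch above, the ssh port is parsed out of the target URL and defaults to 22 when it is absent or malformed, then handed to duplicity via --ssh-options / --rsync-options. A small illustrative sketch of that fallback logic, with hypothetical hostnames:

```python
# Minimal sketch (made-up hosts) of the port fallback described above.
from urllib.parse import urlsplit

def rsync_ssh_port(target_url):
    try:
        port = urlsplit(target_url).port  # raises ValueError on a malformed port
    except ValueError:
        return 22
    return port if port is not None else 22

assert rsync_ssh_port("rsync://backup@backups.example.net/./box") == 22
assert rsync_ssh_port("rsync://backup@backups.example.net:2222/./box") == 2222
```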
@ -362,11 +379,12 @@ def perform_backup(full_backup, user_initiated=False):
|
|||
"--archive-dir", backup_cache_dir,
|
||||
"--exclude", backup_root,
|
||||
"--volsize", "250",
|
||||
"--gpg-options", "--cipher-algo=AES256",
|
||||
"--gpg-options", "'--cipher-algo=AES256'",
|
||||
"--allow-source-mismatch"
|
||||
] + get_duplicity_additional_args(env) + [
|
||||
env["STORAGE_ROOT"],
|
||||
get_duplicity_target_url(config),
|
||||
"--allow-source-mismatch"
|
||||
] + get_duplicity_additional_args(env),
|
||||
],
|
||||
get_duplicity_env_vars(env))
|
||||
finally:
|
||||
# Start services again.
|
||||
|
@ -384,8 +402,9 @@ def perform_backup(full_backup, user_initiated=False):
|
|||
"--verbosity", "error",
|
||||
"--archive-dir", backup_cache_dir,
|
||||
"--force",
|
||||
] + get_duplicity_additional_args(env) + [
|
||||
get_duplicity_target_url(config)
|
||||
] + get_duplicity_additional_args(env),
|
||||
],
|
||||
get_duplicity_env_vars(env))
|
||||
|
||||
# From duplicity's manual:
|
||||
|
@ -399,8 +418,9 @@ def perform_backup(full_backup, user_initiated=False):
|
|||
"--verbosity", "error",
|
||||
"--archive-dir", backup_cache_dir,
|
||||
"--force",
|
||||
] + get_duplicity_additional_args(env) + [
|
||||
get_duplicity_target_url(config)
|
||||
] + get_duplicity_additional_args(env),
|
||||
],
|
||||
get_duplicity_env_vars(env))
|
||||
|
||||
# Change ownership of backups to the user-data user, so that the after-bcakup
|
||||
|
@ -445,9 +465,10 @@ def run_duplicity_verification():
|
|||
"--compare-data",
|
||||
"--archive-dir", backup_cache_dir,
|
||||
"--exclude", backup_root,
|
||||
] + get_duplicity_additional_args(env) + [
|
||||
get_duplicity_target_url(config),
|
||||
env["STORAGE_ROOT"],
|
||||
] + get_duplicity_additional_args(env), get_duplicity_env_vars(env))
|
||||
], get_duplicity_env_vars(env))
|
||||
|
||||
def run_duplicity_restore(args):
|
||||
env = load_environment()
|
||||
|
@ -457,9 +478,23 @@ def run_duplicity_restore(args):
|
|||
"/usr/local/bin/duplicity",
|
||||
"restore",
|
||||
"--archive-dir", backup_cache_dir,
|
||||
get_duplicity_target_url(config),
|
||||
] + get_duplicity_additional_args(env) + args,
|
||||
get_duplicity_env_vars(env))
|
||||
] + get_duplicity_additional_args(env) + [
|
||||
get_duplicity_target_url(config)
|
||||
] + args,
|
||||
get_duplicity_env_vars(env))
|
||||
|
||||
def print_duplicity_command():
|
||||
import shlex
|
||||
env = load_environment()
|
||||
config = get_backup_config(env)
|
||||
backup_cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache')
|
||||
for k, v in get_duplicity_env_vars(env).items():
|
||||
print(f"export {k}={shlex.quote(v)}")
|
||||
print("duplicity", "{command}", shlex.join([
|
||||
"--archive-dir", backup_cache_dir,
|
||||
] + get_duplicity_additional_args(env) + [
|
||||
get_duplicity_target_url(config)
|
||||
]))
|
||||
|
||||
def list_target_files(config):
|
||||
import urllib.parse
|
||||
|
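Across the hunks above, the call sites are rearranged so that the provider-specific options returned by get_duplicity_additional_args() come before the positional source/target arguments. A rough sketch of the resulting command shape, with placeholder paths and URLs (not the project's code):

```python
# Compact sketch of the argument ordering used in the duplicity invocations above.
# Provider-specific options go before the positional source/target arguments;
# all paths and URLs are placeholders.
def build_duplicity_args(base_args, additional_args, positional):
    return ["/usr/local/bin/duplicity", *base_args, *additional_args, *positional]

print(build_duplicity_args(
    ["full", "--archive-dir", "/home/user-data/backup/cache", "--volsize", "250"],
    ["--s3-endpoint-url", "https://s3.example.com", "--s3-region-name", "eu-central-1"],
    ["/home/user-data", "s3://my-bucket/box-backups"],
))
```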
@ -476,6 +511,17 @@ def list_target_files(config):
|
|||
rsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)')
|
||||
rsync_target = '{host}:{path}'
|
||||
|
||||
# Strip off any trailing port specifier because it's not valid in rsync's
|
||||
# DEST syntax. Explicitly set the port number for the ssh transport.
|
||||
user_host, *_ = target.netloc.rsplit(':', 1)
|
||||
try:
|
||||
port = target.port
|
||||
except ValueError:
|
||||
port = 22
|
||||
|
||||
if port is None:
|
||||
port = 22
|
||||
|
||||
target_path = target.path
|
||||
if not target_path.endswith('/'):
|
||||
target_path = target_path + '/'
|
||||
|
@@ -558,8 +604,7 @@ def list_target_files(config):

# Extract information from target
b2_application_keyid = target.netloc[:target.netloc.index(':')]
b2_application_key = target.netloc[target.netloc.index(':') + 1:target.netloc.index('@')]
b2_application_key = urllib.parse.unquote(target.netloc[target.netloc.index(':') + 1:target.netloc.index('@')])
b2_bucket = target.netloc[target.netloc.index('@') + 1:]

try:
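The change above percent-decodes the B2 application key, which is what makes keys containing a slash work (see the Version 64 changelog entry). A hedged example of the round trip, with a made-up key ID, key, and bucket name:

```python
# Hedged example with invented credentials: percent-encoding the application key
# keeps a "/" inside it from breaking the keyid:key@bucket parsing shown above.
import urllib.parse

raw_key = "K004abc/defGHI"  # hypothetical application key containing a slash
target = "b2://000123456789abc:" + urllib.parse.quote(raw_key, safe='') + "@my-backup-bucket"

netloc = urllib.parse.urlsplit(target).netloc
keyid = netloc[:netloc.index(':')]
key = urllib.parse.unquote(netloc[netloc.index(':') + 1:netloc.index('@')])
assert (keyid, key) == ("000123456789abc", raw_key)
```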
@ -617,10 +662,9 @@ def get_backup_config(env, for_save=False, for_ui=False):
|
|||
|
||||
# Merge in anything written to custom.yaml.
|
||||
try:
|
||||
custom_config = rtyaml.load(
|
||||
open(os.path.join(backup_root, 'custom.yaml')))
|
||||
if not isinstance(custom_config, dict):
|
||||
raise ValueError() # caught below
|
||||
with open(os.path.join(backup_root, 'custom.yaml'), 'r') as f:
|
||||
custom_config = rtyaml.load(f)
|
||||
if not isinstance(custom_config, dict): raise ValueError() # caught below
|
||||
config.update(custom_config)
|
||||
except:
|
||||
pass
|
||||
|
@ -644,7 +688,8 @@ def get_backup_config(env, for_save=False, for_ui=False):
|
|||
config["target"] = "file://" + config["file_target_directory"]
|
||||
ssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub')
|
||||
if os.path.exists(ssh_pub_key):
|
||||
config["ssh_pub_key"] = open(ssh_pub_key, 'r').read()
|
||||
with open(ssh_pub_key, 'r') as f:
|
||||
config["ssh_pub_key"] = f.read()
|
||||
|
||||
return config
|
||||
|
||||
|
@ -679,6 +724,9 @@ if __name__ == "__main__":
|
|||
# to duplicity. The restore path should be specified.
|
||||
run_duplicity_restore(sys.argv[2:])
|
||||
|
||||
elif sys.argv[-1] == "--duplicity-command":
|
||||
print_duplicity_command()
|
||||
|
||||
else:
|
||||
# Perform a backup. Add --full to force a full backup rather than
|
||||
# possibly performing an incremental backup.
|
||||
|
|
|
@ -61,7 +61,8 @@ def read_password():
|
|||
|
||||
|
||||
def setup_key_auth(mgmt_uri):
|
||||
key = open('/var/lib/mailinabox/api.key').read().strip()
|
||||
with open('/var/lib/mailinabox/api.key', 'r') as f:
|
||||
key = f.read().strip()
|
||||
|
||||
auth_handler = urllib.request.HTTPBasicAuthHandler()
|
||||
auth_handler.add_password(realm='Mail-in-a-Box Management Server',
|
||||
|
|
|
@ -1230,7 +1230,7 @@ def munin_cgi(filename):
|
|||
support infrastructure like spawn-fcgi.
|
||||
"""
|
||||
|
||||
COMMAND = 'su - munin --preserve-environment --shell=/bin/bash -c /usr/lib/munin/cgi/munin-cgi-graph'
|
||||
COMMAND = 'su munin --preserve-environment --shell=/bin/bash -c /usr/lib/munin/cgi/munin-cgi-graph'
|
||||
# su changes user, we use the munin user here
|
||||
# --preserve-environment retains the environment, which is where Popen's `env` data is
|
||||
# --shell=/bin/bash ensures the shell used is bash
|
||||
|
|
|
@ -1081,10 +1081,9 @@ def write_opendkim_tables(domains, env):
|
|||
|
||||
def get_custom_dns_config(env, only_real_records=False):
|
||||
try:
|
||||
custom_dns = rtyaml.load(
|
||||
open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
|
||||
if not isinstance(custom_dns, dict):
|
||||
raise ValueError() # caught below
|
||||
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), 'r') as f:
|
||||
custom_dns = rtyaml.load(f)
|
||||
if not isinstance(custom_dns, dict): raise ValueError() # caught below
|
||||
except:
|
||||
return []
|
||||
|
||||
|
@ -1290,6 +1289,7 @@ def set_custom_dns_record(qname, rtype, value, action, env, ttl=None):
|
|||
def get_secondary_dns(custom_dns, mode=None):
|
||||
resolver = dns.resolver.get_default_resolver()
|
||||
resolver.timeout = 10
|
||||
resolver.lifetime = 10
|
||||
|
||||
values = []
|
||||
for qname, rtype, value, ttl in custom_dns:
|
||||
|
@ -1302,25 +1302,33 @@ def get_secondary_dns(custom_dns, mode=None):
|
|||
values.append(hostname)
|
||||
continue
|
||||
|
||||
# This is a hostname. Before including in zone xfr lines,
|
||||
# resolve to an IP address. Otherwise just return the hostname.
|
||||
# It may not resolve to IPv6, so don't throw an exception if it
|
||||
# doesn't.
|
||||
if not hostname.startswith("xfr:"):
|
||||
if mode == "xfr":
|
||||
response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
|
||||
values.extend(map(str, response))
|
||||
response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
|
||||
values.extend(map(str, response))
|
||||
continue
|
||||
values.append(hostname)
|
||||
# If the entry starts with "xfr:" only include it in the zone transfer settings.
|
||||
if hostname.startswith("xfr:"):
|
||||
if mode != "xfr": continue
|
||||
hostname = hostname[4:]
|
||||
|
||||
# This is a zone-xfer-only IP address. Do not return if
|
||||
# we're querying for NS record hostnames. Only return if
|
||||
# we're querying for zone xfer IP addresses - return the
|
||||
# IP address.
|
||||
elif mode == "xfr":
|
||||
values.append(hostname[4:])
|
||||
# If is a hostname, before including in zone xfr lines,
|
||||
# resolve to an IP address.
|
||||
# It may not resolve to IPv6, so don't throw an exception if it
|
||||
# doesn't. Skip the entry if there is a DNS error.
|
||||
if mode == "xfr":
|
||||
try:
|
||||
ipaddress.ip_interface(hostname) # test if it's an IP address or CIDR notation
|
||||
values.append(hostname)
|
||||
except ValueError:
|
||||
try:
|
||||
response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
|
||||
values.extend(map(str, response))
|
||||
except dns.exception.DNSException:
|
||||
pass
|
||||
try:
|
||||
response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
|
||||
values.extend(map(str, response))
|
||||
except dns.exception.DNSException:
|
||||
pass
|
||||
|
||||
else:
|
||||
values.append(hostname)
|
||||
|
||||
return values
|
||||
|
||||
|
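The rewritten get_secondary_dns() above first tests whether an xfr: entry is already an IP address or CIDR block before falling back to DNS resolution. A small sketch of that test, using documentation-range addresses and a hypothetical hostname:

```python
# Sketch of the check above: an xfr: value that parses as an IP address or CIDR
# block is used directly; anything else is treated as a hostname and resolved
# to A/AAAA records.
import ipaddress

def is_ip_or_cidr(value):
    try:
        ipaddress.ip_interface(value)
        return True
    except ValueError:
        return False

assert is_ip_or_cidr("203.0.113.10")
assert is_ip_or_cidr("2001:db8::/64")
assert not is_ip_or_cidr("ns2.dns-provider.example")
```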
@ -1330,15 +1338,17 @@ def set_secondary_dns(hostnames, env):
|
|||
# Validate that all hostnames are valid and that all zone-xfer IP addresses are valid.
|
||||
resolver = dns.resolver.get_default_resolver()
|
||||
resolver.timeout = 5
|
||||
resolver.lifetime = 5
|
||||
|
||||
for item in hostnames:
|
||||
if not item.startswith("xfr:"):
|
||||
# Resolve hostname.
|
||||
try:
|
||||
response = resolver.resolve(item, "A")
|
||||
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
|
||||
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
|
||||
try:
|
||||
response = resolver.resolve(item, "AAAA")
|
||||
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
|
||||
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
|
||||
raise ValueError("Could not resolve the IP address of %s." % item)
|
||||
else:
|
||||
# Validate IP address.
|
||||
|
@ -1378,7 +1388,7 @@ def get_custom_dns_records(custom_dns, qname, rtype):
|
|||
def build_recommended_dns(env):
|
||||
ret = []
|
||||
for (domain, zonefile, records) in build_zones(env):
|
||||
# remove records that we don't dislay
|
||||
# remove records that we don't display
|
||||
records = [r for r in records if r[3] is not False]
|
||||
|
||||
# put Required at the top, then Recommended, then everythiing else
|
||||
|
|
|
@ -38,7 +38,8 @@ def edit_conf(filename,
|
|||
erase_setting=False):
|
||||
found = set()
|
||||
buf = ""
|
||||
input_lines = list(open(filename, "r+"))
|
||||
with open(filename, "r") as f:
|
||||
input_lines = list(f)
|
||||
|
||||
while len(input_lines) > 0:
|
||||
line = input_lines.pop(0)
|
||||
|
|
|
@ -28,7 +28,7 @@ content = sys.stdin.read().strip()
|
|||
|
||||
# If there's nothing coming in, just exit.
|
||||
if content == "":
|
||||
sys.exit(0)
|
||||
sys.exit(0)
|
||||
|
||||
# create MIME message
|
||||
msg = MIMEMultipart('alternative')
|
||||
|
@ -44,8 +44,7 @@ msg['From'] = "\"%s\" <%s>" % ("System Management Daemon",
|
|||
msg['To'] = "administrator@" + env['PRIMARY_HOSTNAME']
|
||||
msg['Subject'] = "[%s] %s" % (env['PRIMARY_HOSTNAME'], subject)
|
||||
|
||||
content_html = "<html><body><pre>{}</pre></body></html>".format(
|
||||
html.escape(content))
|
||||
content_html = '<html><body><pre style="overflow-x: scroll; white-space: pre;">{}</pre></body></html>'.format(html.escape(content))
|
||||
|
||||
msg.attach(MIMEText(create_signature(content.encode()).decode(), 'plain'))
|
||||
msg.attach(MIMEText(content_html, 'html'))
|
||||
|
|
|
@ -68,11 +68,12 @@ def scan_files(collector):
|
|||
|
||||
tmp_file = None
|
||||
|
||||
if not os.path.exists(fn):
|
||||
continue
|
||||
elif fn[-3:] == '.gz':
|
||||
tmp_file = tempfile.NamedTemporaryFile()
|
||||
shutil.copyfileobj(gzip.open(fn), tmp_file)
|
||||
if not os.path.exists(fn):
|
||||
continue
|
||||
elif fn[-3:] == '.gz':
|
||||
tmp_file = tempfile.NamedTemporaryFile()
|
||||
with gzip.open(fn, 'rb') as f:
|
||||
shutil.copyfileobj(f, tmp_file)
|
||||
|
||||
if VERBOSE:
|
||||
print("Processing file", fn, "...")
|
||||
|
|
|
@ -611,7 +611,8 @@ def check_certificate(domain,
|
|||
# Second, check that the certificate matches the private key.
|
||||
if ssl_private_key is not None:
|
||||
try:
|
||||
priv_key = load_pem(open(ssl_private_key, 'rb').read())
|
||||
with open(ssl_private_key, 'rb') as f:
|
||||
priv_key = load_pem(f.read())
|
||||
except ValueError as e:
|
||||
return ("The private key file %s is not a private key file: %s" %
|
||||
(ssl_private_key, str(e)), None)
|
||||
|
|
|
@ -185,6 +185,12 @@ def run_services_checks(env, output, pool):
|
|||
fatal = fatal or fatal2
|
||||
output2.playback(output)
|
||||
|
||||
# Check fail2ban.
|
||||
code, ret = shell('check_output', ["fail2ban-client", "status"], capture_stderr=True, trap=True)
|
||||
if code != 0:
|
||||
output.print_error("fail2ban is not running.")
|
||||
all_running = False
|
||||
|
||||
if all_running:
|
||||
output.print_ok("All system services are running.")
|
||||
|
||||
|
@ -321,7 +327,8 @@ def check_ssh_password(env, output):
|
|||
# the configuration file.
|
||||
if not os.path.exists("/etc/ssh/sshd_config"):
|
||||
return
|
||||
sshd = open("/etc/ssh/sshd_config").read()
|
||||
with open("/etc/ssh/sshd_config", "r") as f:
|
||||
sshd = f.read()
|
||||
if re.search("\nPasswordAuthentication\s+yes", sshd) \
|
||||
or not re.search("\nPasswordAuthentication\s+no", sshd):
|
||||
output.print_error(
|
||||
|
@ -526,9 +533,9 @@ def run_network_checks(env, output):
|
|||
if zen is None:
|
||||
output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
|
||||
elif zen == "[timeout]":
|
||||
output.print_warning(
|
||||
"Connection to zen.spamhaus.org timed out. We could not determine whether your server's IP address is blacklisted. Please try again later."
|
||||
)
|
||||
output.print_warning("Connection to zen.spamhaus.org timed out. We could not determine whether your server's IP address is blacklisted. Please try again later.")
|
||||
elif zen == "[Not Set]":
|
||||
output.print_warning("Could not connect to zen.spamhaus.org. We could not determine whether your server's IP address is blacklisted. Please try again later.")
|
||||
else:
|
||||
output.print_error(
|
||||
"""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
|
||||
|
@ -934,10 +941,8 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
|
|||
for ns in custom_secondary_ns:
|
||||
# We must first resolve the nameserver to an IP address so we can query it.
|
||||
ns_ips = query_dns(ns, "A")
|
||||
if not ns_ips:
|
||||
output.print_error(
|
||||
"Secondary nameserver %s is not valid (it doesn't resolve to an IP address)."
|
||||
% ns)
|
||||
if not ns_ips or ns_ips in {'[Not Set]', '[timeout]'}:
|
||||
output.print_error("Secondary nameserver %s is not valid (it doesn't resolve to an IP address)." % ns)
|
||||
continue
|
||||
# Choose the first IP if nameserver returns multiple
|
||||
ns_ip = ns_ips.split('; ')[0]
|
||||
|
@ -1008,13 +1013,9 @@ def check_dnssec(domain,
|
|||
# Some registrars may want the public key so they can compute the digest. The DS
|
||||
# record that we suggest using is for the KSK (and that's how the DS records were generated).
|
||||
# We'll also give the nice name for the key algorithm.
|
||||
dnssec_keys = load_env_vars_from_file(
|
||||
os.path.join(env['STORAGE_ROOT'],
|
||||
'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
|
||||
dnsssec_pubkey = open(
|
||||
os.path.join(env['STORAGE_ROOT'],
|
||||
'dns/dnssec/' + dnssec_keys['KSK'] +
|
||||
'.key')).read().split("\t")[3].split(" ")[3]
|
||||
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
|
||||
with open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), 'r') as f:
|
||||
dnsssec_pubkey = f.read().split("\t")[3].split(" ")[3]
|
||||
|
||||
expected_ds_records[(ds_keytag, ds_alg, ds_digalg, ds_digest)] = {
|
||||
"record": rr_ds,
|
||||
|
@ -1210,9 +1211,9 @@ def check_mail_domain(domain, env, output):
|
|||
if dbl is None:
|
||||
output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
|
||||
elif dbl == "[timeout]":
|
||||
output.print_warning(
|
||||
"Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {} is blacklisted. Please try again later."
|
||||
.format(domain))
|
||||
output.print_warning("Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
|
||||
elif dbl == "[Not Set]":
|
||||
output.print_warning("Could not connect to dbl.spamhaus.org. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
|
||||
else:
|
||||
output.print_error(
|
||||
"""This domain is listed in the Spamhaus Domain Block List (code %s),
|
||||
|
@ -1265,12 +1266,17 @@ def query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False):
|
|||
# running bind server), or if the 'at' argument is specified, use that host
|
||||
# as the nameserver.
|
||||
resolver = dns.resolver.get_default_resolver()
|
||||
if at:
|
||||
|
||||
# Make sure at is not a string that cannot be used as a nameserver
|
||||
if at and at not in {'[Not set]', '[timeout]'}:
|
||||
resolver = dns.resolver.Resolver()
|
||||
resolver.nameservers = [at]
|
||||
|
||||
# Set a timeout so that a non-responsive server doesn't hold us back.
|
||||
resolver.timeout = 5
|
||||
# The number of seconds to spend trying to get an answer to the question. If the
|
||||
# lifetime expires a dns.exception.Timeout exception will be raised.
|
||||
resolver.lifetime = 5
|
||||
|
||||
# Do the query.
|
||||
try:
|
||||
|
@ -1403,7 +1409,7 @@ def list_apt_updates(apt_update=True):
|
|||
|
||||
|
||||
def what_version_is_this(env):
|
||||
# This function runs `git describe --abbrev=0` on the Mail-in-a-Box installation directory.
|
||||
# This function runs `git describe --always --abbrev=0` on the Mail-in-a-Box installation directory.
|
||||
# Git may not be installed and Mail-in-a-Box may not have been cloned from github,
|
||||
# so this function may raise all sorts of exceptions.
|
||||
miab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
@ -1473,7 +1479,8 @@ def run_and_output_changes(env, pool):
|
|||
# Load previously saved status checks.
|
||||
cache_fn = "/var/cache/mailinabox/status_checks.json"
|
||||
if os.path.exists(cache_fn):
|
||||
prev = json.load(open(cache_fn))
|
||||
with open(cache_fn, 'r') as f:
|
||||
prev = json.load(f)
|
||||
|
||||
# Group the serial output into categories by the headings.
|
||||
def group_by_heading(lines):
|
||||
|
|
|
@ -18,8 +18,7 @@
|
|||
|
||||
<h3>Add a mail alias</h3>
|
||||
|
||||
<p>Aliases are email forwarders. An alias can forward email to a <a href="#" onclick="return show_panel('users')">mail
|
||||
user</a> or to any email address.</p>
|
||||
<p>Aliases are email forwarders. An alias can forward email to a <a href="#users">mail user</a> or to any email address.</p>
|
||||
|
||||
<p>To use an alias or any address besides your own login username in outbound mail, the sending user must be included as
|
||||
a permitted sender for the alias.</p>
|
||||
|
|
|
@ -94,8 +94,7 @@
|
|||
|
||||
<h3>Using a secondary nameserver</h3>
|
||||
|
||||
<p>If your TLD requires you to have two separate nameservers, you can either set up <a href="#"
|
||||
onclick="return show_panel('external_dns')">external DNS</a> and ignore the DNS server on this box entirely, or
|
||||
<p>If your TLD requires you to have two separate nameservers, you can either set up <a href="#external_dns">external DNS</a> and ignore the DNS server on this box entirely, or
|
||||
use the DNS server on this box but add a secondary (aka “slave”) nameserver.</p>
|
||||
<p>If you choose to use a secondary nameserver, you must find a secondary nameserver service provider. Your domain name
|
||||
registrar or virtual cloud provider may provide this service for you. Once you set up the secondary nameserver
|
||||
|
|
|
@ -72,8 +72,7 @@
|
|||
</tr>
|
||||
</table>
|
||||
|
||||
<p>In addition to setting up your email, you’ll also need to set up <a href="#sync_guide"
|
||||
onclick="return show_panel(this);">contacts and calendar synchronization</a> separately.</p>
|
||||
<p>In addition to setting up your email, you’ll also need to set up <a href="#sync_guide">contacts and calendar synchronization</a> separately.</p>
|
||||
|
||||
<p>As an alternative to IMAP you can also use the POP protocol: choose POP as the protocol, port 995, and
|
||||
SSL or TLS security in your mail client. The SMTP settings and usernames and passwords remain the same.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
<div>
|
||||
<h2>Manage Password</h2>
|
||||
<p>Here you can change your account password. The new password is then valid for both this panel and your email.</p>
|
||||
<p>If you have client emails configured, you'll then need to update the configuration with the new password. See the <a href="#mail-guide" onclick="return show_panel(this);">Mail Guide</a> for more information about this.</p>
|
||||
<p>If you have client emails configured, you'll then need to update the configuration with the new password. See the <a href="#mail-guide">Mail Guide</a> for more information about this.</p>
|
||||
|
||||
<form class="form-horizontal" role="form" onsubmit="set_password_self(); return false;">
|
||||
<div class="col-lg-10 col-xl-8 mb-3">
|
||||
|
|
|
@ -29,17 +29,15 @@
|
|||
</tr>
|
||||
</table>
|
||||
|
||||
<p>Log in settings are the same as with <a href="#mail-guide" onclick="return show_panel(this);">mail</a>:
|
||||
your
|
||||
complete email address and your mail password.</p>
|
||||
<p>Log in settings are the same as with <a href="#mail-guide">mail</a>: your
|
||||
complete email address and your mail password.</p>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-6">
|
||||
<h4>On your mobile device</h4>
|
||||
|
||||
<p>If you set up your <a href="#mail-guide" onclick="return show_panel(this);">mail</a> using
|
||||
Exchange/ActiveSync,
|
||||
your contacts and calendar may already appear on your device.</p>
|
||||
<p>If you set up your <a href="#mail-guide">mail</a> using Exchange/ActiveSync,
|
||||
your contacts and calendar may already appear on your device.</p>
|
||||
<p>Otherwise, here are some apps that can synchronize your contacts and calendar to your Android phone.</p>
|
||||
|
||||
<table class="table">
|
||||
|
|
|
@ -6,8 +6,7 @@
|
|||
|
||||
<h2>Backup Status</h2>
|
||||
|
||||
<p>The box makes an incremental backup each night. By default the backup is stored on the machine itself, but you can
|
||||
also store it on S3-compatible services like Amazon Web Services (AWS).</p>
|
||||
<p>The box makes an incremental backup each night. You can store the backup on any Amazon Web Services S3-compatible service, or other options.</p>
|
||||
|
||||
<h3>Configuration</h3>
|
||||
|
||||
|
@ -80,6 +79,9 @@
|
|||
of target user on the backup server specified above. That way you'll enable secure and
|
||||
passwordless authentication from your mail-in-a-box server and your backup server.
|
||||
</div>
|
||||
<div id="copy_pub_key_div" class="col-sm">
|
||||
<button type="button" class="btn btn-small" onclick="copy_pub_key_to_clipboard()">Copy</button>
|
||||
</div>
|
||||
</div>
|
||||
<!-- S3 BACKUP -->
|
||||
<div class="col-lg-10 col-xl-8 mb-3 backup-target-s3">
|
||||
|
@ -104,11 +106,16 @@
|
|||
<input type="text" placeholder="Endpoint" class="form-control" id="backup-target-s3-host">
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-group backup-target-s3">
|
||||
<div class="input-group">
|
||||
<label for="backup-target-s3-region-name" class="input-group-text">S3 Region Name <small>(if required)</small></label>
|
||||
<input type="text" placeholder="region.name" class="form-control" class="form-control" id="backup-target-s3-region-name">
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-lg-10 col-xl-8 mb-3 backup-target-s3">
|
||||
<div class="input-group">
|
||||
<label for="backup-target-s3-path" class="input-group-text">S3 Path</label>
|
||||
<input type="text" placeholder="your-bucket-name/backup-directory" class="form-control"
|
||||
id="backup-target-s3-path">
|
||||
<label for="backup-target-s3-path" class="input-group-text">S3 Bucket & Path</label>
|
||||
<input type="text" placeholder="your-bucket-name/backup-directory" class="form-control" id="backup-target-s3-path">
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-lg-10 col-xl-8 mb-3 backup-target-s3">
|
||||
|
@ -291,30 +298,18 @@
|
|||
} else if (r.target == "off") {
|
||||
$("#backup-target-type").val("off");
|
||||
} else if (r.target.substring(0, 8) == "rsync://") {
|
||||
$("#backup-target-type").val("rsync");
|
||||
let uri = r.target.substring(8)
|
||||
let i = uri.indexOf("/")
|
||||
let path = [uri.slice(0, i), uri.slice(i + 1)];
|
||||
let host_parts = path.shift().split('@');
|
||||
$("#backup-target-rsync-user").val(host_parts[0]);
|
||||
$("#backup-target-rsync-host").val(host_parts[1]);
|
||||
$("#backup-target-rsync-path").val(path[0]);
|
||||
$("#backup-target-rsync-port").val(r.target_rsync_port)
|
||||
const spec = url_split(r.target);
|
||||
$("#backup-target-type").val(spec.scheme);
|
||||
$("#backup-target-rsync-user").val(spec.user);
|
||||
$("#backup-target-rsync-host").val(spec.host);
|
||||
$("#backup-target-rsync-path").val(spec.path);
|
||||
} else if (r.target.substring(0, 5) == "s3://") {
|
||||
const spec = url_split(r.target);
|
||||
$("#backup-target-type").val("s3");
|
||||
var hostpath = r.target.substring(5).split('/');
|
||||
var host = hostpath.shift();
|
||||
let s3_options = $("#backup-target-s3-host-select option").map(function() {return this.value}).get()
|
||||
$("#backup-target-s3-host-select").val("other")
|
||||
for (let h of s3_options) {
|
||||
console.log(h)
|
||||
if (h == host) {
|
||||
$("#backup-target-s3-host-select").val(host)
|
||||
break
|
||||
}
|
||||
}
|
||||
$("#backup-target-s3-host").val(host);
|
||||
$("#backup-target-s3-path").val(hostpath.join('/'));
|
||||
$("#backup-target-s3-host-select").val(spec.host);
|
||||
$("#backup-target-s3-host").val(spec.host);
|
||||
$("#backup-target-s3-region-name").val(spec.user); // stuffing the region name in the username
|
||||
$("#backup-target-s3-path").val(spec.path);
|
||||
} else if (r.target.substring(0, 5) == "b2://") {
|
||||
$("#backup-target-type").val("b2");
|
||||
var targetPath = r.target.substring(5);
|
||||
|
@ -322,11 +317,11 @@
|
|||
var b2_applicationkey = targetPath.split(':')[1].split('@')[0];
|
||||
var b2_bucket = targetPath.split('@')[1];
|
||||
$("#backup-target-b2-user").val(b2_application_keyid);
|
||||
$("#backup-target-b2-pass").val(b2_applicationkey);
|
||||
$("#backup-target-b2-pass").val(decodeURIComponent(b2_applicationkey));
|
||||
$("#backup-target-b2-bucket").val(b2_bucket);
|
||||
}
|
||||
toggle_form()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function set_custom_backup() {
|
||||
|
@ -335,17 +330,20 @@
|
|||
var target_pass = $("#backup-target-pass").val();
|
||||
let target_port = $("#backup-target-rsync-port").val();
|
||||
|
||||
var target;
|
||||
let target;
|
||||
if (target_type == "local" || target_type == "off")
|
||||
target = target_type;
|
||||
else if (target_type == "s3")
|
||||
target = "s3://" + $("#backup-target-s3-host").val() + "/" + $("#backup-target-s3-path").val();
|
||||
target = "s3://"
|
||||
+ ($("#backup-target-s3-region-name").val() ? ($("#backup-target-s3-region-name").val() + "@") : "")
|
||||
+ $("#backup-target-s3-host").val()
|
||||
+ "/" + $("#backup-target-s3-path").val();
|
||||
else if (target_type == "rsync") {
|
||||
target = "rsync://" + $("#backup-target-rsync-user").val() + "@" + $("#backup-target-rsync-host").val()
|
||||
+ "/" + $("#backup-target-rsync-path").val();
|
||||
+ "/" + $("#backup-target-rsync-path").val();
|
||||
target_user = '';
|
||||
} else if (target_type == "b2") {
|
||||
target = 'b2://' + $('#backup-target-b2-user').val() + ':' + $('#backup-target-b2-pass').val()
|
||||
target = 'b2://' + $('#backup-target-b2-user').val() + ':' + encodeURIComponent($('#backup-target-b2-pass').val())
|
||||
+ '@' + $('#backup-target-b2-bucket').val()
|
||||
target_user = '';
|
||||
target_pass = '';
|
||||
|
@ -413,4 +411,41 @@
|
|||
})
|
||||
}
|
||||
|
||||
// Return a two-element array of the substring preceding and the substring following
|
||||
// the first occurence of separator in string. Return [undefined, string] if the
|
||||
// separator does not appear in string.
|
||||
const split1_rest = (string, separator) => {
|
||||
const index = string.indexOf(separator);
|
||||
return (index >= 0) ? [string.substring(0, index), string.substring(index + separator.length)] : [undefined, string];
|
||||
};
|
||||
|
||||
// Note: The manifest JS URL class does not work in some security-conscious
|
||||
// settings, e.g. Brave browser, so we roll our own that handles only what we need.
|
||||
//
|
||||
// Use greedy separator parsing to get parts of a MIAB backup target url.
|
||||
// Note: path will not include a leading forward slash '/'
|
||||
const url_split = url => {
|
||||
const [ scheme, scheme_rest ] = split1_rest(url, '://');
|
||||
const [ user, user_rest ] = split1_rest(scheme_rest, '@');
|
||||
const [ host, path ] = split1_rest(user_rest, '/');
|
||||
|
||||
return {
|
||||
scheme,
|
||||
user,
|
||||
host,
|
||||
path,
|
||||
}
|
||||
};
|
||||
|
||||
// Hide Copy button if not in a modern clipboard-supporting environment.
|
||||
// Using document API because jQuery is not necessarily available in this script scope.
|
||||
if (!(navigator && navigator.clipboard && navigator.clipboard.writeText)) {
|
||||
document.getElementById('copy_pub_key_div').hidden = true;
|
||||
}
|
||||
|
||||
function copy_pub_key_to_clipboard() {
|
||||
const ssh_pub_key = $("#ssh-pub-key").val();
|
||||
navigator.clipboard.writeText(ssh_pub_key);
|
||||
}
|
||||
|
||||
</script>
|
||||
|
|
|
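The url_split()/split1_rest() helpers added above do greedy, first-occurrence splitting instead of relying on the browser URL class. A hypothetical Python mirror of the same parsing (not code from the repository, sample target invented), showing what the control panel expects back for a backup target:

```python
# Hypothetical Python mirror of the JavaScript helpers above.
def split1_rest(s, sep):
    i = s.find(sep)
    return (None, s) if i < 0 else (s[:i], s[i + len(sep):])

def url_split(url):
    scheme, rest = split1_rest(url, '://')
    user, rest = split1_rest(rest, '@')
    host, path = split1_rest(rest, '/')
    return {'scheme': scheme, 'user': user, 'host': host, 'path': path}

print(url_split("s3://eu-central-1@s3.example.com/bucket/dir"))
# {'scheme': 's3', 'user': 'eu-central-1', 'host': 's3.example.com', 'path': 'bucket/dir'}
```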
@ -1,21 +1,21 @@
|
|||
<h2>System Status Checks</h2>
|
||||
|
||||
<style>
|
||||
#system-checks .message {
|
||||
.system-checks-table .message {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
#system-checks .icon {
|
||||
.system-checks-table .icon {
|
||||
min-width: 2em;
|
||||
}
|
||||
|
||||
#system-checks .heading {
|
||||
.system-checks-table .heading {
|
||||
font-weight: bold;
|
||||
font-size: 180%;
|
||||
padding-top: 1.75em;
|
||||
}
|
||||
|
||||
#system-checks .heading.first {
|
||||
.system-checks-table .heading.first {
|
||||
border-top: none;
|
||||
padding-top: 0;
|
||||
}
|
||||
|
@ -98,12 +98,18 @@
|
|||
Mail-in-a-Box.)</small></p>
|
||||
</div>
|
||||
|
||||
<div id="system-checks">
|
||||
<div class="system-checks-table" id="system-checks-summary">
|
||||
</div>
|
||||
|
||||
<br>
|
||||
|
||||
<div class="system-checks-table" id="system-checks">
|
||||
</div>
|
||||
|
||||
<script>
|
||||
function show_system_status() {
|
||||
$('#system-checks').html("")
|
||||
$('#system-checks-summary').html("")
|
||||
|
||||
api(
|
||||
"/system/privacy",
|
||||
|
@ -131,6 +137,12 @@
|
|||
"POST",
|
||||
{},
|
||||
function (r) {
|
||||
let count_by_status = {
|
||||
ok: 0,
|
||||
error: 0,
|
||||
warning: 0
|
||||
}
|
||||
|
||||
for (let i = 0; i < r.length; i++) {
|
||||
let n = $("<div class='col-12'><div class='icon'></div><p class='message status-text' style='margin: 0'/>");
|
||||
if (i == 0) n.addClass('first')
|
||||
|
@ -145,6 +157,7 @@
|
|||
if (r[i].type == "ok") n.find(".icon").addClass("fa-check")
|
||||
if (r[i].type == "error") n.find(".icon").addClass("fa-times")
|
||||
if (r[i].type == "warning") n.find(".icon").addClass("fa-exclamation-triangle")
|
||||
count_by_status[r[i].type]++
|
||||
|
||||
n.find('p.status-text').text(r[i].text)
|
||||
|
||||
|
@ -168,7 +181,6 @@
|
|||
}
|
||||
|
||||
for (var j = 0; j < r[i].extra.length; j++) {
|
||||
|
||||
var m = $("<div/>").text(r[i].extra[j].text)
|
||||
if (r[i].extra[j].monospace)
|
||||
m.addClass("pre");
|
||||
|
@ -180,8 +192,17 @@
|
|||
$('#system-checks').append($("<hr>"));
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
let summary = $('#system-checks-summary')
|
||||
if (count_by_status['error'] + count_by_status['warning'] == 0) {
|
||||
summary.append($("<div class='col-12 status-ok'><div class='icon fas fa-check'></div><p class='message status-text' style='margin: 0'><b class='message status-text'/></p>")).find("p b").text(`All ${count_by_status['ok']} checks OK. No problems found!`);
|
||||
} else {
|
||||
summary.append($("<div class='col-12 status-ok'><div class='icon fas fa-check'></div><p class='message status-text' style='margin: 0'/>")).find(".status-ok p").text(`${count_by_status['ok']} OK`);
|
||||
summary.append($("<div class='col-12 status-warning'><div class='icon fas fa-exclamation-triangle'></div><p class='message status-text' style='margin: 0'/>")).find(".status-warning p").text(`${count_by_status['warning']} Warnings`);
|
||||
summary.append($("<div class='col-12 status-error'><div class='icon fas fa-times'></div><p class='message status-text' style='margin: 0'/>")).find(".status-error p").text(`${count_by_status['error']} Errors`);
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
var current_privacy_setting = null;
|
||||
|
|
|
@ -68,11 +68,10 @@
|
|||
<ul style="margin-top: 1em; padding-left: 1.5em; font-size: 90%;">
|
||||
<li>Passwords must be at least eight characters. If you're out of ideas, you can <a href="#"
|
||||
onclick="return generate_random_password()">generate a random password</a>.</li>
|
||||
<li>Use <a href="#" onclick="return show_panel('aliases')">aliases</a> to create email addresses that forward to
|
||||
<li>Use <a href="#aliases">aliases</a> to create email addresses that forward to
|
||||
existing accounts.</li>
|
||||
<li>Administrators get access to this control panel.</li>
|
||||
<li>User accounts cannot contain any international (non-ASCII) characters, but <a href="#"
|
||||
onclick="return show_panel('aliases');">aliases</a> can.</li>
|
||||
<li>User accounts cannot contain any international (non-ASCII) characters, but <a href="#aliases">aliases</a> can.</li>
|
||||
<li>Quotas may not contain any spaces, commas or decimal points. Suffixes of G (gigabytes) and M (megabytes) are
|
||||
allowed. For unlimited storage enter 0 (zero)</li>
|
||||
</ul>
|
||||
|
@ -80,6 +79,13 @@
|
|||
<button type="submit" class="btn btn-primary">Add User</button>
|
||||
</form>
|
||||
|
||||
<ul style="margin-top: 1em; padding-left: 1.5em; font-size: 90%;">
|
||||
<li>Passwords must be at least eight characters consisting of English letters and numbers only. For best results, <a href="#" onclick="return generate_random_password()">generate a random password</a>.</li>
|
||||
<li>Use <a href="#aliases">aliases</a> to create email addresses that forward to existing accounts.</li>
|
||||
<li>Administrators get access to this control panel.</li>
|
||||
<li>User accounts cannot contain any international (non-ASCII) characters, but <a href="#aliases">aliases</a> can.</li>
|
||||
</ul>
|
||||
|
||||
<h3>Existing mail users</h3>
|
||||
<table id="user_table" class="table col-12">
|
||||
<caption></caption>
|
||||
|
|
|
@ -13,9 +13,7 @@
|
|||
</p>
|
||||
|
||||
<ol>
|
||||
<li>Ensure that any domains you are publishing a website for have no problems on the <a href="#system_status"
|
||||
onclick="return show_panel(this);">Status Checks</a> page.</li>
|
||||
|
||||
<li>Ensure that any domains you are publishing a website for have no problems on the <a href="#system_status">Status Checks</a> page.</li>
|
||||
<li>On your personal computer, install an SSH file transfer program such as <a
|
||||
href="https://filezilla-project.org/">FileZilla</a> or <a
|
||||
href="http://linuxcommand.org/man_pages/scp1.html">scp</a>.</li>
|
||||
|
@ -43,10 +41,11 @@
|
|||
</tbody>
|
||||
</table>
|
||||
|
||||
<p>To add a domain to this table, create a dummy <a href="#users" onclick="return show_panel(this);">mail user</a> or
|
||||
<a href="#aliases" onclick="return show_panel(this);">alias</a> on the domain first and see the <a
|
||||
href="https://mailinabox.email/guide.html#domain-name-configuration">setup guide</a> for adding nameserver records
|
||||
to the new domain at your registrar (but <i>not</i> glue records).</p>
|
||||
<p>
|
||||
To add a domain to this table, create a dummy <a href="#users">mail user</a> or <a href="#aliases">alias</a> on the domain first and see
|
||||
the <a href="https://mailinabox.email/guide.html#domain-name-configuration">setup guide</a>
|
||||
for adding nameserver records to the new domain at your registrar (but <i>not</i> glue records).
|
||||
</p>
|
||||
|
||||
</ol>
|
||||
|
||||
|
|
|
@ -13,13 +13,13 @@ def load_environment():
|
|||
|
||||
|
||||
def load_env_vars_from_file(fn):
|
||||
# Load settings from a KEY=VALUE file.
|
||||
import collections
|
||||
env = collections.OrderedDict()
|
||||
for line in open(fn):
|
||||
env.setdefault(*line.strip().split("=", 1))
|
||||
return env
|
||||
|
||||
# Load settings from a KEY=VALUE file.
|
||||
import collections
|
||||
env = collections.OrderedDict()
|
||||
with open(fn, 'r') as f:
|
||||
for line in f:
|
||||
env.setdefault(*line.strip().split("=", 1))
|
||||
return env
|
||||
|
||||
def save_environment(env):
|
||||
with open("/etc/mailinabox.conf", "w") as f:
|
||||
|
@ -38,16 +38,15 @@ def write_settings(config, env):
|
|||
|
||||
|
||||
def load_settings(env):
|
||||
import rtyaml
|
||||
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
|
||||
try:
|
||||
config = rtyaml.load(open(fn, "r"))
|
||||
if not isinstance(config, dict):
|
||||
raise ValueError() # caught below
|
||||
return config
|
||||
except:
|
||||
return {}
|
||||
|
||||
import rtyaml
|
||||
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
|
||||
try:
|
||||
with open(fn, "r") as f:
|
||||
config = rtyaml.load(f)
|
||||
if not isinstance(config, dict): raise ValueError() # caught below
|
||||
return config
|
||||
except:
|
||||
return { }
|
||||
|
||||
# UTILITIES
|
||||
|
||||
|
|
|
@ -81,7 +81,8 @@ def get_web_domains_with_root_overrides(env):
|
|||
root_overrides = {}
|
||||
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
|
||||
if os.path.exists(nginx_conf_custom_fn):
|
||||
custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
|
||||
with open(nginx_conf_custom_fn, 'r') as f:
|
||||
custom_settings = rtyaml.load(f)
|
||||
for domain, settings in custom_settings.items():
|
||||
for type, value in [('redirect', settings.get('redirects',
|
||||
{}).get('/')),
|
||||
|
@ -136,15 +137,18 @@ def do_web_update(env):
|
|||
# Pre-load what SSL certificates we will use for each domain.
|
||||
ssl_certificates = get_ssl_certificates(env)
|
||||
|
||||
# Helper for reading config files and templates
|
||||
def read_conf(conf_fn):
|
||||
with open(os.path.join(os.path.dirname(__file__), "../conf", conf_fn), "r") as f:
|
||||
return f.read()
|
||||
|
||||
# Build an nginx configuration file.
|
||||
nginx_conf = open(
|
||||
os.path.join(os.path.dirname(__file__),
|
||||
"../conf/nginx-top.conf")).read()
|
||||
nginx_conf = read_conf("nginx-top.conf")
|
||||
|
||||
nginx_conf = re.sub("{{phpver}}", get_php_version(), nginx_conf)
|
||||
|
||||
# Add upstream additions
|
||||
nginx_upstream_include = os.path.join(env["STORAGE_ROOT"], "www",
|
||||
".upstream.conf")
|
||||
nginx_upstream_include = os.path.join(env["STORAGE_ROOT"], "www", ".upstream.conf")
|
||||
if not os.path.exists(nginx_upstream_include):
|
||||
with open(nginx_upstream_include, "a+") as f:
|
||||
f.writelines([
|
||||
|
@ -157,18 +161,11 @@ def do_web_update(env):
|
|||
nginx_conf += "\ninclude %s;\n" % (nginx_upstream_include)
|
||||
|
||||
# Load the templates.
|
||||
template0 = open(
|
||||
os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
|
||||
template1 = open(
|
||||
os.path.join(os.path.dirname(__file__),
|
||||
"../conf/nginx-alldomains.conf")).read()
|
||||
template2 = open(
|
||||
os.path.join(os.path.dirname(__file__),
|
||||
"../conf/nginx-primaryonly.conf")).read()
|
||||
template0 = read_conf("nginx.conf")
|
||||
template1 = read_conf("nginx-alldomains.conf")
|
||||
template2 = read_conf("nginx-primaryonly.conf")
|
||||
template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n"
|
||||
template4 = open(
|
||||
os.path.join(os.path.dirname(__file__),
|
||||
"../conf/nginx-openpgpkey.conf")).read()
|
||||
template4 = read_conf("nginx-openpgpkey.conf")
|
||||
|
||||
# Add the PRIMARY_HOST configuration first so it becomes nginx's default server.
|
||||
nginx_conf += make_domain_config(env['PRIMARY_HOSTNAME'],
|
||||
|
@ -240,11 +237,8 @@ def make_domain_config(domain, templates, ssl_certificates, env):
|
|||
def hashfile(filepath):
|
||||
import hashlib
|
||||
sha1 = hashlib.sha1()
|
||||
f = open(filepath, 'rb')
|
||||
try:
|
||||
with open(filepath, 'rb') as f:
|
||||
sha1.update(f.read())
|
||||
finally:
|
||||
f.close()
|
||||
return sha1.hexdigest()
|
||||
|
||||
nginx_conf_extra += "\t# ssl files sha1: %s / %s\n" % (hashfile(
|
||||
|
@ -254,7 +248,8 @@ def make_domain_config(domain, templates, ssl_certificates, env):
|
|||
hsts = "yes"
|
||||
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
|
||||
if os.path.exists(nginx_conf_custom_fn):
|
||||
yaml = rtyaml.load(open(nginx_conf_custom_fn))
|
||||
with open(nginx_conf_custom_fn, 'r') as f:
|
||||
yaml = rtyaml.load(f)
|
||||
if domain in yaml:
|
||||
yaml = yaml[domain]
|
||||
|
||||
|
@ -301,9 +296,9 @@ def make_domain_config(domain, templates, ssl_certificates, env):
|
|||
|
||||
# Add the HSTS header.
|
||||
if hsts == "yes":
|
||||
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=15768000\" always;\n"
|
||||
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=31536000\" always;\n"
|
||||
elif hsts == "preload":
|
||||
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=15768000; includeSubDomains; preload\" always;\n"
|
||||
nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=31536000; includeSubDomains; preload\" always;\n"
|
||||
|
||||
# Add in any user customizations in the includes/ folder.
|
||||
nginx_conf_custom_include = os.path.join(
|
||||
|
|
|
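For reference on the HSTS change above, a quick arithmetic check of the old and new max-age values (seconds to days):

```python
# Seconds-to-days check for the two max-age values above.
for max_age in (15768000, 31536000):
    print(max_age, "seconds =", max_age / 86400, "days")  # 182.5 and 365.0 days
```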
@ -1,7 +1,7 @@
|
|||
Mail-in-a-Box Security Guide
|
||||
============================
|
||||
|
||||
Mail-in-a-Box turns a fresh Ubuntu 18.04 LTS 64-bit machine into a mail server appliance by installing and configuring various components.
|
||||
Mail-in-a-Box turns a fresh Ubuntu 22.04 LTS 64-bit machine into a mail server appliance by installing and configuring various components.
|
||||
|
||||
This page documents the security posture of Mail-in-a-Box. The term “box” is used below to mean a configured Mail-in-a-Box.
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ if [ ! -f /usr/bin/lsb_release ]; then
|
|||
echo "This script must be run on a system running one of the following OS-es:"
|
||||
echo "* Debian 10 (buster)"
|
||||
echo "* Debian 11 (bullseye)"
|
||||
echo "* Debian 12 (bookworm)"
|
||||
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
|
||||
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
|
||||
exit 1
|
||||
|
@ -33,6 +34,7 @@ if [ -z "$TAG" ]; then
|
|||
# Make sure we're running on the correct operating system
|
||||
OS=$(lsb_release -d | sed 's/.*:\s*//')
|
||||
if [ "$OS" == "Debian GNU/Linux 11 (bullseye)" ] ||
|
||||
[ "$OS" == "Debian GNU/Linux 12 (bookworm)" ] ||
|
||||
[ "$(echo $OS | grep -o 'Ubuntu 20.04')" == "Ubuntu 20.04" ] ||
|
||||
[ "$(echo $OS | grep -o 'Ubuntu 22.04')" == "Ubuntu 22.04" ]
|
||||
then
|
||||
|
@ -41,6 +43,7 @@ if [ -z "$TAG" ]; then
|
|||
echo "We are going to install the last version of Power Mail-in-a-Box supporting Debian 10 (buster)."
|
||||
echo "IF THIS IS A NEW INSTALLATION, STOP NOW, AND USE A SUPPORTED DISTRIBUTION INSTEAD (ONE OF THESE):"
|
||||
echo "* Debian 11 (bullseye)"
|
||||
echo "* Debian 12 (bookworm)"
|
||||
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
|
||||
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
|
||||
echo
|
||||
|
@ -68,6 +71,7 @@ if [ -z "$TAG" ]; then
|
|||
else
|
||||
echo "This script must be run on a system running one of the following OS-es:"
|
||||
echo "* Debian 11 (bullseye)"
|
||||
echo "* Debian 12 (bookworm)"
|
||||
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
|
||||
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
|
||||
exit 1
|
||||
|
@ -83,10 +87,14 @@ if [ ! -d $HOME/mailinabox ]; then
|
|||
echo
|
||||
fi
|
||||
|
||||
if [ "$SOURCE" == "" ]; then
|
||||
SOURCE=https://github.com/ddavness/power-mailinabox
|
||||
fi
|
||||
|
||||
echo Downloading Mail-in-a-Box $TAG. . .
|
||||
git clone \
|
||||
-b $TAG --depth 1 \
|
||||
https://git.nibbletools.com/beenull/power-mailinabox \
|
||||
$SOURCE \
|
||||
$HOME/mailinabox \
|
||||
< /dev/null 2> /dev/null
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ chmod go-rwx $STORAGE_ROOT/mail/dkim
|
|||
management/editconf.py /etc/opendmarc.conf -s \
|
||||
"Syslog=true" \
|
||||
"Socket=inet:8893@[127.0.0.1]" \
|
||||
"FailureReports=true"
|
||||
"FailureReports=false"
|
||||
|
||||
# SPFIgnoreResults causes the filter to ignore any SPF results in the header
|
||||
# of the message. This is useful if you want the filter to perfrom SPF checks
|
||||
|
@ -82,11 +82,11 @@ management/editconf.py /etc/opendmarc.conf -s \
|
|||
management/editconf.py /etc/opendmarc.conf -s \
|
||||
"SPFSelfValidate=true"
|
||||
|
||||
# Enables generation of failure reports for sending domains that publish a
|
||||
# Disables generation of failure reports for sending domains that publish a
|
||||
# "none" policy.
|
||||
|
||||
management/editconf.py /etc/opendmarc.conf -s \
|
||||
"FailureReportsOnNone=true"
|
||||
"FailureReportsOnNone=false"
|
||||
|
||||
# AlwaysAddARHeader Adds an "Authentication-Results:" header field even to
|
||||
# unsigned messages from domains with no "signs all" policy. The reported DKIM
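The two OpenDMARC changes above stop the box from emailing failure (forensic) reports back to senders. A minimal sketch of how to spot-check the resulting settings, assuming the stock /etc/opendmarc.conf path edited above:

```bash
grep -E '^(FailureReports|FailureReportsOnNone)' /etc/opendmarc.conf
# expected after setup:
#   FailureReports false
#   FailureReportsOnNone false
```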
@@ -234,6 +234,7 @@ export OS_DEBIAN_10=1
export OS_UBUNTU_2004=2
export OS_DEBIAN_11=3
export OS_UBUNTU_2204=4
export OS_DEBIAN_12=5

function get_os_code {
# A lot of if-statements here - dirty code looking tasting today

@@ -247,6 +248,9 @@ function get_os_code {
elif [[ $VER == "11" ]]; then
echo $OS_DEBIAN_11
return 0
elif [[ $VER == "12" ]]; then
echo $OS_DEBIAN_12
return 0
fi
elif [[ $ID == "Ubuntu" ]]; then
if [[ $VER == "20.04" ]]; then

@@ -205,13 +205,13 @@ chmod -R o-rwx /etc/dovecot
# Ensure mailbox files have a directory that exists and are owned by the mail user.
mkdir -p $STORAGE_ROOT/mail/mailboxes
chown -R mail.mail $STORAGE_ROOT/mail/mailboxes
chown -R mail:mail $STORAGE_ROOT/mail/mailboxes

# Same for the sieve scripts.
mkdir -p $STORAGE_ROOT/mail/sieve
mkdir -p $STORAGE_ROOT/mail/sieve/global_before
mkdir -p $STORAGE_ROOT/mail/sieve/global_after
chown -R mail.mail $STORAGE_ROOT/mail/sieve
chown -R mail:mail $STORAGE_ROOT/mail/sieve

# Allow the IMAP/POP ports in the firewall.
ufw_allow imaps

@@ -69,6 +69,11 @@ management/editconf.py /etc/postfix/main.cf \
maximal_queue_lifetime=2d \
bounce_queue_lifetime=1d

# Guard against SMTP smuggling
# This short-term workaround is recommended at https://www.postfix.org/smtp-smuggling.html
management/editconf.py /etc/postfix/main.cf \
smtpd_data_restrictions=reject_unauth_pipelining

# ### Outgoing Mail

# Enable the 'submission' ports 465 and 587 and tweak their settings.
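The new smtpd_data_restrictions line above is the short-term SMTP Smuggling workaround recommended at postfix.org. Since editconf.py writes it into /etc/postfix/main.cf, the live value can be confirmed after setup with postconf:

```bash
postconf smtpd_data_restrictions
# expected: smtpd_data_restrictions = reject_unauth_pipelining
```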
@@ -32,6 +32,12 @@ inst_dir=/usr/local/lib/mailinabox
mkdir -p $inst_dir
venv=$inst_dir/env
if [ ! -d $venv ]; then
# A bug specific to Ubuntu 22.04 and Python 3.10 requires
# forcing a virtualenv directory layout option (see #2335
# and https://github.com/pypa/virtualenv/pull/2415). In
# our issue, reportedly installing python3-distutils didn't
# fix the problem.)
export DEB_PYTHON_INSTALL_LAYOUT='deb'
hide_output virtualenv -ppython3 $venv
elif [ ! -f $venv/.oscode ]; then
echo "Re-creating Python environment..."
@@ -34,8 +34,8 @@ contact.admin.always_send warning critical
EOF

# The Debian installer touches these files and chowns them to www-data:adm for use with spawn-fcgi
chown munin. /var/log/munin/munin-cgi-html.log
chown munin. /var/log/munin/munin-cgi-graph.log
chown munin /var/log/munin/munin-cgi-html.log
chown munin /var/log/munin/munin-cgi-graph.log

# ensure munin-node knows the name of this machine
# and reduce logging level to warning

@@ -21,8 +21,8 @@ echo "Installing Nextcloud (contacts/calendar)..."
# we automatically install intermediate versions as needed.
# * The hash is the SHA1 hash of the ZIP package, which you can find by just running this script and
# copying it from the error message when it doesn't match what is below.
nextcloud_ver=24.0.7
nextcloud_hash=7fb1afeb3c212bf5530c3d234b23bf314b47655a
nextcloud_ver=25.0.13
nextcloud_hash=eaba90f0fedefade9b05ef40844df98d361259b7

# Nextcloud apps
# --------------

@@ -33,12 +33,18 @@ nextcloud_hash=7fb1afeb3c212bf5530c3d234b23bf314b47655a
# https://github.com/nextcloud-releases/user_external
# * The hash is the SHA1 hash of the ZIP package, which you can find by just running this script and
# copying it from the error message when it doesn't match what is below.
contacts_ver=4.2.2
contacts_hash=cbab9a7acdc11a9e2779c20b850bb21faec1c80f
calendar_ver=3.5.2
calendar_hash=dcf2cba6933dc8805ca4b4d04ed7b993ff4652a1
user_external_ver=3.0.0
user_external_hash=0df781b261f55bbde73d8c92da3f99397000972f

# Check here: https://apps.nextcloud.com/apps/contacts
contacts_ver=5.5.3
contacts_hash=b234ab410480a4106176a28f39c9b27f471d0473

# Always ensure the versions are supported, see https://apps.nextcloud.com/apps/calendar
calendar_ver=4.6.8
calendar_hash=b01187e58a18a35774ed6fa97c1d336454208ddd

# And https://apps.nextcloud.com/apps/user_external
user_external_ver=3.4.0
user_external_hash=7f9d8f4dd6adb85a0e3d7622d85eeb7bfe53f3b4

# Clear prior packages and install dependencies from apt.

@@ -139,7 +145,7 @@ InstallNextcloud() {
# Make sure permissions are correct or the upgrade step won't run.
# $STORAGE_ROOT/owncloud may not yet exist, so use -f to suppress
# that error.
chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud || /bin/true
chown -f -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud || /bin/true

# If this isn't a new installation, immediately run the upgrade script.
# Then check for success (0=ok and 3=no upgrade needed, both are success).

@@ -157,6 +163,7 @@ InstallNextcloud() {
# Add missing indices. NextCloud didn't include this in the normal upgrade because it might take some time.
sudo -u www-data php /usr/local/lib/owncloud/occ db:add-missing-indices
sudo -u www-data php /usr/local/lib/owncloud/occ db:add-missing-primary-keys

# Run conversion to BigInt identifiers, this process may take some time on large tables.
sudo -u www-data php /usr/local/lib/owncloud/occ db:convert-filecache-bigint --no-interaction

@@ -220,6 +227,12 @@ if [ ! -d /usr/local/lib/owncloud/ ] || [[ ! ${CURRENT_NEXTCLOUD_VER} =~ ^$nextc
if [ ! -z ${CURRENT_NEXTCLOUD_VER} ]; then
# Database migrations from ownCloud are no longer possible because ownCloud cannot be run under
# PHP 7.

if [ -e $STORAGE_ROOT/owncloud/config.php ]; then
# Remove the read-onlyness of the config, which is needed for migrations, especially for v24
sed -i -e '/config_is_read_only/d' $STORAGE_ROOT/owncloud/config.php
fi

if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^[89] ]]; then
echo "Upgrades from Mail-in-a-Box prior to v0.28 (dated July 30, 2018) with Nextcloud < 13.0.6 (you have ownCloud 8 or 9) are not supported. Upgrade to Mail-in-a-Box version v0.30 first. Setup will continue, but skip the Nextcloud migration."
return 0

@@ -263,18 +276,23 @@ if [ ! -d /usr/local/lib/owncloud/ ] || [[ ! ${CURRENT_NEXTCLOUD_VER} =~ ^$nextc
CURRENT_NEXTCLOUD_VER="20.0.14"
fi

if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^20 ]]; then
InstallNextcloud 21.0.9 cf8785107c3c079a1f450743558f4f13c85f37a8 4.1.0 38653b507bd7d953816bbc5e8bea7855867eb1cd 3.2.2 54e9a836adc739be4a2a9301b8d6d2e9d88e02f4 2.1.0 6e5afe7f36f398f864bfdce9cad72200e70322aa
InstallNextcloud 21.0.9 cf8785107c3c079a1f450743558f4f13c85f37a8 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.3.3 2bbb534d95fe1e0a7368ca3a7c10d6374705a6c1 2.1.0 6e5afe7f36f398f864bfdce9cad72200e70322aa
CURRENT_NEXTCLOUD_VER="21.0.9"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^21 ]]; then
InstallNextcloud 22.2.6 9d39741f051a8da42ff7df46ceef2653a1dc70d9 4.1.0 38653b507bd7d953816bbc5e8bea7855867eb1cd 3.2.2 54e9a836adc739be4a2a9301b8d6d2e9d88e02f4 3.0.0 0df781b261f55bbde73d8c92da3f99397000972f
InstallNextcloud 22.2.6 9d39741f051a8da42ff7df46ceef2653a1dc70d9 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.5.9 cb3b4df6c9aa99bfc055ab56ba4b7fdcd8e4629d 3.1.0 22cabc88b6fc9c26dad3b46be1a652979c9fcf15
CURRENT_NEXTCLOUD_VER="22.2.6"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^22 ]]; then
InstallNextcloud 23.0.4 87afec0bf90b3c66289e6fedd851867bc5a58f01 4.1.0 38653b507bd7d953816bbc5e8bea7855867eb1cd 3.2.2 54e9a836adc739be4a2a9301b8d6d2e9d88e02f4 3.0.0 0df781b261f55bbde73d8c92da3f99397000972f
InstallNextcloud 23.0.4 87afec0bf90b3c66289e6fedd851867bc5a58f01 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.5.9 cb3b4df6c9aa99bfc055ab56ba4b7fdcd8e4629d 3.1.0 22cabc88b6fc9c26dad3b46be1a652979c9fcf15
CURRENT_NEXTCLOUD_VER="23.0.4"
fi
if [[ ${CURRENT_NEXTCLOUD_VER} =~ ^23 ]]; then
InstallNextcloud 24.0.12 7aa5d61632c1ccf4ca3ff00fb6b295d318c05599 4.2.5 f318636decb3b7276c1e63a06de61dcb10f04bbf 3.5.9 cb3b4df6c9aa99bfc055ab56ba4b7fdcd8e4629d 3.1.0 22cabc88b6fc9c26dad3b46be1a652979c9fcf15
CURRENT_NEXTCLOUD_VER="24.0.12"
fi
fi

InstallNextcloud $nextcloud_ver $nextcloud_hash $contacts_ver $contacts_hash $calendar_ver $calendar_hash $user_external_ver $user_external_hash
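The chain above walks an old installation through 21.0.9, 22.2.6, 23.0.4 and 24.0.12 before the final InstallNextcloud call brings it to 25.0.13. A hedged way to see where an existing box ended up, reusing the occ invocation style from this script:

```bash
sudo -u www-data php /usr/local/lib/owncloud/occ status
# the output includes a "versionstring:" line, e.g. versionstring: 25.0.13
```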
@@ -344,7 +362,7 @@ EOF
EOF

# Set permissions
chown -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
chown -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud

# Execute Nextcloud's setup step, which creates the Nextcloud sqlite database.
# It also wipes it if it exists. And it updates config.php with database

@@ -368,12 +386,12 @@ php <<EOF > $CONFIG_TEMP && mv $CONFIG_TEMP $STORAGE_ROOT/owncloud/config.php;
<?php
include("$STORAGE_ROOT/owncloud/config.php");

\$CONFIG['config_is_read_only'] = true;
\$CONFIG['config_is_read_only'] = false;

\$CONFIG['trusted_domains'] = array('$PRIMARY_HOSTNAME');

\$CONFIG['memcache.local'] = '\OC\Memcache\APCu';
\$CONFIG['overwrite.cli.url'] = '/cloud';
\$CONFIG['overwrite.cli.url'] = 'https://${PRIMARY_HOSTNAME}/cloud';
\$CONFIG['mail_from_address'] = 'administrator'; # just the local part, matches our master administrator address

\$CONFIG['logtimezone'] = '$TIMEZONE';

@@ -395,7 +413,7 @@ var_export(\$CONFIG);
echo ";";
?>
EOF
chown www-data.www-data $STORAGE_ROOT/owncloud/config.php
chown www-data:www-data $STORAGE_ROOT/owncloud/config.php

# Enable/disable apps. Note that this must be done after the Nextcloud setup.
# The firstrunwizard gave Josh all sorts of problems, so disabling that.

@@ -437,20 +455,45 @@ management/editconf.py /etc/php/$(php_version)/cli/conf.d/10-opcache.ini -c ';'
opcache.save_comments=1 \
opcache.revalidate_freq=1

# Migrate users_external data from <0.6.0 to version 3.0.0 (see https://github.com/nextcloud/user_external).
# Migrate users_external data from <0.6.0 to version 3.0.0
# (see https://github.com/nextcloud/user_external).
# This version was probably in use in Mail-in-a-Box v0.41 (February 26, 2019) and earlier.
# We moved to v0.6.3 in 193763f8. Ignore errors - maybe there are duplicated users with the
# correct backend already.
sqlite3 $STORAGE_ROOT/owncloud/owncloud.db "UPDATE oc_users_external SET backend='127.0.0.1';" || /bin/true

# Set up a cron job for Nextcloud.
# Set up a general cron job for Nextcloud.
# Also add another job for Calendar updates, per advice in the Nextcloud docs
# https://docs.nextcloud.com/server/24/admin_manual/groupware/calendar.html#background-jobs
cat > /etc/cron.d/mailinabox-nextcloud << EOF;
#!/bin/bash
# Mail-in-a-Box
*/5 * * * * root sudo -u www-data php -f /usr/local/lib/owncloud/cron.php
*/5 * * * * root sudo -u www-data php -f /usr/local/lib/owncloud/occ dav:send-event-reminders
EOF
chmod +x /etc/cron.d/mailinabox-nextcloud

# We also need to change the sending mode from background-job to occ.
# Or else the reminders will just be sent as soon as possible when the background jobs run.
hide_output sudo -u www-data php -f /usr/local/lib/owncloud/occ config:app:set dav sendEventRemindersMode --value occ

# Now set the config to read-only.
# Do this only at the very bottom when no further occ commands are needed.
sed -i'' "s/'config_is_read_only'\s*=>\s*false/'config_is_read_only' => true/" $STORAGE_ROOT/owncloud/config.php

# Rotate the nextcloud.log file
cat > /etc/logrotate.d/nextcloud <<EOF
# Nextcloud logs
$STORAGE_ROOT/owncloud/nextcloud.log {
size 10M
create 640 www-data www-data
rotate 30
copytruncate
missingok
compress
}
EOF

# There's nothing much of interest that a user could do as an admin for Nextcloud,
# and there's a lot they could mess up, so we don't make any users admins of Nextcloud.
# But if we wanted to, we would do this:
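Because the hunk above both adds the dav:send-event-reminders cron entry and switches sendEventRemindersMode to occ, a simple hedged check that the app setting stuck (same occ path as above):

```bash
sudo -u www-data php -f /usr/local/lib/owncloud/occ config:app:get dav sendEventRemindersMode
# expected output: occ
```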
@@ -13,8 +13,8 @@ fi
case $(get_os_code) in
$OS_UNSUPPORTED)
echo "This version of Power Mail-in-a-Box only supports being installed on one of these operating systems:"
# echo "* Debian 10 (buster)"
echo "* Debian 11 (bullseye)"
echo "* Debian 12 (bookworm)"
echo "* Ubuntu 20.04 LTS (Focal Fossa)"
echo "* Ubuntu 22.04 LTS (Jammy Jellyfish)"
echo

@@ -9,13 +9,19 @@ if [ -z "${NONINTERACTIVE:-}" ]; then
if [ ! -f /usr/bin/dialog ] || [ ! -f /usr/bin/python3 ] || [ ! -f /usr/bin/pip3 ]; then
echo Installing packages needed for setup...
apt-get -q -q update
apt_get_quiet install dialog file python3 python3-pip || exit 1
apt_get_quiet install dialog file python3 python3-pip || exit 1
fi

# Installing email_validator is repeated in setup/management.sh, but in setup/management.sh
# we install it inside a virtualenv. In this script, we don't have the virtualenv yet
# so we install the python package globally.
hide_output pip3 install "email_validator>=1.0.0" || exit 1
# so we install the python package globally

# On Debian 12, this package must be installed via apt-get
if [ "$(get_os_code)" -eq "${OS_DEBIAN_12}" ]; then
apt_get_quiet install python3-email-validator
else
hide_output pip3 install "email_validator>=1.0.0"
fi

message_box "Mail-in-a-Box Installation" \
"Hello and thanks for deploying a (Power) Mail-in-a-Box!
@@ -23,8 +23,12 @@ echo "Installing SpamAssassin..."
apt_install spampd razor pyzor dovecot-antispam libmail-dkim-perl

# Allow spamassassin to download new rules.
management/editconf.py /etc/default/spamassassin \
CRON=1
# On Debian 12, this file is named /etc/default/spamd
if [ -f /etc/default/spamassassin ]; then
management/editconf.py /etc/default/spamassassin CRON=1
else
management/editconf.py /etc/default/spamd CRON=1
fi

# Configure pyzor, which is a client to a live database of hashes of
# spam emails. Set the pyzor configuration directory to something sane.

@@ -90,7 +90,7 @@ f=$STORAGE_ROOT
while [[ $f != / ]]; do chmod a+rx "$f"; f=$(dirname "$f"); done;
if [ ! -f $STORAGE_ROOT/mailinabox.version ]; then
setup/migrate.py --current > $STORAGE_ROOT/mailinabox.version
chown $STORAGE_USER.$STORAGE_USER $STORAGE_ROOT/mailinabox.version
chown $STORAGE_USER:$STORAGE_USER $STORAGE_ROOT/mailinabox.version
fi

chmod 751 $STORAGE_ROOT

@@ -185,7 +185,7 @@ if management/status_checks.py --check-primary-hostname; then
echo "If you have a DNS problem put the box's IP address in the URL"
echo "(https://$PUBLIC_IP/admin) but then check the TLS fingerprint:"
openssl x509 -in $STORAGE_ROOT/ssl/ssl_certificate.pem -noout -fingerprint -sha256\
| sed "s/SHA256 Fingerprint=//"
| sed "s/SHA256 Fingerprint=//i"
else
echo https://$PUBLIC_IP/admin
echo

@@ -193,7 +193,7 @@ else
echo the certificate fingerprint matches:
echo
openssl x509 -in $STORAGE_ROOT/ssl/ssl_certificate.pem -noout -fingerprint -sha256\
| sed "s/SHA256 Fingerprint=//"
| sed "s/SHA256 Fingerprint=//i"
echo
echo Then you can confirm the security exception and continue.
echo

@@ -1,3 +1,5 @@
#!/usr/bin/env bash

source /etc/mailinabox.conf
source setup/functions.sh # load our functions

@@ -125,7 +127,7 @@ apt_get_quiet autoremove
echo Installing system packages...
apt_install python3 python3-dev python3-pip python3-setuptools \
netcat-openbsd wget curl git sudo coreutils bc file \
pollinate openssh-client unzip \
pollinate xxd openssh-client unzip \
unattended-upgrades cron ntp fail2ban rsyslog

# ### Suppress Upgrade Prompts

@@ -220,10 +222,11 @@ dd if=/dev/random of=/dev/urandom bs=1 count=32 2> /dev/null
# is really any good on virtualized systems, we'll also seed from Ubuntu's
# pollinate servers:

if ! pollinate -q -r --strict 2> /dev/null; then
rm -rf /var/cache/pollinate/*
if ! sudo -u pollinate pollinate -q -r --strict 2> /dev/null; then
# In the case pollinate is ill-configured (e.g. server is example.com), try using a server we know that works
# Even if this fails - don't bail and carry on.
pollinate -q -r -s entropy.ubuntu.com 2> /dev/null
sudo -u pollinate pollinate -q -r -s entropy.ubuntu.com 2> /dev/null
fi

# Between these two, we really ought to be all set.

@@ -345,6 +348,12 @@ fi
# which is where bind9 will be running. Obviously don't do this before
# installing bind9 or else apt won't be able to resolve a server to
# download bind9 from.

# On Debian 12, this service needs to be installed first
if [ "$(get_os_code)" -eq "${OS_DEBIAN_12}" ]; then
apt_get_quiet install systemd-resolved
fi

rm -f /etc/resolv.conf
management/editconf.py /etc/systemd/resolved.conf DNSStubListener=no
echo "nameserver 127.0.0.1" > /etc/resolv.conf
@@ -372,3 +381,5 @@ cp -f conf/fail2ban/filter.d/* /etc/fail2ban/filter.d/
# scripts will ensure the files exist and then fail2ban is given another
# restart at the very end of setup.
restart_service fail2ban

systemctl enable fail2ban

@@ -22,8 +22,9 @@ source /etc/mailinabox.conf # load global vars
echo "Installing Roundcube (webmail)..."
apt_install \
dbconfig-common \
php-cli php-sqlite3 php-intl php-json php-common php-curl php-ldap \
php-gd php-pspell libjs-jquery libjs-jquery-mousewheel libmagic1 php-mbstring php-gnupg
php-cli php-sqlite3 php-intl php-json php-common php-curl php-ldap php-imap\
php-gd php-pspell libjs-jquery libjs-jquery-mousewheel libmagic1 php-mbstring php-gnupg \
sqlite3

# Install Roundcube from source if it is not already present or if it is out of date.
# Combine the Roundcube version number with the commit hash of plugins to track

@@ -35,9 +36,9 @@ apt_install \
# https://github.com/mstilkerich/rcmcarddav/releases
# The easiest way to get the package hashes is to run this script and get the hash from
# the error message.
VERSION=1.6.0
HASH=fd84b4fac74419bb73e7a3bcae1978d5589c52de
PERSISTENT_LOGIN_VERSION=version-5.3.0
VERSION=1.6.5
HASH=326fcc206cddc00355e98d1e40fd0bcd9baec69f
PERSISTENT_LOGIN_VERSION=bde7b6840c7d91de627ea14e81cf4133cbb3c07a # version 5.3
HTML5_NOTIFIER_VERSION=68d9ca194212e15b3c7225eb6085dbcf02fd13d7 # version 0.6.4+
CARDDAV_VERSION=4.4.4
CARDDAV_HASH=743fd6925b775f821aa8860982d2bdeec05f5d7b

@@ -141,8 +142,6 @@ cat > $RCM_CONFIG <<EOF;
\$config['password_charset'] = 'UTF-8';
\$config['junk_mbox'] = 'Spam';

/* ensure roudcube session id's aren't leaked to other parts of the server */
\$config['session_path'] = '/mail/';
/* prevent CSRF, requires php 7.3+ */
\$config['session_samesite'] = 'Strict';
\$config['quota_zero_as_unlimited'] = true;

@@ -200,7 +199,7 @@ EOF
# Create writable directories.
mkdir -p /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
chown -R www-data.www-data /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
chown -R www-data:www-data /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube

# Ensure the log file monitored by fail2ban exists, or else fail2ban can't start.
sudo -u www-data touch /var/log/roundcubemail/errors.log

@@ -223,14 +222,14 @@ usermod -a -G dovecot www-data
# set permissions so that PHP can use users.sqlite
# could use dovecot instead of www-data, but not sure it matters
chown root.www-data $STORAGE_ROOT/mail
chown root:www-data $STORAGE_ROOT/mail
chmod 775 $STORAGE_ROOT/mail
chown root.www-data $STORAGE_ROOT/mail/users.sqlite
chown root:www-data $STORAGE_ROOT/mail/users.sqlite
chmod 664 $STORAGE_ROOT/mail/users.sqlite

# Fix Carddav permissions:
chown -f -R root.www-data ${RCM_PLUGIN_DIR}/carddav
# root.www-data need all permissions, others only read
chown -f -R root:www-data ${RCM_PLUGIN_DIR}/carddav
# root:www-data need all permissions, others only read
chmod -R 774 ${RCM_PLUGIN_DIR}/carddav

# Run Roundcube database migration script (database is created if it does not exist)

@@ -238,6 +237,16 @@ php ${RCM_DIR}/bin/updatedb.sh --dir ${RCM_DIR}/SQL --package roundcube
chown www-data:www-data $STORAGE_ROOT/mail/roundcube/roundcube.sqlite
chmod 664 $STORAGE_ROOT/mail/roundcube/roundcube.sqlite

# Patch the Roundcube code to eliminate an issue that causes postfix to reject our sqlite
# user database (see https://github.com/mail-in-a-box/mailinabox/issues/2185)
sed -i.miabold 's/^[^#]\+.\+PRAGMA journal_mode = WAL.\+$/#&/' \
/usr/local/lib/roundcubemail/program/lib/Roundcube/db/sqlite.php

# Because Roundcube wants to set the PRAGMA we just deleted from the source, we apply it here
# to the roundcube database (see https://github.com/roundcube/roundcubemail/issues/8035)
# Database should exist, created by migration script
sqlite3 $STORAGE_ROOT/mail/roundcube/roundcube.sqlite 'PRAGMA journal_mode=WAL;'

# Enable PHP modules.
phpenmod -v php mcrypt imap
restart_service php$(php_version)-fpm
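The sed patch above comments out Roundcube's own PRAGMA statement and the sqlite3 call applies WAL mode directly to the database, so the mode can be confirmed afterwards with the same tool (this assumes the script's $STORAGE_ROOT):

```bash
sqlite3 $STORAGE_ROOT/mail/roundcube/roundcube.sqlite 'PRAGMA journal_mode;'
# expected output: wal
```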
@@ -22,8 +22,8 @@ apt_install \
phpenmod -v php imap

# Copy Z-Push into place.
VERSION=2.6.2
TARGETHASH=f0e8091a8030e5b851f5ba1f9f0e1a05b8762d80
VERSION=2.7.1
TARGETHASH=f15c566b1ad50de24f3f08f505f0c3d8155c2d0d
needs_update=0 #NODOC
if [ ! -f /usr/local/lib/z-push/version ]; then
needs_update=1 #NODOC

@@ -41,7 +41,15 @@ if [ $needs_update == 1 ]; then
mv /tmp/z-push/*/src /usr/local/lib/z-push
rm -rf /tmp/z-push.zip /tmp/z-push

# Create admin and top scripts with PHP_VER
rm -f /usr/sbin/z-push-{admin,top}
echo '#!/bin/bash' > /usr/sbin/z-push-admin
echo php /usr/local/lib/z-push/z-push-admin.php '"$@"' >> /usr/sbin/z-push-admin
chmod 755 /usr/sbin/z-push-admin
echo '#!/bin/bash' > /usr/sbin/z-push-top
echo php /usr/local/lib/z-push/z-push-top.php '"$@"' >> /usr/sbin/z-push-top
chmod 755 /usr/sbin/z-push-top

echo $VERSION > /usr/local/lib/z-push/version
fi
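The wrappers created above simply forward their arguments to the Z-Push PHP tools, so after setup they can be run directly from the shell. The option names in the second line are Z-Push's own and are shown only as an assumed example:

```bash
z-push-top                                   # live view of ActiveSync activity
z-push-admin -a list -u user@example.com     # assumed z-push-admin.php options: list devices for a user
```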
@@ -40,8 +40,8 @@ cp "$1/owncloud.db" $STORAGE_ROOT/owncloud/
cp "$1/config.php" $STORAGE_ROOT/owncloud/

ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php
chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
chown www-data.www-data $STORAGE_ROOT/owncloud/config.php
chown -f -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
chown www-data:www-data $STORAGE_ROOT/owncloud/config.php

sudo -u www-data php /usr/local/lib/owncloud/occ maintenance:mode --off

@@ -17,13 +17,8 @@ accesses = set()
# Scan the current and rotated access logs.
for fn in glob.glob("/var/log/nginx/access.log*"):
# Gunzip if necessary.
if fn.endswith(".gz"):
f = gzip.open(fn)
else:
f = open(fn, "rb")

# Loop through the lines in the access log.
with f:
with (gzip.open if fn.endswith(".gz") else open)(fn, "rb") as f:
for line in f:
# Find lines that are GETs on the bootstrap script by either curl or wget.
# (Note that we purposely skip ...?ping=1 requests which is the admin panel querying us for updates.)

@@ -43,7 +38,8 @@ for date, ip in accesses:
# Since logs are rotated, store the statistics permanently in a JSON file.
# Load in the stats from an existing file.
if os.path.exists(outfn):
existing_data = json.load(open(outfn))
with open(outfn, "r") as f:
existing_data = json.load(f)
for date, count in existing_data:
if date not in by_date:
by_date[date] = count

@@ -124,13 +124,14 @@ def generate_documentation():
""")

parser = Source.parser()
for line in open("setup/start.sh"):
try:
fn = parser.parse_string(line).filename()
except:
continue
if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
continue
with open("setup/start.sh", "r") as start_file:
for line in start_file:
try:
fn = parser.parse_string(line).filename()
except:
continue
if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
continue

import sys
print(fn, file=sys.stderr)

@@ -401,7 +402,8 @@ class BashScript(Grammar):
@staticmethod
def parse(fn):
if fn in ("setup/functions.sh", "/etc/mailinabox.conf"): return ""
string = open(fn).read()
with open(fn, "r") as f:
string = f.read()

# tokenize
string = re.sub(".* #NODOC\n", "", string)