James Turland 2023-09-20 22:34:29 +01:00
commit 6e00e0aa3f
77 changed files with 6752 additions and 0 deletions

File diff suppressed because it is too large

@@ -0,0 +1,49 @@
version: '3'
services:
authelia:
image: authelia/authelia
container_name: authelia
volumes:
- /home/ubuntu/docker/authelia/config:/config
networks:
- proxy
security_opt:
- no-new-privileges:true
labels:
- 'traefik.enable=true'
- 'traefik.http.routers.authelia.rule=Host(`auth.jimsgarage.co.uk`)'
- 'traefik.http.routers.authelia.entrypoints=https'
- 'traefik.http.routers.authelia.tls=true'
- 'traefik.http.middlewares.authelia.forwardAuth.address=http://authelia:9091/api/verify?rd=https://auth.jimsgarage.co.uk'
- 'traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true'
- 'traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email'
- 'traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia:9091/api/verify?auth=basic'
- 'traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true'
- 'traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email'
- 'traefik.http.services.authelia.loadbalancer.server.port=9091'
ports:
- 9091:9091
restart: unless-stopped
environment:
- TZ=Europe/London
healthcheck:
disable: true
redis:
image: redis:alpine
container_name: redis
volumes:
- /home/ubuntu/docker/redis:/data
networks:
- proxy
expose:
- 6379
restart: unless-stopped
environment:
- TZ=Europe/London
networks:
proxy:
external: true
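
Both this stack and the Traefik stack further down attach to an external Docker network named proxy, so it has to exist before either compose file is brought up. A minimal sketch, assuming the default bridge driver:

```bash
# create the shared network that Traefik, Authelia and the other stacks attach to
docker network create proxy
```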

@@ -0,0 +1,19 @@
---
###############################################################
# Users Database #
###############################################################
# This file can be used if you do not have an LDAP set up.
# List of users
users:
authelia:
disabled: false
displayname: "Authelia User"
# Password is authelia
password: "$6$rounds=50000$BpLnfgDsc2WD8F2q$Zis.ixdg9s/UOJYrs56b5QEZFiZECu0qZVNsIYxBaNJ7ucIL.nlxVCT5tqh8KHG8X4tlwCFm5r6NTOZZ5qRFN/" # yamllint disable-line rule:line-length
email: authelia@authelia.com
groups:
- admins
- dev
...
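
The hash above corresponds to the literal password authelia. To generate your own, recent Authelia images bundle a hashing helper; a hedged example (older releases used `authelia hash-password` instead, and Authelia accepts the sha512 format shown above as well as argon2):

```bash
# print a password hash to paste into users_database.yml
docker run --rm authelia/authelia:latest authelia crypto hash generate argon2 --password 'your-strong-password'
```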

@@ -0,0 +1,31 @@
version: "3.9"
services:
web:
image: nginx
container_name: nginx
volumes:
- /home/ubuntu/docker/nginx:/etc/nginx/templates
environment:
- NGINX_HOST=nginx.jimsgarage.co.uk
- NGINX_PORT=80
labels:
- "traefik.enable=true"
- "traefik.http.routers.nginx.entrypoints=http"
- "traefik.http.routers.nginx.rule=Host(`nginx.jimsgarage.co.uk`)"
- "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.nginx.middlewares=nginx-https-redirect"
- "traefik.http.routers.nginx-secure.entrypoints=https"
- "traefik.http.routers.nginx-secure.rule=Host(`nginx.jimsgarage.co.uk`)"
- "traefik.http.routers.nginx-secure.tls=true"
- "traefik.http.routers.nginx-secure.service=nginx"
- "traefik.http.services.nginx.loadbalancer.server.port=80"
- "traefik.http.routers.nginx-secure.middlewares=authelia@docker"
- "traefik.docker.network=proxy"
networks:
proxy:
security_opt:
- no-new-privileges:true
networks:
proxy:
external: true

@@ -0,0 +1,47 @@
version: '3.5'
services:
traefik:
image: traefik:latest
container_name: traefik
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
proxy:
ports:
- 80:80
- 443:443
environment:
- CF_API_EMAIL=your@email.com
- CF_DNS_API_TOKEN=your-api-key
# - CF_API_KEY=YOUR_API_KEY
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /home/ubuntu/docker/traefik/traefik.yml:/traefik.yml:ro
- /home/ubuntu/docker/traefik/acme.json:/acme.json
- /home/ubuntu/docker/traefik/config.yml:/config.yml:ro
- /home/ubuntu/docker/traefik/logs:/var/log/traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.traefik.entrypoints=http"
- "traefik.http.routers.traefik.rule=Host(`traefik-dashboard.yourdomain.co.uk`)"
- "traefik.http.middlewares.traefik-auth.basicauth.users=YOUR_USERNAME_PASSWORD"
- "traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme=https"
- "traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto=https"
- "traefik.http.routers.traefik.middlewares=traefik-https-redirect"
- "traefik.http.routers.traefik-secure.entrypoints=https"
- "traefik.http.routers.traefik-secure.rule=Host(`traefik-dashboard.yourdomain.co.uk`)"
- "traefik.http.routers.traefik-secure.middlewares=traefik-auth"
- "traefik.http.routers.traefik-secure.tls=true"
- "traefik.http.routers.traefik-secure.tls.certresolver=cloudflare"
- "traefik.http.routers.traefik-secure.tls.domains[0].main=yourdomain.co.uk"
- "traefik.http.routers.traefik-secure.tls.domains[0].sans=*.yourdomain.co.uk"
- "traefik.http.routers.traefik-secure.service=api@internal"
- "traefik.http.routers.api.middlewares=authelia@docker
networks:
proxy:
name: proxy
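
A couple of preparation steps are assumed before this stack will start cleanly: acme.json must exist with 600 permissions or Traefik refuses to write certificates into it, and the YOUR_USERNAME_PASSWORD placeholder in the basic-auth label expects an htpasswd string with every $ doubled so docker compose does not treat it as variable interpolation. A rough sketch (paths match the volumes above; htpasswd comes from the apache2-utils package):

```bash
# create the certificate store and log directory Traefik expects
mkdir -p /home/ubuntu/docker/traefik/logs
touch /home/ubuntu/docker/traefik/acme.json
chmod 600 /home/ubuntu/docker/traefik/acme.json

# generate a dashboard user; the sed doubles each $ so the hash survives compose interpolation
echo $(htpasswd -nb admin 'your-password') | sed -e 's/\$/\$\$/g'
```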

Authentik/.env
@@ -0,0 +1,17 @@
PG_PASS=password
AUTHENTIK_SECRET_KEY=password
COMPOSE_PORT_HTTPS=1443
COMPOSE_PORT_HTTP=7000
# SMTP Host Emails are sent to
AUTHENTIK_EMAIL__HOST=localhost
AUTHENTIK_EMAIL__PORT=25
# Optionally authenticate (don't add quotation marks to your password)
AUTHENTIK_EMAIL__USERNAME=email@your-domain.com
AUTHENTIK_EMAIL__PASSWORD=password
# Use StartTLS
AUTHENTIK_EMAIL__USE_TLS=false
# Use SSL
AUTHENTIK_EMAIL__USE_SSL=false
AUTHENTIK_EMAIL__TIMEOUT=10
# Email address authentik will send from, should have a correct @domain
AUTHENTIK_EMAIL__FROM=authentik@localhost
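
The password placeholders above should be replaced with proper random secrets. The authentik documentation suggests generating them with openssl, e.g.:

```bash
# generate values to paste in for PG_PASS and AUTHENTIK_SECRET_KEY
openssl rand -base64 36   # PG_PASS
openssl rand -base64 60   # AUTHENTIK_SECRET_KEY
```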

@@ -0,0 +1,109 @@
---
version: "3.4"
services:
postgresql:
image: docker.io/library/postgres:12-alpine
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
volumes:
- database:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
POSTGRES_DB: ${PG_DB:-authentik}
env_file:
- .env
networks:
proxy:
redis:
image: docker.io/library/redis:alpine
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis:/data
networks:
proxy:
server:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.8.3}
container_name: authentik_server
restart: unless-stopped
command: server
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
volumes:
- ./media:/media
- ./custom-templates:/templates
env_file:
- .env
depends_on:
- postgresql
- redis
networks:
proxy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.authentik.entrypoints=http"
- "traefik.http.routers.authentik.rule=Host(`authentik.yourdomain.com`)"
- "traefik.http.middlewares.authentik-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.authentik.middlewares=authentik-https-redirect"
- "traefik.http.routers.authentik-secure.entrypoints=https"
- "traefik.http.routers.authentik-secure.rule=Host(`authentik.yourdomain.com`)"
- "traefik.http.routers.authentik-secure.tls=true"
- "traefik.http.routers.authentik-secure.service=authentik"
- "traefik.http.services.authentik.loadbalancer.server.port=9000"
- "traefik.docker.network=proxy"
worker:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.8.3}
restart: unless-stopped
command: worker
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
# `user: root` and the docker socket volume are optional.
# See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
# Removing `user: root` also prevents the worker from fixing the permissions
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
# (1000:1000 by default)
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./media:/media
- ./certs:/certs
- ./custom-templates:/templates
env_file:
- .env
depends_on:
- postgresql
- redis
networks:
proxy:
volumes:
database:
driver: local
redis:
driver: local
networks:
proxy:
external: true

@@ -0,0 +1,32 @@
version: "3.9"
services:
web:
image: nginx
container_name: jimsgarage
volumes:
- /home/ubuntu/docker/nginx/templates:/etc/nginx/templates
- /home/ubuntu/docker/nginx/web:/usr/share/nginx/html
environment:
- NGINX_HOST=nginx.jimsgarage.co.uk
- NGINX_PORT=80
labels:
- "traefik.enable=true"
- "traefik.http.routers.nginx.entrypoints=http"
- "traefik.http.routers.nginx.rule=Host(`nginx.jimsgarage.co.uk`)"
- "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.nginx.middlewares=nginx-https-redirect"
- "traefik.http.routers.nginx-secure.entrypoints=https"
- "traefik.http.routers.nginx-secure.rule=Host(`nginx.jimsgarage.co.uk`)"
- "traefik.http.routers.nginx-secure.tls=true"
- "traefik.http.routers.nginx-secure.service=nginx"
- "traefik.http.services.nginx.loadbalancer.server.port=80"
- "traefik.http.routers.nginx-secure.middlewares=middlewares-authentik@file" #add this to any container you want to use the Authentik web proxy
- "traefik.docker.network=proxy"
networks:
proxy:
security_opt:
- no-new-privileges:true
networks:
proxy:
external: true

@@ -0,0 +1,23 @@
http:
middlewares:
crowdsec-bouncer:
forwardAuth:
address: http://bouncer-traefik:8080/api/v1/forwardAuth
trustForwardHeader: true
# https://github.com/goauthentik/authentik/issues/2366
middlewares-authentik:
forwardAuth:
address: "http://authentik_server:9000/outpost.goauthentik.io/auth/traefik"
trustForwardHeader: true
authResponseHeaders:
- X-authentik-username
- X-authentik-groups
- X-authentik-email
- X-authentik-name
- X-authentik-uid
- X-authentik-jwt
- X-authentik-meta-jwks
- X-authentik-meta-outpost
- X-authentik-meta-provider
- X-authentik-meta-app
- X-authentik-meta-version

@@ -0,0 +1,87 @@
---
version: "3.4"
services:
postgresql:
image: docker.io/library/postgres:12-alpine
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
volumes:
- database:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
POSTGRES_DB: ${PG_DB:-authentik}
env_file:
- .env
redis:
image: docker.io/library/redis:alpine
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis:/data
server:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.8.3}
restart: unless-stopped
command: server
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
volumes:
- ./media:/media
- ./custom-templates:/templates
env_file:
- .env
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
depends_on:
- postgresql
- redis
worker:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.8.3}
restart: unless-stopped
command: worker
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
# `user: root` and the docker socket volume are optional.
# See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
# Removing `user: root` also prevents the worker from fixing the permissions
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
# (1000:1000 by default)
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./media:/media
- ./certs:/certs
- ./custom-templates:/templates
env_file:
- .env
depends_on:
- postgresql
- redis
volumes:
database:
driver: local
redis:
driver: local

@@ -0,0 +1,16 @@
version: "3.9"
services:
tunnel:
container_name: cloudflared-tunnel
image: cloudflare/cloudflared
restart: unless-stopped
command: tunnel run
environment:
- TUNNEL_TOKEN=YOUR_KEY_HERE
networks:
macvlan4: # change name to whatever you like
ipv4_address: 192.168.4.20 # change to your IP in your vLAN subnet
networks:
macvlan4:
external: true

@@ -0,0 +1,5 @@
docker network create -d macvlan \
--subnet=192.168.4.0/24 \
--gateway=192.168.4.1 \
-o parent=eth0.4 \
macvlan4
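
A quick sanity check afterwards (the eth0.4 parent assumes a VLAN 4 sub-interface already exists on the host; adjust to your NIC and VLAN):

```bash
# confirm the VLAN sub-interface and the new Docker network
ip -d link show eth0.4
docker network inspect macvlan4
```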

@@ -0,0 +1,37 @@
---
version: "2.1"
services:
code-server:
image: lscr.io/linuxserver/code-server:latest
container_name: code-server
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
- PASSWORD=password #optional
- HASHED_PASSWORD= #optional
- SUDO_PASSWORD=password #optional
- SUDO_PASSWORD_HASH= #optional
- PROXY_DOMAIN=code-server.jimsgarage.co.uk #optional
- DEFAULT_WORKSPACE=/config/workspace #optional
volumes:
- /home/ubuntu/docker/code-server/config:/config
networks:
proxy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.code-server.entrypoints=http"
- "traefik.http.routers.code-server.rule=Host(`code-server.yourdomain.com`)"
- "traefik.http.middlewares.code-server-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.code-server.middlewares=code-server-https-redirect"
- "traefik.http.routers.code-server-secure.entrypoints=https"
- "traefik.http.routers.code-server-secure.rule=Host(`code-server.yourdomain.com`)"
- "traefik.http.routers.code-server-secure.tls=true"
- "traefik.http.routers.code-server-secure.service=code-server"
- "traefik.http.services.code-server.loadbalancer.server.port=8443"
- "traefik.docker.network=proxy"
restart: unless-stopped
networks:
proxy:
external: true

@@ -0,0 +1,6 @@
http:
middlewares:
crowdsec-bouncer:
forwardauth:
address: http://bouncer-traefik:8080/api/v1/forwardAuth
trustForwardHeader: true

@@ -0,0 +1,41 @@
api:
dashboard: true
debug: true
entryPoints:
http:
address: ":80"
http:
middlewares:
- crowdsec-bouncer@file
redirections:
entryPoint:
to: https
scheme: https
https:
address: ":443"
http:
middlewares:
- crowdsec-bouncer@file
serversTransport:
insecureSkipVerify: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
file:
filename: /config.yml
certificatesResolvers:
cloudflare:
acme:
email: your@email.com #add your email
storage: acme.json
dnsChallenge:
provider: cloudflare
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"
log:
level: "INFO"
filePath: "/var/log/traefik/traefik.log"
accessLog:
filePath: "/var/log/traefik/access.log"

Crowdsec/acquis.yaml
@@ -0,0 +1,4 @@
filenames:
- /var/log/traefik/*
labels:
type: traefik

@@ -0,0 +1,35 @@
version: '3.8'
services:
crowdsec:
image: crowdsecurity/crowdsec:latest
container_name: crowdsec
environment:
GID: "${GID-1000}"
COLLECTIONS: "crowdsecurity/linux crowdsecurity/traefik"
volumes:
- /home/ubuntu/docker/crowdsec/acquis.yaml:/etc/crowdsec/acquis.yaml
- /home/ubuntu/docker/crowdsec/db:/var/lib/crowdsec/data/
- /home/ubuntu/docker/crowdsec/config:/etc/crowdsec/
- /home/ubuntu/docker/traefik/logs:/var/log/traefik/:ro
networks:
- proxy
security_opt:
- no-new-privileges:true
restart: unless-stopped
bouncer-traefik:
image: docker.io/fbonalair/traefik-crowdsec-bouncer:latest
container_name: bouncer-traefik
environment:
CROWDSEC_BOUNCER_API_KEY: create_a_random_api_key # generated with cscli (see the commands after this file)
CROWDSEC_AGENT_HOST: crowdsec:8080
networks:
- proxy
depends_on:
- crowdsec
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
proxy:
external: true
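
create_a_random_api_key is a placeholder: the bouncer key is issued by CrowdSec itself. Once the crowdsec container is running, something along these lines registers the Traefik bouncer and prints the key to paste into CROWDSEC_BOUNCER_API_KEY:

```bash
# register the bouncer and copy the printed API key into the compose file
docker exec crowdsec cscli bouncers add bouncer-traefik

# optional sanity checks once everything is restarted
docker exec crowdsec cscli bouncers list
docker exec crowdsec cscli metrics
```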

Frigate/config.yml
@@ -0,0 +1,96 @@
cameras:
garden:
ffmpeg:
inputs:
- path: rtsp://cameraIP:554/s0
roles:
- detect
- rtmp
detect:
width: 1920 # <---- update for your camera's resolution
height: 1080 # <---- update for your camera's resolution
fps: 25
garage:
ffmpeg:
inputs:
- path: rtsp://cameraIP:554/s0
roles:
- detect
- rtmp
detect:
width: 1920 # <---- update for your camera's resolution
height: 1080 # <---- update for your camera's resolution
fps: 25
motion:
mask:
- 0,0,1920,0,1920,393,297,241,314,464,0,541
# Optional: Database configuration
database:
# The path to store the SQLite DB (default: shown below)
path: /media/frigate/frigate.db
detectors:
coral1:
type: edgetpu
device: pci:0
coral2:
type: edgetpu
device: pci:1
objects:
track:
- person
- dog
- cat
- bird
snapshots:
enabled: True
timestamp: true
bounding_box: true
retain:
default: 30
mqtt:
host: 192.168.200.14
ffmpeg:
#hwaccel_args:
# - -hwaccel
# - vaapi
# - -hwaccel_device
# - /dev/dri/renderD128
# - -hwaccel_output_format
# - yuv420p
output_args:
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
record:
enabled: True
events:
pre_capture: 5
post_capture: 5
retain:
default: 30
mode: active_objects
objects:
- person
- bird
- cat
- dog
birdseye:
# Optional: Enable birdseye view (default: shown below)
enabled: True
# Optional: Width of the output resolution (default: shown below)
width: 1280
# Optional: Height of the output resolution (default: shown below)
height: 720
# Optional: Encoding quality of the mpeg1 feed (default: shown below)
# 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
quality: 8
# Optional: Mode of the view. Available options are: objects, motion, and continuous
# objects - cameras are included if they have had a tracked object within the last 30 seconds
# motion - cameras are included if motion was detected in the last 30 seconds
# continuous - all cameras are included always
mode: continuous

@@ -0,0 +1,45 @@
version: "3.9"
services:
frigate:
container_name: frigate
# privileged: true # this may not be necessary for all setups
restart: unless-stopped
image: blakeblackshear/frigate:stable
devices:
#- /dev/bus/usb:/dev/bus/usb
- /dev/apex_0:/dev/apex_0
- /dev/apex_1:/dev/apex_1
#- /dev/dri/renderD128:/dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
volumes:
- /etc/localtime:/etc/localtime:ro
- /home/ubuntu/docker/frigate/config.yml:/config/config.yml:ro
- /home/ubuntu/freenas/Frigate/media/clips:/media/frigate/clips
- /home/ubuntu/freenas/Frigate/media/recordings:/media/frigate/recordings
- /home/ubuntu/docker/frigate/database:/media/frigate
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
target: /tmp/cache
tmpfs:
size: 2000000000
ports:
- "5000:5000"
- "1935:1935" # RTMP feeds (deprecated)
- "8554:8554" # RTSP feeds
environment:
FRIGATE_RTSP_PASSWORD: "password"
security_opt:
- no-new-privileges:true
labels:
- "traefik.enable=true"
- "traefik.http.routers.frigate.entrypoints=http"
- "traefik.http.routers.frigate.rule=Host(`frigate.jimsgarage.co.uk`)"
- "traefik.http.middlewares.frigate-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.frigate.middlewares=frigate-https-redirect"
- "traefik.http.routers.frigate-secure.entrypoints=https"
- "traefik.http.routers.frigate-secure.rule=Host(`frigate.jimsgarage.co.uk`)"
- "traefik.http.routers.frigate-secure.tls=true"
- "traefik.http.routers.frigate-secure.service=frigate"
- "traefik.http.services.frigate.loadbalancer.server.port=5000"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true
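
This assumes the two PCIe Coral TPUs are already visible on the host, i.e. the gasket/apex driver is loaded and has created the /dev/apex_* nodes mapped above. A quick check before starting the container:

```bash
# the Coral cards should appear on the PCIe bus...
lspci -nn | grep -i coral
# ...and the apex driver should have created the device nodes passed through above
ls -l /dev/apex_0 /dev/apex_1
```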

GPU_passthrough/readme.md
@@ -0,0 +1,47 @@
1) In your PC/Laptop/Server BIOS make sure the following is enabled:
Intel: VT-d & VT-x (check Intel's compatibility list)
All AMD CPUs from Bulldozer onwards should be compatible.
2) Get device IDs:
lspci -nn
3) Enable IOMMU in GRUB (check Intel or AMD commands below - choose the right one)
nano /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on iommu=pt"
GRUB_CMDLINE_LINUX_DEFAULT="quiet amd_iommu=on"
save and exit
4) Run the command "update-grub"
now reboot
5) Enable VFIO Modules
nano /etc/modules
Add the following modules:
vfio
vfio_iommu_type1
vfio_pci
vfio_virqfd
Then, save and exit
Next run:
update-initramfs -u -k all
and reboot
6) GPU Isolation From the Host (amend the below to include the IDs of the device you want to isolate)
echo "options vfio-pci ids=10de:1381,10de:0fbc disable_vga=1" > /etc/modprobe.d/vfio.conf
7) Blacklist GPU drivers (here are all that you would ever need)
echo "blacklist radeon" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidia" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidiafb" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidia_drm" >> /etc/modprobe.d/blacklist.conf
8) Create a new VM and add the GPU via the hardware menu
You may need to set it as primary GPU
You may need to add a ROM BAR

@@ -0,0 +1,30 @@
version: '3.3'
services:
gotify:
image: gotify/server
container_name: gotify
volumes:
- /home/ubuntu/docker/gotify:/app/data
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
proxy:
environment:
- TZ=Europe/London
labels:
- "traefik.enable=true"
- "traefik.http.routers.gotify.entrypoints=http"
- "traefik.http.routers.gotify.rule=Host(`gotify.yourdomain.com`)"
- "traefik.http.middlewares.gotify-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.gotify.middlewares=gotify-https-redirect"
- "traefik.http.routers.gotify-secure.entrypoints=https"
- "traefik.http.routers.gotify-secure.rule=Host(`gotify.yourdomain.com`)"
- "traefik.http.routers.gotify-secure.tls=true"
- "traefik.http.routers.gotify-secure.service=gotify"
- "traefik.http.services.gotify.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true

File diff suppressed because it is too large

@@ -0,0 +1,16 @@
# my global config
global:
scrape_interval: 15s
evaluation_interval: 30s
body_size_limit: 15MB
sample_limit: 1500
target_limit: 30
label_limit: 30
label_name_length_limit: 200
label_value_length_limit: 200
# scrape_timeout is set to the global default (10s).
scrape_configs:
- job_name: crowdsec_myMachine
static_configs:
- targets: ["192.168.7.114:6060"] # change this to your crowdsec IP. Be sure to enable port 6060 first
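
CrowdSec only serves these metrics once the prometheus section of its own config.yaml is enabled and listening on 0.0.0.0:6060, and the port has to be reachable from Prometheus (e.g. published on the crowdsec container). A quick way to confirm the target is scrapeable:

```bash
# should return Prometheus-format metrics from the crowdsec container
curl -s http://192.168.7.114:6060/metrics | head -n 20
```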

@@ -0,0 +1,404 @@
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
## Note: configure this in one of the manager nodes in a Swarm cluster.
## configuring in multiple Swarm managers results in duplication of metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
container_names = []
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
source_tag = false
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...),
## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
## is honored.
perdevice = true
## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true'
# perdevice_include = ["cpu"]
## Whether to report for each container total blkio and network stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
## is honored.
total = false
## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
## Please note that this setting has no effect if 'total' is set to 'false'
# total_include = ["cpu", "blkio", "network"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
[[inputs.snmp]]
agents = [ "192.168.0.1:161" ] #change this to your Sophos IP
version = 1
community = "sophos-xg"
interval = "60s"
timeout = "10s"
retries = 3
[[inputs.snmp.field]]
name = "deviceName"
oid = "1.3.6.1.4.1.2604.5.1.1.1.0"
[[inputs.snmp.field]]
name = "deviceType"
oid = "1.3.6.1.4.1.2604.5.1.1.2.0"
[[inputs.snmp.field]]
name = "deviceFirewallVersion"
oid = "1.3.6.1.4.1.2604.5.1.1.3.0"
[[inputs.snmp.field]]
name = "deviceAppKey"
oid = "1.3.6.1.4.1.2604.5.1.1.4.0"
[[inputs.snmp.field]]
name = "webCatVersion"
oid = "1.3.6.1.4.1.2604.5.1.1.5.0"
[[inputs.snmp.field]]
name = "ipsVersion"
oid = "1.3.6.1.4.1.2604.5.1.1.6.0"
[[inputs.snmp.field]]
name = "ipsVersion"
oid = "1.3.6.1.4.1.2604.5.1.1.6.0"
[[inputs.snmp.field]]
name = "currentDate"
oid = "1.3.6.1.4.1.2604.5.1.2.1.0"
[[inputs.snmp.field]]
name = "upTime"
oid = "1.3.6.1.4.1.2604.5.1.2.2.0"
[[inputs.snmp.field]]
name = "liveUsers"
oid = "1.3.6.1.4.1.2604.5.1.2.6.0"
[[inputs.snmp.field]]
name = "httpHits"
oid = "1.3.6.1.4.1.2604.5.1.2.7.0"
[[inputs.snmp.field]]
name = "ftpHits"
oid = "1.3.6.1.4.1.2604.5.1.2.8.0"
[[inputs.snmp.field]]
name = "diskCapacity"
oid = "1.3.6.1.4.1.2604.5.1.2.4.1.0"
[[inputs.snmp.field]]
name = "diskPercentage"
oid = "1.3.6.1.4.1.2604.5.1.2.4.2.0"
[[inputs.snmp.field]]
name = "memoryCapacity"
oid = "1.3.6.1.4.1.2604.5.1.2.5.1.0"
[[inputs.snmp.field]]
name = "memoryPercentage"
oid = "1.3.6.1.4.1.2604.5.1.2.5.2.0"
[[inputs.snmp.field]]
name = "swapCapacity"
oid = "1.3.6.1.4.1.2604.5.1.2.5.3.0"
[[inputs.snmp.field]]
name = "swapPercentage"
oid = "1.3.6.1.4.1.2604.5.1.2.5.4.0"
[[inputs.snmp.field]]
name = "pop3Hits"
oid = "1.3.6.1.4.1.2604.5.1.2.9.1.0"
[[inputs.snmp.field]]
name = "imapHits"
oid = "1.3.6.1.4.1.2604.5.1.2.9.2.0"
[[inputs.snmp.field]]
name = "smtpHits"
oid = "1.3.6.1.4.1.2604.5.1.2.9.3.0"
[[inputs.snmp.field]]
name = "pop3service"
oid = "1.3.6.1.4.1.2604.5.1.3.1.0"
[[inputs.snmp.field]]
name = "imap4service"
oid = "1.3.6.1.4.1.2604.5.1.3.2.0"
[[inputs.snmp.field]]
name = "smtpService"
oid = "1.3.6.1.4.1.2604.5.1.3.3.0"
[[inputs.snmp.field]]
name = "ftpService"
oid = "1.3.6.1.4.1.2604.5.1.3.4.0"
[[inputs.snmp.field]]
name = "ftpService"
oid = "1.3.6.1.4.1.2604.5.1.3.4.0"
[[inputs.snmp.field]]
name = "httpService"
oid = "1.3.6.1.4.1.2604.5.1.3.5.0"
[[inputs.snmp.field]]
name = "avService"
oid = "1.3.6.1.4.1.2604.5.1.3.6.0"
[[inputs.snmp.field]]
name = "asService"
oid = "1.3.6.1.4.1.2604.5.1.3.7.0"
[[inputs.snmp.field]]
name = "dnsService"
oid = "1.3.6.1.4.1.2604.5.1.3.8.0"
[[inputs.snmp.field]]
name = "shaService"
oid = "1.3.6.1.4.1.2604.5.1.3.9.0"
[[inputs.snmp.field]]
name = "ipsService"
oid = "1.3.6.1.4.1.2604.5.1.3.10.0"
[[inputs.snmp.field]]
name = "apacheService"
oid = "1.3.6.1.4.1.2604.5.1.3.11.0"
[[inputs.snmp.field]]
name = "ntpService"
oid = "1.3.6.1.4.1.2604.5.1.3.12.0"
[[inputs.snmp.field]]
name = "tomcatService"
oid = "1.3.6.1.4.1.2604.5.1.3.13.0"
[[inputs.snmp.field]]
name = "sslVpnService"
oid = "1.3.6.1.4.1.2604.5.1.3.14.0"
[[inputs.snmp.field]]
name = "ipSecService"
oid = "1.3.6.1.4.1.2604.5.1.3.15.0"
[[inputs.snmp.field]]
name = "databaseService"
oid = "1.3.6.1.4.1.2604.5.1.3.16.0"
[[inputs.snmp.field]]
name = "networkService"
oid = "1.3.6.1.4.1.2604.5.1.3.17.0"
[[inputs.snmp.field]]
name = "garnerService"
oid = "1.3.6.1.4.1.2604.5.1.3.18.0"
[[inputs.snmp.field]]
name = "droutingService"
oid = "1.3.6.1.4.1.2604.5.1.3.19.0"
[[inputs.snmp.field]]
name = "sshdService"
oid = "1.3.6.1.4.1.2604.5.1.3.20.0"
[[inputs.snmp.field]]
name = "dgdService"
oid = "1.3.6.1.4.1.2604.5.1.3.21.0"
[[inputs.snmp.field]]
name = "haStatus"
oid = "1.3.6.1.4.1.2604.5.1.4.1.0"
[[inputs.snmp.field]]
name = "currentAppKey"
oid = "1.3.6.1.4.1.2604.5.1.4.2.0"
[[inputs.snmp.field]]
name = "peerAppKey"
oid = "1.3.6.1.4.1.2604.5.1.4.3.0"
[[inputs.snmp.field]]
name = "currentHAstate"
oid = "1.3.6.1.4.1.2604.5.1.4.4.0"
[[inputs.snmp.field]]
name = "peerHAstate"
oid = "1.3.6.1.4.1.2604.5.1.4.5.0"
[[inputs.snmp.field]]
name = "haConfigMode"
oid = "1.3.6.1.4.1.2604.5.1.4.6.0"
[[inputs.snmp.field]]
name = "loadBalancing"
oid = "1.3.6.1.4.1.2604.5.1.4.7.0"
[[inputs.snmp.field]]
name = "haPort"
oid = "1.3.6.1.4.1.2604.5.1.4.8.0"
[[inputs.snmp.field]]
name = "haPort"
oid = "1.3.6.1.4.1.2604.5.1.4.8.0"
[[inputs.snmp.field]]
name = "auxAdminPort"
oid = "1.3.6.1.4.1.2604.5.1.4.11.1.0"
[[inputs.snmp.field]]
name = "auxAdminPortipv6"
oid = "1.3.6.1.4.1.2604.5.1.4.11.3.0"
[[inputs.snmp.field]]
name = "baseFwLicenseStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.1.1.0"
[[inputs.snmp.field]]
name = "baseFwLicenseExpiry"
oid = "1.3.6.1.4.1.2604.5.1.5.1.2.0"
[[inputs.snmp.field]]
name = "networkProtectLicenseStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.2.1.0"
[[inputs.snmp.field]]
name = "networkProtectExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.2.2.0"
[[inputs.snmp.field]]
name = "webProtectLicenseStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.3.1.0"
[[inputs.snmp.field]]
name = "webProtectExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.3.2.0"
[[inputs.snmp.field]]
name = "mailProtectLicenseStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.4.1.0"
[[inputs.snmp.field]]
name = "mailProtectExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.4.2.0"
[[inputs.snmp.field]]
name = "webServerLicenseStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.5.1.0"
[[inputs.snmp.field]]
name = "webServerExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.5.2.0"
[[inputs.snmp.field]]
name = "sandstormLicenseStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.6.1.0"
[[inputs.snmp.field]]
name = "sandstormExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.6.2.0"
[[inputs.snmp.field]]
name = "enhancedSupportStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.7.1.0"
[[inputs.snmp.field]]
name = "enhancedSupportExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.7.2.0"
[[inputs.snmp.field]]
name = "enhancedPlusStatus"
oid = "1.3.6.1.4.1.2604.5.1.5.8.1.0"
[[inputs.snmp.field]]
name = "enhancedPlustExpiryDate"
oid = "1.3.6.1.4.1.2604.5.1.5.8.2.0"
[[inputs.snmp.field]]
name = "ipSecVPNconnID"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.1"
[[inputs.snmp.field]]
name = "ipSecVPNconnName"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.2"
[[inputs.snmp.field]]
name = "ipSecVPNconnDescription"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.3"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyName"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.4"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyMode"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.5"
[[inputs.snmp.field]]
name = "ipSecVPNconnMode"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.6"
[[inputs.snmp.field]]
name = "ipSecVPNlocalGWport"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.7"
[[inputs.snmp.field]]
name = "ipSecVPNactiveTunnel"
oid = "1.3.6.1.4.1.2604.5.1.6.1.1.1.1.8"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyID"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.1"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyName"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.2"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyKeyLife"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.3"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyKeyMargin"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.4"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyEncAlg1"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.5"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyAuthAlg1"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.6"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyEncAlg2"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.7"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyAuthAlg2"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.8"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyEncAlg3"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.9"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyAuthAlg3"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.10"
[[inputs.snmp.field]]
name = "ipSecVPNpolicyKeyExchType"
oid = "1.3.6.1.4.1.2604.5.1.6.1.2.1.1.11"
[[inputs.snmp.field]]
name = "deviceAPname"
oid = "1.3.6.1.4.1.2604.5.1.7.1.1.1"
[[inputs.snmp.field]]
name = "deviceAPmodel"
oid = "1.3.6.1.4.1.2604.5.1.7.1.1.2"
[[inputs.snmp.field]]
name = "deviceAPmacAddr"
oid = "1.3.6.1.4.1.2604.5.1.7.1.1.3"
[[inputs.snmp.field]]
name = "deviceAPstatus"
oid = "1.3.6.1.4.1.2604.5.1.7.1.1.4"
[[inputs.snmp.field]]
name = "deviceAPclientCount"
oid = "1.3.6.1.4.1.2604.5.1.7.1.1.5"
[[inputs.snmp.field]]
name = "apClientIndex"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.1"
[[inputs.snmp.field]]
name = "apClientName"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.2"
[[inputs.snmp.field]]
name = "apClientIPaddrType"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.3"
[[inputs.snmp.field]]
name = "apClientIPaddr"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.4"
[[inputs.snmp.field]]
name = "apClientMacAddr"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.5"
[[inputs.snmp.field]]
name = "apClientChannel"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.6"
[[inputs.snmp.field]]
name = "apClientSSID"
oid = "1.3.6.1.4.1.2604.5.1.7.2.1.7"
[[inputs.snmp.field]]
name = "trapMessage"
oid = "1.3.6.1.4.1.2604.5.1.8.1.2.0"
[[outputs.influxdb_v2]]
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://influxdb:8086"]
## API token for authentication.
token = "ESdPkl_2F4IOipn7v428L5uHgvLLM6Zrv5vqQl9QObA5ROmPyvVCSMTKV8cB5ERRrSmFItr7V_VFb6vJYpizlA==" # change this to your API Token
## Organization is the name of the organization you wish to write to; must exist.
organization = "home"
## Destination bucket to write into.
bucket = "homelab"
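
Before pointing Telegraf at the firewall it helps to confirm the Sophos SNMP agent answers with the v1 community configured above. With the net-snmp tools installed (the snmp package on Debian/Ubuntu), something like:

```bash
# query the upTime OID used in the config above
snmpwalk -v1 -c sophos-xg 192.168.0.1 1.3.6.1.4.1.2604.5.1.2.2.0
```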

@@ -0,0 +1,109 @@
version: "3"
services:
loki:
container_name: loki
image: grafana/loki:main
networks:
- grafana-monitoring
volumes:
- /home/ubuntu/docker/grafana-monitoring/loki:/etc/loki
ports:
- "3100:3100"
restart: unless-stopped
command: -config.file=/etc/loki/loki-config.yml
promtail:
container_name: promtail
image: grafana/promtail:main
networks:
- grafana-monitoring
volumes:
- /var/log:/var/log
- /home/ubuntu/docker/grafana-monitoring/promtail:/etc/promtail
ports:
- "1514:1514" # this is only needed if you are going to send syslogs
restart: unless-stopped
command: -config.file=/etc/promtail/promtail-config.yml
grafana:
container_name: grafana
image: grafana/grafana-oss:main-ubuntu
user: "0"
networks:
- grafana-monitoring
- proxy
volumes:
- /home/ubuntu/docker/grafana-monitoring/grafana:/var/lib/grafana
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.entrypoints=http"
- "traefik.http.routers.grafana.rule=Host(`grafana.jimsgarage.co.uk`)"
- "traefik.http.routers.grafana.middlewares=default-whitelist@file"
- "traefik.http.middlewares.grafana-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.grafana.middlewares=grafana-https-redirect"
- "traefik.http.routers.grafana-secure.entrypoints=https"
- "traefik.http.routers.grafana-secure.rule=Host(`grafana.jimsgarage.co.uk`)"
- "traefik.http.routers.grafana-secure.tls=true"
- "traefik.http.routers.grafana-secure.service=grafana"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
- "traefik.docker.network=proxy"
influxdb:
container_name: influxdb
image: influxdb:latest
restart: unless-stopped
ports:
- 8086:8086
- 8089:8089/udp
networks:
- grafana-monitoring
volumes:
- /home/ubuntu/docker/grafana-monitoring/influxdb:/var/lib/influxdb2
telegraf:
container_name: telegraf
restart: unless-stopped
user: telegraf:995 # set this to the GID of the docker group on your host (see the commands after this file)
networks:
- grafana-monitoring
volumes:
- '/home/ubuntu/docker/grafana-monitoring/telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro'
- '/:/hostfs:ro' # to monitor docker-vm
- '/var/run/docker.sock:/var/run/docker.sock' # to monitor docker containers
- '/home/ubuntu/docker/grafana-monitoring/telegraf/mibs:/usr/share/snmp/mibs' # mibs files [e.g., sophos]
environment:
- HOST_ETC=/hostfs/etc
- HOST_PROC=/hostfs/proc
- HOST_SYS=/hostfs/sys
- HOST_VAR=/hostfs/var
- HOST_RUN=/hostfs/run
- HOST_MOUNT_PREFIX=/hostfs
image: telegraf:latest
graphite:
image: graphiteapp/graphite-statsd
container_name: graphite
restart: unless-stopped
ports:
- 8050:80 # nginx
- 2003-2004 # carbon receiver - plaintext & pickle
- 2023-2024 # carbon aggregator - plaintext & pickle
- 8125:8125/udp # statsd
- 8126:8126 # statsd admin
volumes:
- /home/ubuntu/docker/grafana-monitoring/graphite/configs:/opt/graphite/conf
- /home/ubuntu/docker/grafana-monitoring/graphite/data:/opt/graphite/storage
- /home/ubuntu/docker/grafana-monitoring/graphite/statsd_config:/opt/statsd/config
networks:
- grafana-monitoring
prometheus:
image: prom/prometheus
container_name: prometheus
restart: unless-stopped
ports:
- 9090:9090
volumes:
- /home/ubuntu/docker/grafana-monitoring/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml
networks:
- grafana-monitoring
networks:
grafana-monitoring:
proxy:
external: true
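
The telegraf:995 user assumes 995 is the docker group's GID on the host, which varies between distros; check before copying it verbatim:

```bash
# either of these reveals the GID to put in "user: telegraf:<gid>"
getent group docker
stat -c '%g' /var/run/docker.sock
```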

@@ -0,0 +1,11 @@
# my global config
global:
scrape_interval: 15s
evaluation_interval: 30s
body_size_limit: 15MB
sample_limit: 1500
target_limit: 30
label_limit: 30
label_name_length_limit: 200
label_value_length_limit: 200
# scrape_timeout is set to the global default (10s).

@@ -0,0 +1,72 @@
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
## Note: configure this in one of the manager nodes in a Swarm cluster.
## configuring in multiple Swarm managers results in duplication of metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
container_names = []
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
source_tag = false
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...),
## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
## is honored.
perdevice = true
## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true'
# perdevice_include = ["cpu"]
## Whether to report for each container total blkio and network stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
## is honored.
total = false
## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
## Please note that this setting has no effect if 'total' is set to 'false'
# total_include = ["cpu", "blkio", "network"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@@ -0,0 +1,18 @@
services:
tailscale:
container_name: tailscale
image: tailscale/tailscale:stable
hostname: headtailscale
volumes:
- /home/ubuntu/docker/tailscale/data:/var/lib/tailscale
- /dev/net/tun:/dev/net/tun
network_mode: "host"
cap_add:
- NET_ADMIN
- NET_RAW
environment:
- TS_STATE_DIR=/var/lib/tailscale
- TS_EXTRA_ARGS=--login-server=https://headscale.jimsgarage.co.uk --advertise-exit-node --advertise-routes=192.168.0.0/16 --accept-dns=true
- TS_NO_LOGS_NO_SUPPORT=true
# - TS_AUTHKEY=e6f46b99f2ddsfsf3easdf125590e415db007 # generate this key inside your headscale server container (see the commands after this file)
restart: unless-stopped
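
The commented-out TS_AUTHKEY has to come from your Headscale server. On recent Headscale releases (older ones used namespaces and --namespace instead of users) a key can be created roughly like this, with homelab standing in for whatever user you register:

```bash
# create a user once, then issue a reusable pre-auth key for the tailscale client above
docker exec headscale headscale users create homelab
docker exec headscale headscale preauthkeys create --user homelab --reusable --expiration 24h
```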

Headscale/config.yaml
@@ -0,0 +1,326 @@
---
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
#
# - `/etc/headscale`
# - `~/.headscale`
# - current working directory
# The url clients will connect to.
# Typically this will be a domain like:
#
# https://myheadscale.example.com:443
#
server_url: https://yourdomain.com
# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: 127.0.0.1:8080
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: 127.0.0.1:9090
# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false
# Private key used to encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/private.key
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol. It must be different
# from the legacy private key.
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
# See below:
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
# Any other range is NOT supported, and it will cause unexpected issues.
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
server:
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
enabled: false
# Region ID to use for the embedded DERP server.
# The local DERP prevails if the region ID collides with other region ID coming from
# the regular DERP config.
region_id: 999
# Region code and name are displayed in the Tailscale UI to identify a DERP region
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
# Locally available DERP map files encoded in YAML
#
# This option is mostly interesting for people hosting
# their own DERP servers:
# https://tailscale.com/kb/1118/custom-derp-servers/
#
# paths:
# - /etc/headscale/derp-example.yaml
paths: []
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap
# will be set up.
auto_update_enabled: true
# How often should we check for DERP updates?
update_frequency: 24h
# Disables the automatic check for headscale updates on startup
disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
# Period to check for node updates within the tailnet. A value too low will severely affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keep alive messages frequently enough.
# In case of doubts, do not touch the default 10s.
node_update_check_interval: 10s
# SQLite config
db_type: sqlite3
# For production:
db_path: /var/lib/headscale/db.sqlite
# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
# db_type: postgres
# db_host: localhost
# db_port: 5432
# db_name: headscale
# db_user: foo
# db_pass: bar
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# db_ssl: false
### TLS configuration
#
## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory
# Email to register with ACME provider
acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See [docs/tls.md](docs/tls.md) for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"
## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
# Path to a file containg ACL policies.
# ACLs can be defined as YAML or HUJSON.
# https://tailscale.com/kb/1018/acls/
acl_policy_path: ""
## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
dns_config:
# Whether to prefer using Headscale provided DNS or use local.
override_local_dns: true
# List of DNS servers to expose to clients.
nameservers:
- 1.1.1.1
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
#
# With metadata sharing:
# nameservers:
# - https://dns.nextdns.io/abc123
#
# Without metadata sharing:
# nameservers:
# - 2a07:a8c0::ab:c123
# - 2a07:a8c1::ab:c123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# list of search domains and the DNS to query for each one.
#
# restricted_nameservers:
# foo.bar.com:
# - 1.1.1.1
# darp.headscale.net:
# - 1.1.1.1
# - 8.8.8.8
# Search domains to inject.
domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
# Defines the base domain to create the hostnames for MagicDNS.
# `base_domain` must be a FQDNs, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.user.base_domain` (e.g., _myhost.myuser.example.com_).
base_domain: example.com
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only been enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# # This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
# # If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
# user: `first-name.last-name.example.com`
#
# strip_email_domain: true
# Logtail configuration
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for this headscales clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false

@@ -0,0 +1,21 @@
version: '3.9'
services:
headscale:
container_name: headscale
volumes:
- /home/ubuntu/docker/headscale/config:/etc/headscale/
- /home/ubuntu/docker/headscale/keys:/var/lib/headscale/
ports:
- 8080:8080
- 9090:9090
image: headscale/headscale:latest
command: headscale serve
restart: unless-stopped
headscale-ui:
image: ghcr.io/gurucomputing/headscale-ui:latest
pull_policy: always
container_name: headscale-ui
restart: unless-stopped
ports:
- 9999:80

@@ -0,0 +1,47 @@
version: '3.9'
services:
headscale:
image: headscale/headscale:latest
pull_policy: always
container_name: headscale
restart: unless-stopped
command: headscale serve
volumes:
- /home/ubuntu/docker/headscale/config:/etc/headscale
- /home/ubuntu/docker/headscale/data:/var/lib/headscale
labels:
- traefik.enable=true
- traefik.http.routers.headscale-rtr.rule=PathPrefix(`/`) # you might want to add: && Host(`your.domain.name`)
- traefik.http.services.headscale-svc.loadbalancer.server.port=8080
headscale-ui:
image: ghcr.io/gurucomputing/headscale-ui:latest
pull_policy: always
container_name: headscale-ui
restart: unless-stopped
labels:
- traefik.enable=true
- traefik.http.routers.headscale-ui-rtr.rule=PathPrefix(`/web`) # you might want to add: && Host(`your.domain.name`)
- traefik.http.services.headscale-ui-svc.loadbalancer.server.port=80
# If you are following my guides you will already have the below configured
#
# traefik:
# image: traefik:latest
# pull_policy: always
# restart: unless-stopped
# container_name: traefik
# command:
# - --api.insecure=true # remove in production
# - --providers.docker
# - --entrypoints.web.address=:80
# - --entrypoints.websecure.address=:443
# - --global.sendAnonymousUsage=false
# ports:
# - 80:80
# - 443:443
# - 8080:8080 # web UI (enabled with api.insecure)
# volumes:
# - /var/run/docker.sock:/var/run/docker.sock:ro
# - ./traefik/certificates:/certificates

@@ -0,0 +1,51 @@
# Homelab Buyer's Guide Q3-2023
* Consumer server build
* High End
* Intel
* CPU: Intel Core i7-13700K (with iGPU) : [https://amzn.to/3E6DbUT](https://amzn.to/44wT8yz)
* Mobo: Z690D4U (if you can find one) or MSI MAG Z790 TOMAHAWK WIFI : [https://amzn.to/3OICGoL](https://amzn.to/44tser9)
* RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3E3Gc8o](https://amzn.to/47S3Br2)
* PSU: 
* AMD
* CPU: AMD Ryzen 9 7900 : [https://amzn.to/45CDLoZ](https://amzn.to/47TqV7N)
* Mobo: ASRock B650D4U-2T/BCM (or B650D4U-2L2T/BCM for 10G)  or ASRock X670E Steel Legend ATX : [https://amzn.to/3KPrRA8](https://amzn.to/3YTrMkI)
* RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3E3Gc8o](https://amzn.to/47PgzWD)
* Budget
* Intel
* CPU: Intel Core i5-12400 : [https://amzn.to/3KKPhqA](https://amzn.to/3EjiG7m)
* Mobo: MSI MAG B660M MORTAR : [https://amzn.to/3P4HpSb](https://amzn.to/3sy1QPG)
* RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3E3Gc8o](https://amzn.to/47PgzWD)
* AMD
* CPU: AMD Ryzen 5 5600 : [https://amzn.to/3QLToq0](https://amzn.to/3Ej9EYi)
* Mobo: MSI MAG B550 TOMAHAWK : [https://amzn.to/3OKh0bV](https://amzn.to/3OW3l1J)
* RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3E3Gc8o](https://amzn.to/3Z2vIzN)
* PSU:
* Corsair HX: [https://amzn.to/3P4YfRN](https://amzn.to/3LoJveD)
* GPU:
* Budget: Intel Arc a380: [https://amzn.to/47Fa60k](https://amzn.to/3OU9hrS)
* All-in-One:
* Budget: Lenovo ThinkCentre : [https://amzn.to/3KLPdH1](https://amzn.to/3swN0c8)
* Premium: Intel NUC - pick generation to suit budget : [https://amzn.to/3YR0jQL](https://amzn.to/3KXW6VG)
* Enterprise server
* Server Form Factor: Dell r730
* Workstation ATX: Epyc 7302p with Gigabyte or SuperMicro Mobo (Check eBay)
* Switch
* Entry-level: Netgear GS108E (5/8/16 ports) : [https://amzn.to/3qCQBVz](https://amzn.to/3L25APA)
* Mid-level: Mikrotik CRS326-24G-2S+RM (or IN - non rack mount) (2x 10Gb SFP+) : [https://amzn.to/3P3BY76](https://amzn.to/3Piz0fd)
* Pro-sumer: Mikrotik CRS328-24P-4S+RM (POE, 1Gb, 10Gb SFP+) (£500) vs Unifi Professional 48 PoE (£1000) : [https://amzn.to/44lVhwC](https://amzn.to/3OYo3xI)
* NIC
    * 1G: Intel I210 or I350-T4 V2
    * 10G: Mellanox ConnectX-3 10Gb SFP+, Intel X520-DA2 or X520-T2
* HBA:
* LSI
* SSD/HDD
* NAS: Toshiba MG Series (16TB), Seagate Ironwolf 16TB : [https://amzn.to/3ONcOs9](https://amzn.to/3qRXTVu)
* NVME: Firecuda 530 gen 4, or Samsung 970 EVO : [https://amzn.to/3E5rpKn](https://amzn.to/3KWnoMk)
* Access Point: Unifi U6 (choose model for situation) : [https://amzn.to/3E4x9UD](https://amzn.to/3qQjn5a)
* Rack: Tripp Lite
* Patch Panel: TRENDnet 24-Port Cat6A Shielded 1U Patch Panel : [https://amzn.to/3QO0fzp](https://amzn.to/3PcU4U9)
* UPS: APC SmartUPS : [https://amzn.to/3QRuaqf](https://amzn.to/3sysW9v)
* Cooling:
* Rack: AC Infinity CLOUDPLATE : [https://amzn.to/3QINupG](https://amzn.to/3QZq7bF)
    * Fans: Noctua : [https://amzn.to/3qxMcTT](https://amzn.to/3YU7t6M)

View file

@ -0,0 +1,36 @@
version: "3.3"
services:
homepage:
image: ghcr.io/benphelps/homepage:latest
container_name: homepage
# uncomment if you do not want to run as root
#user: 1000:1000
# uncomment if you are not using a reverse proxy
#ports:
# - 3000:3000
volumes:
- /home/ubuntu/docker/homepage/config:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
networks:
proxy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.homepage.entrypoints=http"
- "traefik.http.routers.homepage.rule=Host(`homepage.jimsgarage.co.uk`)"
- "traefik.http.routers.homepage.middlewares=default-whitelist@file"
- "traefik.http.middlewares.homepage-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.homepage.middlewares=homepage-https-redirect"
- "traefik.http.routers.homepage-secure.entrypoints=https"
- "traefik.http.routers.homepage-secure.rule=Host(`homepage.jimsgarage.co.uk`)"
- "traefik.http.routers.homepage-secure.tls=true"
- "traefik.http.routers.homepage-secure.service=homepage"
- "traefik.http.services.homepage.loadbalancer.server.port=3000"
# - "traefik.http.routers.homepage-secure.middlewares=default-whitelist@file" # uncomment if you want to use a Traefik whitelist to restrict access
# - "traefik.http.routers.homepage-secure.middlewares=authelia@docker" # uncomment if you want to use authelia
# - "traefik.docker.network=proxy"
security_opt:
- no-new-privileges:true
networks:
proxy:
external: true

View file

@ -0,0 +1,41 @@
---
# For configuration options and examples, please see:
# https://gethomepage.dev/en/configs/services
- My First Group:
- My First Service:
href: http://localhost/
description: Homepage is awesome
- Traefik:
icon: traefik.png
href: "http://traefik.jimsgarage.co.uk"
description: Reverse Proxy
server: my-docker # The docker server that was configured
container: traefik # The name of the container you'd like to connect
widget:
type: traefik
url: https://traefik.jimsgarage.co.uk
username: admin
password: gT8ni3iX6QkKreWfAdYKe4xqVsaMRUQ4GG7xn59Q
- PiHole:
icon: pi-hole.png
href: "http://pihole.jimsgarage.co.uk"
description: DNS Ad Blocker
server: my-docker # The docker server that was configured
container: pihole # The name of the container you'd like to connect
widget:
type: pihole
url: http://192.168.8.2
key: 73T8oBs9MFKLVAC3mAs2KQbWSsqA7oe2PN9r9H4TQWg2TXNAdq4ZPzvy8oEv
- My Second Group:
- My Second Service:
href: http://localhost/
description: Homepage is the best
- My Third Group:
- My Third Service:
href: http://localhost/
description: Homepage is 😎
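The `server: my-docker` / `container:` fields above assume a Docker provider named my-docker in homepage's docker.yaml. A minimal sketch of creating it via the config volume mounted in the compose file above (file name per the gethomepage docs, socket path matching the compose mount):
cat > /home/ubuntu/docker/homepage/config/docker.yaml <<'EOF'
# "my-docker" is the server name referenced by services.yaml
my-docker:
  socket: /var/run/docker.sock
EOF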

22
Immich/.env Normal file
View file

@ -0,0 +1,22 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/home/ubuntu/docker/immich/upload
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secrets for postgres and typesense. You should change these to random passwords
TYPESENSE_API_KEY=some-random-text
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_HOSTNAME=immich_postgres
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
REDIS_HOSTNAME=immich_redis
IMMICH_SERVER_URL=https://immich.yourdomain.com
IMMICH_WEB_URL=https://immich.yourdomain.com
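A quick way to fill TYPESENSE_API_KEY and DB_PASSWORD above with random values (plain openssl, nothing Immich-specific):
# generate two random strings and paste them into the .env above
openssl rand -hex 32   # TYPESENSE_API_KEY
openssl rand -hex 32   # DB_PASSWORD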

View file

@ -0,0 +1,95 @@
version: "3.8"
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
command: [ "start.sh", "immich" ]
volumes:
- /home/ubuntu/docker/immich/upload:/usr/src/app/upload
env_file:
- .env
depends_on:
- redis
- database
- typesense
restart: always
immich-microservices:
container_name: immich_microservices
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
extends:
file: hwaccel.yml
service: hwaccel
command: [ "start.sh", "microservices" ]
volumes:
- /home/ubuntu/docker/immich/upload:/usr/src/app/upload
env_file:
- .env
depends_on:
- redis
- database
- typesense
restart: always
immich-machine-learning:
container_name: immich_machine_learning
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
volumes:
- /home/ubuntu/docker/immich/model-cache:/cache
env_file:
- .env
restart: always
immich-web:
container_name: immich_web
image: ghcr.io/immich-app/immich-web:${IMMICH_VERSION:-release}
env_file:
- .env
restart: always
typesense:
container_name: immich_typesense
image: typesense/typesense:0.24.1@sha256:9bcff2b829f12074426ca044b56160ca9d777a0c488303469143dd9f8259d4dd
environment:
- TYPESENSE_API_KEY=${TYPESENSE_API_KEY}
- TYPESENSE_DATA_DIR=/data
# remove this to get debug messages
- GLOG_minloglevel=1
volumes:
- /home/ubuntu/docker/immich/tsdata:/data
restart: always
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:70a7a5b641117670beae0d80658430853896b5ef269ccf00d1827427e3263fa3
restart: always
database:
container_name: immich_postgres
image: postgres:14-alpine@sha256:28407a9961e76f2d285dc6991e8e48893503cc3836a4755bbc2d40bcc272a441
env_file:
- .env
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
volumes:
- /home/ubuntu/docker/immich/pgdata:/var/lib/postgresql/data
restart: always
immich-proxy:
container_name: immich_proxy
image: ghcr.io/immich-app/immich-proxy:${IMMICH_VERSION:-release}
environment:
# Make sure these values get passed through from the env file
- IMMICH_SERVER_URL
- IMMICH_WEB_URL
ports:
- 2283:8080
depends_on:
- immich-server
- immich-web
restart: always
## there is a known issue with Traefik: see here https://github.com/immich-app/immich/discussions/437#discussioncomment-3609797
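With the .env above saved alongside this compose file, bringing the stack up is the usual Compose flow; a minimal sketch assuming the docker compose v2 plugin:
# compose reads the adjacent .env automatically
docker compose up -d
# the UI is then served by immich-proxy on http://<host>:2283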

23
Immich/hwaccel.yml Normal file
View file

@ -0,0 +1,23 @@
version: "3.8"
# Hardware acceleration for transcoding - Optional
# This is only needed if you want to use hardware acceleration for transcoding.
# Depending on your hardware, you should uncomment the relevant lines below.
services:
hwaccel:
devices:
- /dev/dri:/dev/dri # If using Intel QuickSync or VAAPI
# volumes:
# - /usr/lib/wsl:/usr/lib/wsl # If using VAAPI in WSL2
# environment:
# - NVIDIA_DRIVER_CAPABILITIES=all # If using NVIDIA GPU
# - LD_LIBRARY_PATH=/usr/lib/wsl/lib # If using VAAPI in WSL2
# - LIBVA_DRIVER_NAME=d3d12 # If using VAAPI in WSL2
# deploy: # Uncomment this section if using NVIDIA GPU
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu,video]

View file

@ -0,0 +1,46 @@
version: "2"
services:
jellyfin:
image: jellyfin/jellyfin
container_name: jellyfin
user: 1000:1000
#group_add:
    # - '109' # This needs to be the group id returned by running `stat -c '%g' /dev/dri/renderD128` on the Docker host
environment:
- TZ=Europe/London
volumes:
- /home/ubuntu/docker/jellyfin/config:/config
- /home/ubuntu/docker/jellyfin/cache:/cache
- /home/ubuntu/YOUR_NAS/Films:/Films:ro
- /home/ubuntu/YOUR_NAS/TVShows:/TVShows:ro
- /home/ubuntu/YOUR_NAS/Audiobooks:/Audiobooks:ro
- /home/ubuntu/YOUR_NAS/Music:/Music:ro
#ports: You will need to uncomment if you aren't running through a proxy
# - 8096:8096
# - 8920:8920 #optional
# - 7359:7359/udp #optional
# - 1900:1900/udp #optional
#devices: uncomment these and amend if you require GPU accelerated transcoding
# - /dev/dri/renderD128:/dev/dri/renderD128
# - /dev/dri/card0:/dev/dri/card0
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.routers.jellyfin.entrypoints=http"
- "traefik.http.routers.jellyfin.rule=Host(`jellyfin.YOUR_DOMAIN.co.uk`)"
- "traefik.http.middlewares.jellyfin-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.jellyfin.middlewares=jellyfin-https-redirect"
- "traefik.http.routers.jellyfin-secure.entrypoints=https"
- "traefik.http.routers.jellyfin-secure.rule=Host(`jellyfin.YOUR_DOMAIN.co.uk`)"
- "traefik.http.routers.jellyfin-secure.tls=true"
- "traefik.http.routers.jellyfin-secure.service=jellyfin"
- "traefik.http.services.jellyfin.loadbalancer.server.port=8096"
- "traefik.docker.network=proxy"
networks:
proxy:
security_opt:
- no-new-privileges:true
networks:
proxy:
external: true

218
Jitsi/.env Normal file
View file

@ -0,0 +1,218 @@
# shellcheck disable=SC2034
################################################################################
################################################################################
# Welcome to the Jitsi Meet Docker setup!
#
# This sample .env file contains some basic options to get you started.
# The full options reference can be found here:
# https://jitsi.github.io/handbook/docs/devops-guide/devops-guide-docker
################################################################################
################################################################################
#
# Basic configuration options
#
# Directory where all configuration will be stored
CONFIG=~/.jitsi-meet-cfg
# Exposed HTTP port
HTTP_PORT=8000
# Exposed HTTPS port
HTTPS_PORT=8443
# System time zone
TZ=UTC
# Public URL for the web service (required)
PUBLIC_URL=https://meet.yourdomain.com
# Media IP addresses to advertise by the JVB
# This setting deprecates DOCKER_HOST_ADDRESS, and supports a comma separated list of IPs
# See the "Running behind NAT or on a LAN environment" section in the Handbook:
# https://jitsi.github.io/handbook/docs/devops-guide/devops-guide-docker#running-behind-nat-or-on-a-lan-environment
JVB_ADVERTISE_IPS=192.168.x.x # Add your Docker Host IP here
#
# JaaS Components (beta)
# https://jaas.8x8.vc
#
# Enable JaaS Components (hosted Jigasi)
# NOTE: if Let's Encrypt is enabled a JaaS account will be automatically created, using the provided email in LETSENCRYPT_EMAIL
#ENABLE_JAAS_COMPONENTS=0
#
# Let's Encrypt configuration
#
# Enable Let's Encrypt certificate generation
#ENABLE_LETSENCRYPT=1
# Domain for which to generate the certificate
#LETSENCRYPT_DOMAIN=meet.example.com
# E-Mail for receiving important account notifications (mandatory)
#LETSENCRYPT_EMAIL=alice@atlanta.net
# Use the staging server (for avoiding rate limits while testing)
#LETSENCRYPT_USE_STAGING=1
#
# Etherpad integration (for document sharing)
#
# Set etherpad-lite URL in docker local network (uncomment to enable)
#ETHERPAD_URL_BASE=http://etherpad.meet.jitsi:9001
# Set etherpad-lite public URL, including /p/ pad path fragment (uncomment to enable)
#ETHERPAD_PUBLIC_URL=https://etherpad.my.domain/p/
# Name your etherpad instance!
ETHERPAD_TITLE=Video Chat
# The default text of a pad
ETHERPAD_DEFAULT_PAD_TEXT="Welcome to Web Chat!\n\n"
# Name of the skin for etherpad
ETHERPAD_SKIN_NAME=colibris
# Skin variants for etherpad
ETHERPAD_SKIN_VARIANTS="super-light-toolbar super-light-editor light-background full-width-editor"
#
# Basic Jigasi configuration options (needed for SIP gateway support)
#
# SIP URI for incoming / outgoing calls
#JIGASI_SIP_URI=test@sip2sip.info
# Password for the specified SIP account as a clear text
#JIGASI_SIP_PASSWORD=passw0rd
# SIP server (use the SIP account domain if in doubt)
#JIGASI_SIP_SERVER=sip2sip.info
# SIP server port
#JIGASI_SIP_PORT=5060
# SIP server transport
#JIGASI_SIP_TRANSPORT=UDP
#
# Authentication configuration (see handbook for details)
#
# Enable authentication
ENABLE_AUTH=1
# Enable guest access
ENABLE_GUESTS=1
# Select authentication type: internal, jwt, ldap or matrix
AUTH_TYPE=internal
# JWT authentication
#
# Application identifier
#JWT_APP_ID=my_jitsi_app_id
# Application secret known only to your token generator
#JWT_APP_SECRET=my_jitsi_app_secret
# (Optional) Set asap_accepted_issuers as a comma separated list
#JWT_ACCEPTED_ISSUERS=my_web_client,my_app_client
# (Optional) Set asap_accepted_audiences as a comma separated list
#JWT_ACCEPTED_AUDIENCES=my_server1,my_server2
# LDAP authentication (for more information see the Cyrus SASL saslauthd.conf man page)
#
# LDAP url for connection
#LDAP_URL=ldaps://ldap.domain.com/
# LDAP base DN. Can be empty
#LDAP_BASE=DC=example,DC=domain,DC=com
# LDAP user DN. Do not specify this parameter for the anonymous bind
#LDAP_BINDDN=CN=binduser,OU=users,DC=example,DC=domain,DC=com
# LDAP user password. Do not specify this parameter for the anonymous bind
#LDAP_BINDPW=LdapUserPassw0rd
# LDAP filter. Tokens example:
# %1-9 - if the input key is user@mail.domain.com, then %1 is com, %2 is domain and %3 is mail
# %s - %s is replaced by the complete service string
# %r - %r is replaced by the complete realm string
#LDAP_FILTER=(sAMAccountName=%u)
# LDAP authentication method
#LDAP_AUTH_METHOD=bind
# LDAP version
#LDAP_VERSION=3
# LDAP TLS using
#LDAP_USE_TLS=1
# List of SSL/TLS ciphers to allow
#LDAP_TLS_CIPHERS=SECURE256:SECURE128:!AES-128-CBC:!ARCFOUR-128:!CAMELLIA-128-CBC:!3DES-CBC:!CAMELLIA-128-CBC
# Require and verify server certificate
#LDAP_TLS_CHECK_PEER=1
# Path to CA cert file. Used when server certificate verify is enabled
#LDAP_TLS_CACERT_FILE=/etc/ssl/certs/ca-certificates.crt
# Path to CA certs directory. Used when server certificate verify is enabled
#LDAP_TLS_CACERT_DIR=/etc/ssl/certs
# Whether to use STARTTLS; implies LDAPv3 and requires ldap:// instead of ldaps://
# LDAP_START_TLS=1
#
# Security
#
# Set these to strong passwords to avoid intruders from impersonating a service account
# The service(s) won't start unless these are specified
# Running ./gen-passwords.sh will update .env with strong passwords
# You may skip the Jigasi and Jibri passwords if you are not using those
# DO NOT reuse passwords
#
# ENSURE YOU CHANGE THE BELOW!!!!!!!!
# XMPP password for Jicofo client connections
JICOFO_AUTH_PASSWORD=8fbfebe1142fac3efee956d7d65d0146
# XMPP password for JVB client connections
JVB_AUTH_PASSWORD=0a192f85b439288b54f520faacdf209d
# XMPP password for Jigasi MUC client connections
JIGASI_XMPP_PASSWORD=141b73ad8050cee2fe72c74ab1a86282
# XMPP recorder password for Jibri client connections
JIBRI_RECORDER_PASSWORD=45f6506538b556d3bfa4c04291fddad3
# XMPP password for Jibri client connections
JIBRI_XMPP_PASSWORD=7798d3b72dbffca6eb0e0ea384e816b7
#
# Docker Compose options
#
# Container restart policy
#RESTART_POLICY=unless-stopped
# Jitsi image version (useful for local development)
#JITSI_IMAGE_VERSION=latest

400
Jitsi/docker-compose.yml Normal file
View file

@ -0,0 +1,400 @@
version: '3.5'
services:
# Frontend
web:
image: jitsi/web:${JITSI_IMAGE_VERSION:-stable-8922}
restart: ${RESTART_POLICY:-unless-stopped}
# remember to uncomment below if you do not plan on using a reverse proxy
# ports:
# - '${HTTP_PORT}:80'
# - '${HTTPS_PORT}:443'
volumes:
- ${CONFIG}/web:/config:Z
- ${CONFIG}/web/crontabs:/var/spool/cron/crontabs:Z
- ${CONFIG}/transcripts:/usr/share/jitsi-meet/transcripts:Z
environment:
- AMPLITUDE_ID
- ANALYTICS_SCRIPT_URLS
- ANALYTICS_WHITELISTED_EVENTS
- AUDIO_QUALITY_OPUS_BITRATE
- AUTO_CAPTION_ON_RECORD
- BRANDING_DATA_URL
- CALLSTATS_CUSTOM_SCRIPT_URL
- CALLSTATS_ID
- CALLSTATS_SECRET
- CHROME_EXTENSION_BANNER_JSON
- COLIBRI_WEBSOCKET_PORT
- CONFCODE_URL
- CONFIG_EXTERNAL_CONNECT
- DEFAULT_LANGUAGE
- DEPLOYMENTINFO_ENVIRONMENT
- DEPLOYMENTINFO_ENVIRONMENT_TYPE
- DEPLOYMENTINFO_REGION
- DEPLOYMENTINFO_SHARD
- DEPLOYMENTINFO_USERREGION
- DESKTOP_SHARING_FRAMERATE_MIN
- DESKTOP_SHARING_FRAMERATE_MAX
- DIALIN_NUMBERS_URL
- DIALOUT_AUTH_URL
- DIALOUT_CODES_URL
- DISABLE_AUDIO_LEVELS
- DISABLE_DEEP_LINKING
- DISABLE_GRANT_MODERATOR
- DISABLE_HTTPS
- DISABLE_KICKOUT
- DISABLE_LOCAL_RECORDING
- DISABLE_POLLS
- DISABLE_PRIVATE_CHAT
- DISABLE_PROFILE
- DISABLE_REACTIONS
- DISABLE_REMOTE_VIDEO_MENU
- DISABLE_START_FOR_ALL
- DROPBOX_APPKEY
- DROPBOX_REDIRECT_URI
- DYNAMIC_BRANDING_URL
- ENABLE_AUDIO_PROCESSING
- ENABLE_AUTH
- ENABLE_BREAKOUT_ROOMS
- ENABLE_CALENDAR
- ENABLE_COLIBRI_WEBSOCKET
- ENABLE_E2EPING
- ENABLE_FILE_RECORDING_SHARING
- ENABLE_GUESTS
- ENABLE_HSTS
- ENABLE_HTTP_REDIRECT
- ENABLE_IPV6
- ENABLE_LETSENCRYPT
- ENABLE_LIPSYNC
- ENABLE_NO_AUDIO_DETECTION
- ENABLE_NOISY_MIC_DETECTION
- ENABLE_OCTO
- ENABLE_OPUS_RED
- ENABLE_PREJOIN_PAGE
- ENABLE_P2P
- ENABLE_WELCOME_PAGE
- ENABLE_CLOSE_PAGE
- ENABLE_LIVESTREAMING
- ENABLE_LIVESTREAMING_DATA_PRIVACY_LINK
- ENABLE_LIVESTREAMING_HELP_LINK
- ENABLE_LIVESTREAMING_TERMS_LINK
- ENABLE_LIVESTREAMING_VALIDATOR_REGEXP_STRING
- ENABLE_LOCAL_RECORDING_NOTIFY_ALL_PARTICIPANT
- ENABLE_LOCAL_RECORDING_SELF_START
- ENABLE_RECORDING
- ENABLE_REMB
- ENABLE_REQUIRE_DISPLAY_NAME
- ENABLE_SERVICE_RECORDING
- ENABLE_SIMULCAST
- ENABLE_STATS_ID
- ENABLE_STEREO
- ENABLE_SUBDOMAINS
- ENABLE_TALK_WHILE_MUTED
- ENABLE_TCC
- ENABLE_TRANSCRIPTIONS
- ENABLE_XMPP_WEBSOCKET
- ENABLE_JAAS_COMPONENTS
- ETHERPAD_PUBLIC_URL
- ETHERPAD_URL_BASE
- E2EPING_NUM_REQUESTS
- E2EPING_MAX_CONFERENCE_SIZE
- E2EPING_MAX_MESSAGE_PER_SECOND
- GOOGLE_ANALYTICS_ID
- GOOGLE_API_APP_CLIENT_ID
- HIDE_PREMEETING_BUTTONS
- HIDE_PREJOIN_DISPLAY_NAME
- HIDE_PREJOIN_EXTRA_BUTTONS
- INVITE_SERVICE_URL
- LETSENCRYPT_DOMAIN
- LETSENCRYPT_EMAIL
- LETSENCRYPT_USE_STAGING
- MATOMO_ENDPOINT
- MATOMO_SITE_ID
- MICROSOFT_API_APP_CLIENT_ID
- NGINX_RESOLVER
- NGINX_WORKER_PROCESSES
- NGINX_WORKER_CONNECTIONS
- PEOPLE_SEARCH_URL
- PREFERRED_LANGUAGE
- PUBLIC_URL
- P2P_PREFERRED_CODEC
- RESOLUTION
- RESOLUTION_MIN
- RESOLUTION_WIDTH
- RESOLUTION_WIDTH_MIN
- START_AUDIO_MUTED
- START_AUDIO_ONLY
- START_BITRATE
- START_SILENT
- START_WITH_AUDIO_MUTED
- START_VIDEO_MUTED
- START_WITH_VIDEO_MUTED
- TESTING_CAP_SCREENSHARE_BITRATE
- TESTING_OCTO_PROBABILITY
- TOKEN_AUTH_URL
- TOOLBAR_BUTTONS
- TRANSLATION_LANGUAGES
- TRANSLATION_LANGUAGES_HEAD
- TZ
- USE_APP_LANGUAGE
- VIDEOQUALITY_BITRATE_H264_LOW
- VIDEOQUALITY_BITRATE_H264_STANDARD
- VIDEOQUALITY_BITRATE_H264_HIGH
- VIDEOQUALITY_BITRATE_VP8_LOW
- VIDEOQUALITY_BITRATE_VP8_STANDARD
- VIDEOQUALITY_BITRATE_VP8_HIGH
- VIDEOQUALITY_BITRATE_VP9_LOW
- VIDEOQUALITY_BITRATE_VP9_STANDARD
- VIDEOQUALITY_BITRATE_VP9_HIGH
- VIDEOQUALITY_ENFORCE_PREFERRED_CODEC
- VIDEOQUALITY_PREFERRED_CODEC
- XMPP_AUTH_DOMAIN
- XMPP_BOSH_URL_BASE
- XMPP_DOMAIN
- XMPP_GUEST_DOMAIN
- XMPP_MUC_DOMAIN
- XMPP_RECORDER_DOMAIN
- XMPP_PORT
- WHITEBOARD_ENABLED
- WHITEBOARD_COLLAB_SERVER_PUBLIC_URL
networks:
meet.jitsi:
proxy: # remove if you're not using a reverse proxy (including labels below)
labels:
- "traefik.enable=true"
- "traefik.http.routers.jitsi.entrypoints=http"
- "traefik.http.routers.jitsi.rule=Host(`meet2.jimsgarage.co.uk`)"
- "traefik.http.middlewares.jitsi-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.jitsi.middlewares=jitsi-https-redirect"
- "traefik.http.routers.jitsi-secure.entrypoints=https"
- "traefik.http.routers.jitsi-secure.rule=Host(`meet2.jimsgarage.co.uk`)"
- "traefik.http.routers.jitsi-secure.tls=true"
- "traefik.http.routers.jitsi-secure.service=jitsi"
- "traefik.http.services.jitsi.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
# XMPP server
prosody:
image: jitsi/prosody:${JITSI_IMAGE_VERSION:-stable-8922}
restart: ${RESTART_POLICY:-unless-stopped}
expose:
- '${XMPP_PORT:-5222}'
- '5347'
- '5280'
volumes:
- ${CONFIG}/prosody/config:/config:Z
- ${CONFIG}/prosody/prosody-plugins-custom:/prosody-plugins-custom:Z
environment:
- AUTH_TYPE
- DISABLE_POLLS
- ENABLE_AUTH
- ENABLE_AV_MODERATION
- ENABLE_BREAKOUT_ROOMS
- ENABLE_END_CONFERENCE
- ENABLE_GUESTS
- ENABLE_IPV6
- ENABLE_LOBBY
- ENABLE_RECORDING
- ENABLE_XMPP_WEBSOCKET
- ENABLE_JAAS_COMPONENTS
- GC_TYPE
- GC_INC_TH
- GC_INC_SPEED
- GC_INC_STEP_SIZE
- GC_GEN_MIN_TH
- GC_GEN_MAX_TH
- GLOBAL_CONFIG
- GLOBAL_MODULES
- JIBRI_RECORDER_USER
- JIBRI_RECORDER_PASSWORD
- JIBRI_XMPP_USER
- JIBRI_XMPP_PASSWORD
- JICOFO_AUTH_PASSWORD
- JICOFO_COMPONENT_SECRET
- JIGASI_XMPP_USER
- JIGASI_XMPP_PASSWORD
- JVB_AUTH_USER
- JVB_AUTH_PASSWORD
- JWT_APP_ID
- JWT_APP_SECRET
- JWT_ACCEPTED_ISSUERS
- JWT_ACCEPTED_AUDIENCES
- JWT_ASAP_KEYSERVER
- JWT_ALLOW_EMPTY
- JWT_AUTH_TYPE
- JWT_ENABLE_DOMAIN_VERIFICATION
- JWT_TOKEN_AUTH_MODULE
- MATRIX_UVS_URL
- MATRIX_UVS_ISSUER
- MATRIX_UVS_AUTH_TOKEN
- MATRIX_UVS_SYNC_POWER_LEVELS
- LOG_LEVEL
- LDAP_AUTH_METHOD
- LDAP_BASE
- LDAP_BINDDN
- LDAP_BINDPW
- LDAP_FILTER
- LDAP_VERSION
- LDAP_TLS_CIPHERS
- LDAP_TLS_CHECK_PEER
- LDAP_TLS_CACERT_FILE
- LDAP_TLS_CACERT_DIR
- LDAP_START_TLS
- LDAP_URL
- LDAP_USE_TLS
- MAX_PARTICIPANTS
- PROSODY_AUTH_TYPE
- PROSODY_RESERVATION_ENABLED
- PROSODY_RESERVATION_REST_BASE_URL
- PROSODY_ENABLE_RATE_LIMITS
- PROSODY_RATE_LIMIT_LOGIN_RATE
- PROSODY_RATE_LIMIT_SESSION_RATE
- PROSODY_RATE_LIMIT_TIMEOUT
- PROSODY_RATE_LIMIT_ALLOW_RANGES
- PROSODY_RATE_LIMIT_CACHE_SIZE
- PUBLIC_URL
- TURN_CREDENTIALS
- TURN_HOST
- TURNS_HOST
- TURN_PORT
- TURNS_PORT
- TURN_TRANSPORT
- TZ
- XMPP_DOMAIN
- XMPP_AUTH_DOMAIN
- XMPP_GUEST_DOMAIN
- XMPP_MUC_DOMAIN
- XMPP_INTERNAL_MUC_DOMAIN
- XMPP_MODULES
- XMPP_MUC_MODULES
- XMPP_MUC_CONFIGURATION
- XMPP_INTERNAL_MUC_MODULES
- XMPP_RECORDER_DOMAIN
- XMPP_PORT
networks:
meet.jitsi:
aliases:
- ${XMPP_SERVER:-xmpp.meet.jitsi}
# Focus component
jicofo:
image: jitsi/jicofo:${JITSI_IMAGE_VERSION:-stable-8922}
restart: ${RESTART_POLICY:-unless-stopped}
ports:
- '127.0.0.1:${JICOFO_REST_PORT:-8888}:8888'
volumes:
- ${CONFIG}/jicofo:/config:Z
environment:
- AUTH_TYPE
- BRIDGE_AVG_PARTICIPANT_STRESS
- BRIDGE_STRESS_THRESHOLD
- ENABLE_AUTH
- ENABLE_AUTO_OWNER
- ENABLE_CODEC_VP8
- ENABLE_CODEC_VP9
- ENABLE_CODEC_H264
- ENABLE_CODEC_OPUS_RED
- ENABLE_JVB_XMPP_SERVER
- ENABLE_OCTO
- ENABLE_RECORDING
- ENABLE_SCTP
- ENABLE_AUTO_LOGIN
- JICOFO_AUTH_LIFETIME
- JICOFO_AUTH_PASSWORD
- JICOFO_AUTH_TYPE
- JICOFO_BRIDGE_REGION_GROUPS
- JICOFO_ENABLE_AUTH
- JICOFO_ENABLE_BRIDGE_HEALTH_CHECKS
- JICOFO_CONF_INITIAL_PARTICIPANT_WAIT_TIMEOUT
- JICOFO_CONF_SINGLE_PARTICIPANT_TIMEOUT
- JICOFO_CONF_SOURCE_SIGNALING_DELAYS
- JICOFO_CONF_MAX_AUDIO_SENDERS
- JICOFO_CONF_MAX_VIDEO_SENDERS
- JICOFO_CONF_STRIP_SIMULCAST
- JICOFO_CONF_SSRC_REWRITING
- JICOFO_ENABLE_HEALTH_CHECKS
- JICOFO_ENABLE_REST
- JICOFO_HEALTH_CHECKS_USE_PRESENCE
- JICOFO_MULTI_STREAM_BACKWARD_COMPAT
- JICOFO_OCTO_REGION
- JIBRI_BREWERY_MUC
- JIBRI_REQUEST_RETRIES
- JIBRI_PENDING_TIMEOUT
- JIGASI_BREWERY_MUC
- JIGASI_SIP_URI
- JVB_BREWERY_MUC
- JVB_XMPP_AUTH_DOMAIN
- JVB_XMPP_INTERNAL_MUC_DOMAIN
- JVB_XMPP_PORT
- JVB_XMPP_SERVER
- MAX_BRIDGE_PARTICIPANTS
- OCTO_BRIDGE_SELECTION_STRATEGY
- SENTRY_DSN="${JICOFO_SENTRY_DSN:-0}"
- SENTRY_ENVIRONMENT
- SENTRY_RELEASE
- TZ
- XMPP_DOMAIN
- XMPP_AUTH_DOMAIN
- XMPP_INTERNAL_MUC_DOMAIN
- XMPP_MUC_DOMAIN
- XMPP_RECORDER_DOMAIN
- XMPP_SERVER
- XMPP_PORT
depends_on:
- prosody
networks:
meet.jitsi:
# Video bridge
jvb:
image: jitsi/jvb:${JITSI_IMAGE_VERSION:-stable-8922}
restart: ${RESTART_POLICY:-unless-stopped}
ports:
- '${JVB_PORT:-10000}:${JVB_PORT:-10000}/udp'
- '127.0.0.1:${JVB_COLIBRI_PORT:-8080}:8080'
volumes:
- ${CONFIG}/jvb:/config:Z
environment:
- DOCKER_HOST_ADDRESS
- ENABLE_COLIBRI_WEBSOCKET
- ENABLE_JVB_XMPP_SERVER
- ENABLE_OCTO
- JVB_ADVERTISE_IPS
- JVB_ADVERTISE_PRIVATE_CANDIDATES
- JVB_AUTH_USER
- JVB_AUTH_PASSWORD
- JVB_BREWERY_MUC
- JVB_DISABLE_STUN
- JVB_PORT
- JVB_MUC_NICKNAME
- JVB_STUN_SERVERS
- JVB_OCTO_BIND_ADDRESS
- JVB_OCTO_REGION
- JVB_OCTO_RELAY_ID
- JVB_WS_DOMAIN
- JVB_WS_SERVER_ID
- JVB_XMPP_AUTH_DOMAIN
- JVB_XMPP_INTERNAL_MUC_DOMAIN
- JVB_XMPP_PORT
- JVB_XMPP_SERVER
- PUBLIC_URL
- SENTRY_DSN="${JVB_SENTRY_DSN:-0}"
- SENTRY_ENVIRONMENT
- SENTRY_RELEASE
- COLIBRI_REST_ENABLED
- SHUTDOWN_REST_ENABLED
- TZ
- XMPP_AUTH_DOMAIN
- XMPP_INTERNAL_MUC_DOMAIN
- XMPP_SERVER
- XMPP_PORT
depends_on:
- prosody
networks:
meet.jitsi:
# Custom network so all services can communicate using a FQDN
networks:
meet.jitsi:
proxy: # remove if you're not using a proxy
external: true

19
Jitsi/gen-passwords.sh Normal file
View file

@ -0,0 +1,19 @@
#!/usr/bin/env bash
function generatePassword() {
openssl rand -hex 16
}
JICOFO_AUTH_PASSWORD=$(generatePassword)
JVB_AUTH_PASSWORD=$(generatePassword)
JIGASI_XMPP_PASSWORD=$(generatePassword)
JIBRI_RECORDER_PASSWORD=$(generatePassword)
JIBRI_XMPP_PASSWORD=$(generatePassword)
sed -i.bak \
-e "s#JICOFO_AUTH_PASSWORD=.*#JICOFO_AUTH_PASSWORD=${JICOFO_AUTH_PASSWORD}#g" \
-e "s#JVB_AUTH_PASSWORD=.*#JVB_AUTH_PASSWORD=${JVB_AUTH_PASSWORD}#g" \
-e "s#JIGASI_XMPP_PASSWORD=.*#JIGASI_XMPP_PASSWORD=${JIGASI_XMPP_PASSWORD}#g" \
-e "s#JIBRI_RECORDER_PASSWORD=.*#JIBRI_RECORDER_PASSWORD=${JIBRI_RECORDER_PASSWORD}#g" \
-e "s#JIBRI_XMPP_PASSWORD=.*#JIBRI_XMPP_PASSWORD=${JIBRI_XMPP_PASSWORD}#g" \
"$(dirname "$0")/.env"

BIN  Logo/Jim's Garage-1 (1).mp4 Normal file (binary file not shown)
BIN  Logo/Jim's Garage-1 (1).png Normal file (binary image, 284 KiB, not shown)
BIN  Logo/Jim's Garage-1 (2).png Normal file (binary image, 260 KiB, not shown)
BIN  Logo/Jim's Garage-1 (3).png Normal file (binary image, 142 KiB, not shown)
BIN  Logo/Jim's Garage-1 (4).png Normal file (binary image, 147 KiB, not shown)
BIN  Logo/Jim's Garage-1 (5).png Normal file (binary image, 148 KiB, not shown)
BIN  Logo/Jim's Garage-1.mp4 Normal file (binary file not shown)
BIN  Logo/Jim's Garage-1.png Normal file (binary image, 283 KiB, not shown)
BIN  Logo/Jim'sGarage-1(2).png Normal file (binary image, 260 KiB, not shown)

View file

@ -0,0 +1,33 @@
version: "2.1"
services:
nextcloud:
image: lscr.io/linuxserver/nextcloud:latest
container_name: nextcloud
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
volumes:
- /home/ubuntu/docker/nextcloud/appdata:/config
- /home/ubuntu/docker/nextcloud/data:/data
# ports:
# - 443:443
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.routers.nextcloud.entrypoints=http"
- "traefik.http.routers.nextcloud.rule=Host(`nextcloud.jimsgarage.co.uk`)"
- "traefik.http.middlewares.nextcloud-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.nextcloud.middlewares=nextcloud-https-redirect"
- "traefik.http.routers.nextcloud-secure.entrypoints=https"
- "traefik.http.routers.nextcloud-secure.rule=Host(`nextcloud.jimsgarage.co.uk`)"
- "traefik.http.routers.nextcloud-secure.tls=true"
- "traefik.http.routers.nextcloud-secure.service=nextcloud"
- "traefik.http.services.nextcloud.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
networks:
proxy:
networks:
proxy:
external: true

79
Pihole/docker-compose.yml Normal file
View file

@ -0,0 +1,79 @@
version: "3.6"
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
cloudflared:
container_name: cloudflared
# Restart on crashes and on reboots
restart: unless-stopped
image: cloudflare/cloudflared:latest
command: proxy-dns
environment:
- "TUNNEL_DNS_UPSTREAM=https://1.1.1.1/dns-query,https://1.0.0.1/dns-query,https://9.9.9.9/dns-query,https://149.112.112.9/dns-query"
# Listen on an unprivileged port
- "TUNNEL_DNS_PORT=5053"
# Listen on all interfaces
- "TUNNEL_DNS_ADDRESS=0.0.0.0"
# Attach cloudflared only to the private network
networks:
pihole_internal:
ipv4_address: 172.70.9.2
security_opt:
- no-new-privileges:true
pihole:
container_name: pihole
image: pihole/pihole:latest
ports:
- "53:53/tcp"
- "53:53/udp"
- "67:67/udp"
- "500:80/tcp"
# - "443:443/tcp"
networks:
pihole_internal:
ipv4_address: 172.70.9.3
proxy:
environment:
TZ: 'Europe/London'
WEBPASSWORD: 'password'
DNS1: '172.70.9.2#5053'
DNS2: 'no'
DNSMASQ_LISTENING: 'all'
VIRTUAL_HOST: pihole.yourdomain.com
# Volumes store your data between container upgrades
volumes:
- '/home/ubuntu/docker/pihole/:/etc/pihole/'
- '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/'
# Recommended but not required (DHCP needs NET_ADMIN)
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
#cap_add:
# - NET_ADMIN
restart: unless-stopped
depends_on:
- cloudflared
labels:
- "traefik.enable=true"
- "traefik.http.routers.pihole.entrypoints=http"
- "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)"
- "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.pihole.middlewares=pihole-https-redirect"
- "traefik.http.routers.pihole-secure.entrypoints=https"
- "traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)"
- "traefik.http.routers.pihole-secure.tls=true"
- "traefik.http.routers.pihole-secure.service=pihole"
- "traefik.http.services.pihole.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
networks:
pihole_internal:
ipam:
config:
- subnet: 172.70.9.0/29
name: pihole_internal
proxy:
external: true

View file

@ -0,0 +1,7 @@
# Run these commands on the host to fix the port 53 bind error caused by systemd-resolved's stub listener
sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf
sudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'
sudo systemctl restart systemd-resolved
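A quick sanity check afterwards (ss and dig assumed to be available on the host):
# confirm only Pi-hole is now bound to port 53
sudo ss -lntup | grep ':53'
# query Pi-hole directly to confirm resolution works
dig @127.0.0.1 example.com +short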

8
README.md Normal file
View file

@ -0,0 +1,8 @@
[Discord](https://discord.gg/qW5vEBekz5) <br>
[Twitter](https://twitter.com/jimsgarage_) <br>
[Reddit](https://www.reddit.com/user/Jims-Garage)
![alt text](https://github.com/JamesTurland/JimsGarage/blob/main/Logo/Jim'sGarage-1(2).png?raw=true)
# Jim's Garage
Here's a collection of Docker Compose and config files for use in my videos. Simply tweak to your environment and deploy!

View file

@ -0,0 +1,75 @@
version: '3'
services:
synapse:
container_name: synapse
image: docker.io/matrixdotorg/synapse:latest
# Since synapse does not retry to connect to the database, restart upon
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
environment:
# config file location - be sure to create one first!
# https://github.com/matrix-org/synapse/tree/develop/docker#generating-a-configuration-file
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
- UID=1000
- GID=1000
volumes:
# You may either store all the files in a local folder
- /home/ubuntu/docker/synapse:/data
# .. or you may split this between different storage points
# - ./files:/data
# - /path/to/ssd:/data/uploads
# - /path/to/large_hdd:/data/media
depends_on:
# We're going to use an external database as it's more performant for multiple
# users
- synapse-db
networks:
synapse:
proxy:
    # If you are not using a reverse proxy, uncomment one of the following to expose Synapse directly,
    # for instance the TLS port:
#ports:
# - 8448:8448/tcp
# - 8008:8008/tcp
# ... or use a reverse proxy, here is an example for traefik:
labels:
- "traefik.enable=true"
- "traefik.http.routers.synapse.entrypoints=http"
- "traefik.http.routers.synapse.rule=Host(`matrix.yourdomain.com`)"
- "traefik.http.middlewares.synapse-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.synapse.middlewares=synapse-https-redirect"
- "traefik.http.routers.synapse-secure.entrypoints=https"
- "traefik.http.routers.synapse-secure.rule=Host(`matrix.yourdomain.com`)"
- "traefik.http.routers.synapse-secure.tls=true"
- "traefik.http.routers.synapse-secure.service=synapse"
- "traefik.http.services.synapse.loadbalancer.server.port=8008"
- "traefik.docker.network=proxy"
synapse-db:
image: docker.io/postgres:15-alpine
container_name: synapse-db
restart: unless-stopped
# Change password below, of course!
environment:
- POSTGRES_USER=synapse_user
- POSTGRES_PASSWORD=xF2oDmvPLXhN8yivmI7PhPLozBl0ZgfQsjnd55yH # CHANGE ME!
- POSTGRES_DB=synapse
# ensure the database gets created correctly
# https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
- /home/ubuntu/docker/synapse-db/schemas:/var/lib/postgresql/data
# .. or store them on some high performance storage for better results
# - /path/to/ssd/storage:/var/lib/postgresql/data
ports:
- 5432:5432
networks:
synapse:
networks:
synapse:
proxy:
external: true

110
Synapse/homeserver.yaml Normal file
View file

@ -0,0 +1,110 @@
# DO NOT USE THIS CONFIGURATION FILE FOR PRODUCTION
# GENERATE YOUR OWN (SEE MY VIDEO)
# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
#
# For more information on how to configure Synapse, including a complete accounting of
# each option, go to docs/usage/configuration/config_documentation.md or
# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html
server_name: "matrix.yourdomain.com"
pid_file: /data/homeserver.pid
listeners:
- port: 8008
tls: false
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
database:
name: psycopg2
args:
user: synapse_user
password: xF2oDmvPLXhN8yivmI7PhPLozBl0ZgfQsjnd55yH
database: synapse
host: synapse-db
cp_min: 5
cp_max: 10
log_config: "/data/matrix.yourdomain.com.log.config"
media_store_path: /data/media_store
registration_shared_secret: "YRGKetN~MIoXWrqJGTkLaY2.3n0#UDClRF1oteAEk7mWq=1f95" # DO NOT USE THIS
report_stats: false
macaroon_secret_key: "#stDom~z;7M_Fjd0o,Mtn7BFOdIUqb5~DUg4;ipSo+6xEzuV#i" # DO NOT USE THIS
form_secret: ":ANf04Ysn5mh^1PdRHOXe3tT01NERDtBRYHMStkjUu.^wO@mUU" # DO NOT USE THIS
signing_key_path: "/data/matrix.yourdomain.com.signing.key"
trusted_key_servers:
- server_name: "matrix.org"
enable_registration: true
# users require an email to sign up - needed for password resets
registrations_require_3pid:
- email
enable_registration_captcha: true
recaptcha_public_key: <recaptcha site key>
recaptcha_private_key: <recaptcha private key>
email:
# The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
#
smtp_host: smtp.gmail.com
# The port on the mail server for outgoing SMTP. Defaults to 25.
#
smtp_port: 587
# Username/password for authentication to the SMTP server. By default, no
# authentication is attempted.
#
smtp_user: "your_email"
smtp_pass: "your_smtp_password"
# Uncomment the following to require TLS transport security for SMTP.
# By default, Synapse will connect over plain text, and will then switch to
# TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
# Synapse will refuse to connect unless the server supports STARTTLS.
#
require_transport_security: true
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
# The placeholder '%(app)s' will be replaced by the application name,
# which is normally 'app_name' (below), but may be overridden by the
# Matrix client application.
#
# Note that the placeholder must be written '%(app)s', including the
# trailing 's'.
#
notif_from: "Your Friendly %(app)s homeserver <noreply@yourdomain.com>"
# app_name defines the default value for '%(app)s' in notif_from and email
# subjects. It defaults to 'Matrix'.
#
app_name: YourHomeserver
# Uncomment the following to enable sending emails for messages that the user
# has missed. Disabled by default.
#
#enable_notifs: true
# Uncomment the following to disable automatic subscription to email
# notifications for new users. Enabled by default.
#
notif_for_new_users: false
# allows people to change their email address
enable_3pid_changes: true
# allows searching of all users in the directory
user_directory:
  search_all_users: true
# allow this server's public room list to be fetched over federation
allow_public_rooms_over_federation: true
# enable federation on port 443
serve_server_wellknown: true
# vim:ft=yaml

View file

@ -0,0 +1,51 @@
version: "3.7"
services:
mautrix-discord:
container_name: mautrix-discord
image: dock.mau.dev/mautrix/discord:latest
restart: unless-stopped
volumes:
- /home/ubuntu/docker/mautrix-discord:/data
# If you put the service above in the same docker-compose as the homeserver,
# ignore the parts below. Otherwise, see below for configuring networking.
# If synapse is running outside of docker, you'll need to expose the port.
# Note that in most cases you should either run everything inside docker
# or everything outside docker, rather than mixing docker things with
# non-docker things.
ports:
- "29334:29334"
# You'll also probably want this so the bridge can reach Synapse directly
# using something like `http://host.docker.internal:8008` as the address:
#extra_hosts:
#- "host.docker.internal:host-gateway"
# If synapse is in a different network, then add this container to that network.
networks:
mautrix-discord:
mautrix-discord-db:
image: docker.io/postgres:15-alpine
container_name: mautrix-discord-db
restart: always
environment:
- POSTGRES_USER=mautrix-discord
# change this to something unique (you will need it for the generated config file)
- POSTGRES_PASSWORD=bcH8mwBQKD225uZ99d2ReFDQRoDSmVVXASVXaMFMKj5FuNNp44bN7SdzsqVZ
- POSTGRES_DB=mautrix-discord
# ensure the database gets created correctly
# https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may either store all the files in a local folder
- /home/ubuntu/docker/mautrix-discord-db/schemas:/var/lib/postgresql/data
ports:
- 5432:5432
networks:
mautrix-discord:
networks:
mautrix-discord:
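The bridge expects a config.yaml and registration.yaml inside the /data volume before it will stay running. A sketch of the usual mautrix first-run flow (paths follow the volumes above; exact behaviour depends on the bridge version):
# first run writes an example config.yaml into /data and exits
docker run --rm -v /home/ubuntu/docker/mautrix-discord:/data dock.mau.dev/mautrix/discord:latest
# edit config.yaml (homeserver address/domain, database URI, permissions), then run again
# to generate registration.yaml from the edited config
docker run --rm -v /home/ubuntu/docker/mautrix-discord:/data dock.mau.dev/mautrix/discord:latest
The generated registration.yaml then has to be mounted into the Synapse container and listed under app_service_config_files in homeserver.yaml before restarting both stacks.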

View file

@ -0,0 +1,361 @@
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# Publicly accessible base URL for media, used for avatars in relay mode.
# If not set, the connection address above will be used.
public_address: null
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's discord connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29334
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29334
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: discord
# Appservice bot details.
bot:
# Username of the appservice bot.
username: discordbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: Discord bridge bot
avatar: mxc://maunium.net/nIdEykemnwdisvHbpxflpDlC
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Bridge config
bridge:
# Localpart template of MXIDs for Discord users.
# {{.}} is replaced with the internal ID of the Discord user.
username_template: discord_{{.}}
# Displayname template for Discord users. This is also used as the room name in DMs if private_chat_portal_meta is enabled.
# Available variables:
# .ID - Internal user ID
# .Username - Legacy display/username on Discord
# .GlobalName - New displayname on Discord
# .Discriminator - The 4 numbers after the name on Discord
# .Bot - Whether the user is a bot
# .System - Whether the user is an official system user
# .Webhook - Whether the user is a webhook and is not an application
# .Application - Whether the user is an application
displayname_template: '{{or .GlobalName .Username}}{{if .Bot}} (bot){{end}}'
# Displayname template for Discord channels (bridged as rooms, or spaces when type=4).
# Available variables:
# .Name - Channel name, or user displayname (pre-formatted with displayname_template) in DMs.
# .ParentName - Parent channel name (used for categories).
# .GuildName - Guild name.
# .NSFW - Whether the channel is marked as NSFW.
# .Type - Channel type (see values at https://github.com/bwmarrin/discordgo/blob/v0.25.0/structs.go#L251-L267)
channel_name_template: '{{if or (eq .Type 3) (eq .Type 4)}}{{.Name}}{{else}}#{{.Name}}{{end}}'
# Displayname template for Discord guilds (bridged as spaces).
# Available variables:
# .Name - Guild name
guild_name_template: '{{.Name}}'
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
portal_message_buffer: 128
# Number of private channel portals to create on bridge startup.
# Other portals will be created when receiving messages.
startup_private_channel_create_limit: 5
# Should the bridge send a read receipt from the bridge bot when a message has been sent to Discord?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should the bridge use space-restricted join rules instead of invite-only for guild rooms?
# This can avoid unnecessary invite events in guild rooms when members are synced in.
restricted_rooms: true
# Should the bridge automatically join the user to threads on Discord when the thread is opened on Matrix?
# This only works with clients that support thread read receipts (MSC3771 added in Matrix v1.4).
autojoin_thread_on_open: true
# Should inline fields in Discord embeds be bridged as HTML tables to Matrix?
# Tables aren't supported in all clients, but are the only way to emulate the Discord inline field UI.
embed_fields_as_tables: true
# Should guild channels be muted when the portal is created? This only meant for single-user instances,
# it won't mute it for all users if there are multiple Matrix users in the same Discord guild.
mute_channels_on_create: false
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# Should incoming custom emoji reactions be bridged as mxc:// URIs?
# If set to false, custom emoji reactions will be bridged as the shortcode instead, and the image won't be available.
custom_emoji_reactions: true
# Should the bridge attempt to completely delete portal rooms when a channel is deleted on Discord?
# If true, the bridge will try to kick Matrix users from the room. Otherwise, the bridge only makes ghosts leave.
delete_portal_on_channel_delete: false
# Should the bridge delete all portal rooms when you leave a guild on Discord?
# This only applies if the guild has no other Matrix users on this bridge instance.
delete_guild_on_leave: true
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Prefix messages from webhooks with the profile info? This can be used along with a custom displayname_template
# to better handle webhooks that change their name all the time (like ones used by bridges).
prefix_webhook_messages: false
# Bridge webhook avatars?
enable_webhook_avatars: true
# Should the bridge upload media to the Discord CDN directly before sending the message when using a user token,
# like the official client does? The other option is sending the media in the message send request as a form part
# (which is always used by bots and webhooks).
use_discord_cdn_upload: true
# Should mxc uris copied from Discord be cached?
# This can be `never` to never cache, `unencrypted` to only cache unencrypted mxc uris, or `always` to cache everything.
# If you have a media repo that generates non-unique mxc uris, you should set this to never.
cache_media: unencrypted
# Patterns for converting Discord media to custom mxc:// URIs instead of reuploading.
# Each of the patterns can be set to null to disable custom URIs for that type of media.
# More details can be found at https://docs.mau.fi/bridges/go/discord/direct-media.html
media_patterns:
# Should custom mxc:// URIs be used instead of reuploading media?
enabled: false
# Pattern for normal message attachments.
attachments: mxc://discord-media.mau.dev/attachments|{{.ChannelID}}|{{.AttachmentID}}|{{.FileName}}
# Pattern for custom emojis.
emojis: mxc://discord-media.mau.dev/emojis|{{.ID}}.{{.Ext}}
# Pattern for stickers. Note that animated lottie stickers will not be converted if this is enabled.
stickers: mxc://discord-media.mau.dev/stickers|{{.ID}}.{{.Ext}}
# Pattern for static user avatars.
avatars: mxc://discord-media.mau.dev/avatars|{{.UserID}}|{{.AvatarID}}.{{.Ext}}
# Settings for converting animated stickers.
animated_sticker:
# Format to which animated stickers should be converted.
# disable - No conversion, send as-is (lottie JSON)
# png - converts to non-animated png (fastest)
# gif - converts to animated gif
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
target: webp
# Arguments for converter. All converters take width and height.
args:
width: 320
height: 320
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# The prefix for commands. Only required in non-management rooms.
command_prefix: '!discord'
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a Discord bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# Settings for backfilling messages.
backfill:
# Limits for forward backfilling.
forward_limits:
# Initial backfill (when creating portal). 0 means backfill is disabled.
# A special unlimited value is not supported, you must set a limit. Initial backfill will
# fetch all messages first before backfilling anything, so high limits can take a lot of time.
initial:
dm: 0
channel: 0
thread: 0
# Missed message backfill (on startup).
# 0 means backfill is disabled, -1 means fetch all messages since last bridged message.
# When using unlimited backfill (-1), messages are backfilled as they are fetched.
# With limits, all messages up to the limit are fetched first and backfilled afterwards.
missed:
dm: 0
channel: 0
thread: 0
# Maximum members in a guild to enable backfilling. Set to -1 to disable limit.
# This can be used as a rough heuristic to disable backfilling in channels that are too active.
# Currently only applies to missed message backfill.
max_guild_members: -1
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should users mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
        # unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
            # Minimum level for which the bridge should send keys when bridging messages from Discord to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given a
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a Discord account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-discord.log
max_size: 100
max_backups: 10
compress: true

View file

@ -0,0 +1,14 @@
id: discord
url: http://<IP of DiscordBot>:29334
as_token: vpVJTrHGB6ZyVScf2SD4RzRLHcBeEM6fe9UhuQtsWd9JyFDmvN7mrqQMHBRn
hs_token: VqENuUGBb2NjChatnA7e36CZN7esjpL57mhvmKREQAH7Pj2ux6H835UXBDUC
sender_localpart: zHypwcFgaEKamUdbGyBgvibKpW
rate_limited: false
namespaces:
users:
- regex: ^@discordbot:matrix\.jimsgarage\.co\.uk$
exclusive: true
- regex: ^@discord_.*:matrix\.jimsgarage\.co\.uk$
exclusive: true
de.sorunome.msc2409.push_ephemeral: true
push_ephemeral: true
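# A sketch of how this registration might be referenced from Synapse's homeserver.yaml
# (the path is an assumption; use wherever the file is mounted inside the Synapse container):
#   app_service_config_files:
#     - /data/discord-registration.yaml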

48
Synapse/readme.md Normal file

@ -0,0 +1,48 @@
1) Generate a config file
sudo docker run -it --rm \
--mount type=volume,src=synapse-data,dst=/data \
-e SYNAPSE_SERVER_NAME=matrix.jimsgarage.co.uk \
-e SYNAPSE_REPORT_STATS=no \
matrixdotorg/synapse:latest generate
2) Become root so you can access the generated file
sudo -i
3) Copy the generated config file (homeserver.yaml) to your Docker volume mount
4) Switch back to your non-root user
su username
5) Change the owner and permissions of the configs so that you can edit them
sudo chown ubuntu:ubuntu * (or whatever your user is)
6) Edit the config and change the database section
name: psycopg2
args:
user: <user>
password: <pass>
database: <db>
host: <host>
cp_min: 5
cp_max: 10
Copy the credentials over from the docker compose file
7) Create an admin user (see the example command after this list)
docker exec -it synapse register_new_matrix_user http://localhost:8008 -c /data/homeserver.yaml --help #remove --help once you're ready to run it for real
8) Add a record to your DNS server (remember it needs to be resolvable externally as well!)
9) Check the web page to confirm the server is up
10) Log in with Element and profit
11) Add email and reCAPTCHA settings if you want to (recommended!)
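For step 7, a working invocation might look like this (the username and password are placeholders; the command reads the registration shared secret from the homeserver.yaml passed with -c):
docker exec -it synapse register_new_matrix_user -u admin -p 'a-strong-password' -a -c /data/homeserver.yaml http://localhost:8008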


@ -0,0 +1,92 @@
version: "3"
services:
gluetun:
image: qmcgaw/gluetun
container_name: gluetun
# the container_name above must be set (uncommented) to allow external containers to connect.
# See https://github.com/qdm12/gluetun-wiki/blob/main/setup/connect-a-container-to-gluetun.md#external-container-to-gluetun
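# A minimal sketch of how a container from another compose stack could route through
# gluetun by sharing its network namespace (referencing the container name set above):
#   network_mode: "container:gluetun"
# Containers defined in this file use network_mode: "service:gluetun" instead, as shown below.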
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun:/dev/net/tun
ports:
- 6881:6881
- 6881:6881/udp
- 8085:8085 # qbittorrent
- 9117:9117 # Jackett
- 8989:8989 # Sonarr
- 9696:9696 # Prowlarr
volumes:
- /home/ubuntu/docker/arr-stack:/gluetun
environment:
# See https://github.com/qdm12/gluetun-wiki/tree/main/setup#setup
- VPN_SERVICE_PROVIDER=nordvpn
- VPN_TYPE=wireguard
# OpenVPN:
# - OPENVPN_USER=
# - OPENVPN_PASSWORD=
# Wireguard:
- WIREGUARD_PRIVATE_KEY=<YOUR_PRIVATE_KEY> # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/providers/nordvpn.md#obtain-your-wireguard-private-key
- WIREGUARD_ADDRESSES=10.5.0.2/32
# Timezone for accurate log times
- TZ=Europe/London
# Server list updater
# See https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list
- UPDATER_PERIOD=24h
qbittorrent:
image: lscr.io/linuxserver/qbittorrent
container_name: qbittorrent
network_mode: "service:gluetun"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/London
- WEBUI_PORT=8085
volumes:
- /home/ubuntu/docker/arr-stack/qbittorrent:/config
- /home/ubuntu/docker/arr-stack/qbittorrent/downloads:/downloads
depends_on:
- gluetun
restart: always
jackett:
image: lscr.io/linuxserver/jackett:latest
container_name: jackett
network_mode: "service:gluetun"
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
- AUTO_UPDATE=true #optional
- RUN_OPTS= #optional
volumes:
- /home/ubuntu/docker/arr-stack/jackett/data:/config
- /home/ubuntu/docker/arr-stack/jackett/blackhole:/downloads
restart: unless-stopped
sonarr:
image: lscr.io/linuxserver/sonarr:latest
container_name: sonarr
network_mode: "service:gluetun"
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
volumes:
- /home/ubuntu/docker/arr-stack/sonarr/data:/config
- /home/ubuntu/docker/arr-stack/sonarr/tvseries:/tv #optional
- /home/ubuntu/docker/arr-stack/sonarr/downloadclient-downloads:/downloads #optional
restart: unless-stopped
prowlarr:
image: lscr.io/linuxserver/prowlarr:latest
container_name: prowlarr
network_mode: "service:gluetun"
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
volumes:
- /home/ubuntu/docker/arr-stack/prowlarr/data:/config
restart: unless-stopped


@ -0,0 +1,10 @@
http:
middlewares:
crowdsec-bouncer:
forwardauth:
address: http://bouncer-traefik:8080/api/v1/forwardAuth
trustForwardHeader: true
ip-whitelist:
ipWhiteList:
sourceRange:
- "1.2.3.4" # Add the IPs or networks you want to be able to access


@ -0,0 +1,48 @@
version: '3'
services:
traefik:
image: traefik:latest
container_name: traefik
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
proxy:
ports:
- 80:80
- 81:81 # external http
- 443:443
- 444:444 # external https
environment:
- CF_API_EMAIL=your@email.com
- CF_DNS_API_TOKEN=super-secure-token
# - CF_API_KEY=YOU_API_KEY
# be sure to use the correct one depending on whether you are using a token or a key
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /home/ubuntu/docker/traefik/traefik.yml:/traefik.yml:ro
- /home/ubuntu/docker/traefik/acme.json:/acme.json
- /home/ubuntu/docker/traefik/config.yml:/config.yml:ro
- /home/ubuntu/docker/traefik/logs:/var/log/traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.traefik.entrypoints=http" # restricts dashboard to internal entrypoint
- "traefik.http.routers.traefik.rule=Host(`traefik-docker.yourdomain.com`)"
- "traefik.http.middlewares.traefik-auth.basicauth.users=admin:password-hash"
- "traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme=https"
- "traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto=https"
- "traefik.http.routers.traefik.middlewares=traefik-https-redirect"
- "traefik.http.routers.traefik-secure.entrypoints=https"
- "traefik.http.routers.traefik-secure.rule=Host(`traefik-docker.yourdomain.com`)"
- "traefik.http.routers.traefik-secure.middlewares=traefik-auth"
- "traefik.http.routers.traefik-secure.tls=true"
- "traefik.http.routers.traefik-secure.tls.certresolver=cloudflare"
- "traefik.http.routers.traefik-secure.tls.domains[0].main=yourdomain.co.uk"
- "traefik.http.routers.traefik-secure.tls.domains[0].sans=*.yourdomain.co.uk"
- "traefik.http.routers.traefik-secure.service=api@internal"
networks:
proxy:
external: true


@ -0,0 +1,58 @@
api:
dashboard: true
debug: true
entryPoints:
http:
address: ":80"
http:
middlewares:
- crowdsec-bouncer@file
redirections:
entrypoint:
to: https
scheme: https
https:
address: ":443"
http:
middlewares:
- crowdsec-bouncer@file
http-external:
address: ":81"
http:
middlewares:
- crowdsec-bouncer@file
redirections:
entrypoint:
to: https-external
scheme: https
https-external:
address: ":444"
http:
middlewares:
- crowdsec-bouncer@file
serversTransport:
insecureSkipVerify: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
file:
filename: /config.yml
certificatesResolvers:
cloudflare:
acme:
email: your@email.com
storage: acme.json
dnsChallenge:
provider: cloudflare
#disablePropagationCheck: true # uncomment this if you have issues pulling certificates through Cloudflare. Setting this flag to true disables waiting for the TXT record to propagate to all authoritative name servers.
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"
log:
level: "INFO"
filePath: "/var/log/traefik/traefik.log"
accessLog:
filePath: "/var/log/traefik/access.log"


@ -0,0 +1,46 @@
version: '3.5'
services:
traefik:
image: traefik:latest
container_name: traefik
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
proxy:
ports:
- 80:80
- 443:443
environment:
- CF_API_EMAIL=your@email.com
- CF_DNS_API_TOKEN=your-api-key
# - CF_API_KEY=YOU_API_KEY
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /home/ubuntu/docker/traefik/traefik.yml:/traefik.yml:ro
- /home/ubuntu/docker/traefik/acme.json:/acme.json
- /home/ubuntu/docker/traefik/config.yml:/config.yml:ro
- /home/ubuntu/docker/traefik/logs:/var/log/traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.traefik.entrypoints=http"
- "traefik.http.routers.traefik.rule=Host(`traefik-dashboard.yourdomain.co.uk`)"
- "traefik.http.middlewares.traefik-auth.basicauth.users=YOUR_USERNAME_PASSWORD"
- "traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme=https"
- "traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto=https"
- "traefik.http.routers.traefik.middlewares=traefik-https-redirect"
- "traefik.http.routers.traefik-secure.entrypoints=https"
- "traefik.http.routers.traefik-secure.rule=Host(`traefik-dashboard.yourdomain.co.uk`)"
- "traefik.http.routers.traefik-secure.middlewares=traefik-auth"
- "traefik.http.routers.traefik-secure.tls=true"
- "traefik.http.routers.traefik-secure.tls.certresolver=cloudflare"
- "traefik.http.routers.traefik-secure.tls.domains[0].main=yourdomain.co.uk"
- "traefik.http.routers.traefik-secure.tls.domains[0].sans=*.yourdomain.co.uk"
- "traefik.http.routers.traefik-secure.service=api@internal"
networks:
proxy:
name: proxy
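# To generate the traefik-auth basicauth value used in the labels above, you can use
# htpasswd (from apache2-utils) and escape the dollar signs for docker compose, e.g.:
#   echo $(htpasswd -nb admin your-password) | sed -e s/\\$/\\$\\$/g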


@ -0,0 +1,32 @@
api:
dashboard: true
debug: true
entryPoints:
http:
address: ":80"
http:
redirections:
entryPoint:
to: https
scheme: https
https:
address: ":443"
serversTransport:
insecureSkipVerify: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
file:
filename: /config.yml
certificatesResolvers:
cloudflare:
acme:
email: your@email.com #add your email
storage: acme.json
dnsChallenge:
provider: cloudflare
#disablePropagationCheck: true # uncomment this if you have issues pulling certificates through Cloudflare. Setting this flag to true disables waiting for the TXT record to propagate to all authoritative name servers.
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"


@ -0,0 +1,28 @@
version: '3.3'
services:
uptime-kuma:
image: louislam/uptime-kuma:1
container_name: uptime-kuma
volumes:
- /home/ubuntu/docker/uptime-kuma:/app/data
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
proxy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.uptime-kuma.entrypoints=http"
- "traefik.http.routers.uptime-kuma.rule=Host(`uptime-kuma.yourdomain.com`)"
- "traefik.http.middlewares.uptime-kuma-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.uptime-kuma.middlewares=uptime-kuma-https-redirect"
- "traefik.http.routers.uptime-kuma-secure.entrypoints=https"
- "traefik.http.routers.uptime-kuma-secure.rule=Host(`uptime-kuma.yourdomain.com`)"
- "traefik.http.routers.uptime-kuma-secure.tls=true"
- "traefik.http.routers.uptime-kuma-secure.service=uptime-kuma"
- "traefik.http.services.uptime-kuma.loadbalancer.server.port=3001"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true


@ -0,0 +1,29 @@
version: "3"
services:
vaultwarden:
container_name: vaultwarden
image: vaultwarden/server:latest
volumes:
- '/home/ubuntu/docker/vaultwarden/:/data/'
restart: unless-stopped
networks:
proxy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.vaultwarden.entrypoints=http"
- "traefik.http.routers.vaultwarden.rule=Host(`vaultwarden.yourdomain.com`)"
- "traefik.http.middlewares.vaultwarden-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.vaultwarden.middlewares=vaultwarden-https-redirect"
- "traefik.http.routers.vaultwarden-secure.entrypoints=https"
- "traefik.http.routers.vaultwarden-secure.rule=Host(`vaultwarden.yourdomain.com`)"
- "traefik.http.routers.vaultwarden-secure.tls=true"
- "traefik.http.routers.vaultwarden-secure.service=vaultwarden"
- "traefik.http.services.vaultwarden.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
security_opt:
- no-new-privileges:true
networks:
proxy:
external: true


@ -0,0 +1,34 @@
version: "3.8"
services:
wg-easy:
environment:
# ⚠️ Required:
# Change this to your host's public address
- WG_HOST=mydomain.com # The public hostname of your VPN server.
# Optional:
# - PASSWORD=foobar123 # When set, requires a password when logging in to the Web UI.
# - WG_PORT=51820 # The public UDP port of your VPN server. WireGuard will always listen on 51820 inside the Docker container.
# - WG_DEFAULT_ADDRESS=10.8.0.x # Clients' IP address range.
# - WG_DEFAULT_DNS=1.1.1.1 # DNS server clients will use.
# - WG_MTU=1420 # The MTU the clients will use. Server uses default WG MTU.
# - WG_ALLOWED_IPS=192.168.15.0/24, 10.0.1.0/24 # Allowed IPs clients will use.
# - WG_PRE_UP=echo "Pre Up" > /etc/wireguard/pre-up.txt
# - WG_POST_UP=echo "Post Up" > /etc/wireguard/post-up.txt
# - WG_PRE_DOWN=echo "Pre Down" > /etc/wireguard/pre-down.txt
# - WG_POST_DOWN=echo "Post Down" > /etc/wireguard/post-down.txt
image: weejewel/wg-easy
container_name: wg-easy
volumes:
- /home/ubuntu/docker/wireguard:/etc/wireguard
ports:
- "51820:51820/udp"
- "51821:51821/tcp"
restart: unless-stopped
cap_add:
- NET_ADMIN
- SYS_MODULE
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1

36
rClone/docker-compose.yml Normal file

@ -0,0 +1,36 @@
version: '3'
services:
rclone:
image: rclone/rclone:latest
container_name: rclone
security_opt:
- no-new-privileges:true
restart: unless-stopped
command: rcd --rc-web-gui --rc-addr 0.0.0.0:5572 --rc-web-fetch-url=https://api.github.com/repos/rclone/rclone-webui-react/releases/latest --rc-web-gui-update --rc-user user --rc-pass rclone -vv --checksum --transfers=4 --checkers=4 --contimeout=60s --timeout=300s --retries=3 --low-level-retries=10 --stats=1s --stats-file-name-length=0
#ports:
# - "5572:5572" add this port if you're not using a proxy
volumes: #be sure to amend the below to your user and directories
- /home/ubuntu/docker/rclone/config:/config/rclone #location of your rclone config file (i.e., where the remote and secrets are stored)
- /home/ubuntu/docker/rclone-dashboard/sync_script:/sync_script #script to allow remote execution of backups (e.g., rclone copy --checksum --verbose --transfers 1 --checkers 4 --contimeout 60s --timeout 300s --retries 3 --low-level-retries 10 --stats 1s /YOUR_FOLDER YOUR_REMOTE:REMOTE_FOLDER/sync_script)
- /home/ubuntu/truenas:/data #this is my truenas SMB share - change to wherever you store the data you wish to upload
environment:
- PHP_TZ=Europe/London
networks:
proxy: #remove the proxy network if you're not using traefik
labels: #remove the labels section if you're not using traefik
- "traefik.enable=true"
- "traefik.http.routers.rclone-dashboard.entrypoints=http"
- "traefik.http.routers.rclone-dashboard.rule=Host(`rclone-dashboard.your-domain.co.uk`)"
- "traefik.http.routers.rclone-dashboard.middlewares=default-whitelist@file"
- "traefik.http.middlewares.rclone-dashboard-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.rclone-dashboard.middlewares=rclone-dashboard-https-redirect"
- "traefik.http.routers.rclone-dashboard-secure.entrypoints=https"
- "traefik.http.routers.rclone-dashboard-secure.rule=Host(`rclone-dashboard.your-domain.co.uk`)"
- "traefik.http.routers.rclone-dashboard-secure.tls=true"
- "traefik.http.routers.rclone-dashboard-secure.service=rclone-dashboard"
- "traefik.http.services.rclone-dashboard.loadbalancer.server.port=5572"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true
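# A sketch of what the mounted rclone config file might contain (the remote name and
# backend are assumptions; generate the real file interactively with `rclone config`):
#   [YOUR_REMOTE]
#   type = drive
#   scope = drive
#   token = {"access_token":"...","refresh_token":"...","expiry":"..."}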


@ -0,0 +1,26 @@
version: "3.8"
services:
rclone:
image: rclone/rclone
container_name: rclone_mount
security_opt:
- apparmor:unconfined
restart: unless-stopped
volumes:
- type: bind
source: /home/ubuntu/GoogleDrive_NAS_crypt #change this to the folder location you want to mount to (on your host)
target: /data
bind:
propagation: shared
- /etc/passwd:/etc/passwd:ro
- /etc/group:/etc/group:ro
- /home/ubuntu/docker/rclone/config:/config/rclone #rclone config location (i.e., the remote credentials)
- /home/ubuntu/docker/rclone/log:/log
- /home/ubuntu/docker/rclone/cache:/cache
privileged: true
cap_add:
- SYS_ADMIN
devices:
- /dev/fuse # IMPORTANT: FUSE must be installed on the host first; it is used to mount the remote onto the host
command: "mount NAME_OF_YOUR_REMOTE:/NAME_OF_FOLDER_TO_MOUNT /data --log-file /log/rclone.log --log-level ERROR --umask 002 --buffer-size 128M --checkers 20 --transfers 5 --vfs-read-ahead 1G --vfs-cache-mode writes --allow-other --allow-non-empty &"


@ -0,0 +1 @@
rclone mount YOUR_REMOTE: G: --dir-cache-time 1440m --poll-interval 15s --fast-list --buffer-size 256M --vfs-cache-mode writes --vfs-read-ahead 256M --vfs-read-chunk-size-limit=off --vfs-read-chunk-size=128M --tpslimit 10

4
rClone/remote-upload Normal file

@ -0,0 +1,4 @@
#This script initiates a remote upload using the Docker container. Example use case: run it on a Windows machine to send a command to the Docker container, telling it to perform a backup.
#You can track the progress of the operation using the rclone dashboard.
#The ExcludeRule filter allows you to skip certain folders (example below).
rclone rc sync/copy srcFs="YOUR_SOURCE:/data" _filter={\"ExcludeRule\":[\"rClone/**\",\"Frigate/**\",\"dump/**\",\"ISOConvert/**\",\"GooglePhotosBackup/**\"]} dstFs="YOUR_REMOTE:FOLDER" --rc-addr=https://IP-OR-DNS-OF-RCLONE --rc-user=james --rc-pass=rclone _async=true -vv --checksum --transfers=1 --checkers=4 --contimeout=60s --timeout=300s --retries=3 --low-level-retries=10 --stats=1s --stats-file-name-length=0 --fast-list

1
rClone/sync_script Normal file

@ -0,0 +1 @@
rclone rc sync/copy srcFs=FOLDER_ON_HOST:/ dstFs=REMOTE_NAME:REMOTE_FOLDER --rc-addr=:5572 --rc-user=user --rc-pass=rclone _async=true

57
restic/docker-compose.yml Normal file

@ -0,0 +1,57 @@
version: "3.3"
services:
backup:
image: mazzolino/restic
container_name: restic
hostname: your_host_name
environment:
RUN_ON_STARTUP: "true" #change as you wish
BACKUP_CRON: "0 */12 * * *" #this is twice daily, i.e., every 12 hours
RESTIC_REPOSITORY: /restic
RESTIC_PASSWORD: MY_SUPER_LONG_PASSWORD
RESTIC_BACKUP_SOURCES: /mnt/volumes
RESTIC_COMPRESSION: auto
RESTIC_BACKUP_ARGS: >- #add whatever tags you need to mark backups
--tag restic-proxmox
--verbose
RESTIC_FORGET_ARGS: >- #change as required
--keep-last 10
--keep-daily 7
--keep-weekly 5
--keep-monthly 12
TZ: Europe/London
volumes:
- /home/ubuntu/truenas/Restic-Proxmox-Backup:/restic #change the left-hand side to wherever you want to store the backups. As you can see, I'm storing mine on my NAS, which is mounted on the host at /home/ubuntu/truenas
- /home/ubuntu/truenas/Restic-Proxmox-Backup/tmp-for-restore:/tmp-for-restore #use this folder for restores - you can browse each backed-up container here
- /home/ubuntu/docker:/mnt/volumes:ro
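# A sketch of restoring from this repository using the running container (restic reads
# RESTIC_REPOSITORY and RESTIC_PASSWORD from the environment above; "latest" is an example snapshot id):
#   docker exec -it restic restic snapshots
#   docker exec -it restic restic restore latest --target /tmp-for-restore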
security_opt:
- no-new-privileges:true
prune:
image: mazzolino/restic
container_name: restic-prune
hostname: your_host_name
environment:
RUN_ON_STARTUP: "true"
PRUNE_CRON: "0 0 4 * * *"
RESTIC_REPOSITORY: /restic
RESTIC_PASSWORD: USE_SAME_PASSWORD_AS_ABOVE
TZ: Europe/London
security_opt:
- no-new-privileges:true
check:
image: mazzolino/restic
container_name: restic-check
hostname: your_host_name
environment:
RUN_ON_STARTUP: "false"
CHECK_CRON: "0 15 5 * * *"
RESTIC_CHECK_ARGS: >-
--read-data-subset=10%
RESTIC_REPOSITORY: /restic
RESTIC_PASSWORD: USE_SAME_PASSWORD_AS_ABOVE
TZ: Europe/London
security_opt:
- no-new-privileges:true