Merge branch 'main' of https://github.com/JamesTurland/JimsGarage
Commit 7dab87b7ef
43 changed files with 2212 additions and 189 deletions
Docker-Swarm/portainer-agent-stack.yml (new file, 38 lines)
@@ -0,0 +1,38 @@
version: '3.2'

services:
  agent:
    image: portainer/agent:2.19.4
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - agent_network
    deploy:
      mode: global
      placement:
        constraints: [node.platform.os == linux]

  portainer:
    image: portainer/portainer-ce:2.19.4
    command: -H tcp://tasks.agent:9001 --tlsskipverify
    ports:
      - "9443:9443"
      - "9000:9000"
      - "8000:8000"
    volumes:
      - type: bind
        source: /mnt/Portainer
        target: /data
    networks:
      - agent_network
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.role == manager]

networks:
  agent_network:
    driver: overlay
    attachable: true
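Editor's note: a minimal sketch of deploying this stack by hand on a swarm manager, mirroring what the swarm scripts below do automatically; the /mnt/Portainer bind path is assumed to exist (the scripts create it):

```
# Run on a manager node with the stack file saved locally
sudo mkdir -p /mnt/Portainer
docker stack deploy -c portainer-agent-stack.yml portainer
docker service ls   # agent should be global, portainer replicated 1/1
```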
Docker-Swarm/swarm-3-nodes.sh (new file, 171 lines)
@@ -0,0 +1,171 @@
#!/bin/bash

echo -e " \033[33;5m __ _ _ ___ \033[0m"
echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m"
echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m"
echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m"
echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m"
echo -e " \033[33;5m |___/ \033[0m"

echo -e " \033[36;5m ___ _ ___ \033[0m"
echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m"
echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V V / _\` | '_| ' \ \033[0m"
echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m"
echo -e " \033[36;5m \033[0m"
echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m"
echo -e " \033[32;5m \033[0m"


#############################################
# YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
#############################################

# Set the IP addresses of the admin, managers, and workers nodes
admin=192.168.3.5
manager1=192.168.3.21
manager2=192.168.3.22
manager3=192.168.3.23
worker1=192.168.3.24
worker2=192.168.3.25

# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM)
workerHostname1=dockerSwarm-04
workerHostname2=dockerSwarm-05

# User of remote machines
user=ubuntu

# Interface used on remotes
interface=eth0

# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)

# Array of manager nodes
managers=($manager2 $manager3)

# Array of worker nodes
workers=($worker1 $worker2)

# Array of all
all=($manager1 $worker1 $worker2)

#ssh certificate name variable
certName=id_rsa

#############################################
#            DO NOT EDIT BELOW              #
#############################################
# For testing purposes - in case time is wrong due to VM snapshots
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on

# Move SSH certs to ~/.ssh and change permissions
cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
chmod 600 /home/$user/.ssh/$certName
chmod 644 /home/$user/.ssh/$certName.pub

# Create SSH Config file to ignore checking (don't use in production!)
echo "StrictHostKeyChecking no" > ~/.ssh/config

#add ssh keys for all nodes
for node in "${all[@]}"; do
ssh-copy-id $user@$node
done

# Copy SSH keys to MN1 to copy tokens back later
scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh
scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh


# Install dependencies for each node (Docker, GlusterFS)
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
iptables -F
iptables -P INPUT ACCEPT
# Add Docker's official GPG key:
apt-get update
NEEDRESTART_MODE=a apt install ca-certificates curl gnupg -y
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg

# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd
mkdir -p /gluster/volume1
exit
EOF
echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m"
done

# Step 1: Create Swarm on first node
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
docker swarm init --advertise-addr $manager1 --default-addr-pool 10.20.0.0/16 --default-addr-pool-mask-length 26
docker swarm join-token manager | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > manager.txt
docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt
echo "StrictHostKeyChecking no" > ~/.ssh/config
ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin
scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager
scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker
exit
EOF
echo -e " \033[32;5mManager1 Completed\033[0m"

# Step 2: Set variables
managerToken=`cat manager`
workerToken=`cat worker`

# Step 3: Connect additional worker
for newnode in "${workers[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
docker swarm join \
--token $workerToken \
$manager1
exit
EOF
echo -e " \033[32;5m$newnode - Worker node joined successfully!\033[0m"
done

# Step 4: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
gluster peer probe $manager1; gluster peer probe $worker1; gluster peer probe $worker2;
gluster volume create staging-gfs replica 3 $manager1:/gluster/volume1 $worker1:/gluster/volume1 $worker2:/gluster/volume1 force
gluster volume start staging-gfs
chmod 666 /var/run/docker.sock
docker node update --label-add worker=true $workerHostname1
docker node update --label-add worker=true $workerHostname2
exit
EOF
echo -e " \033[32;5mGlusterFS created\033[0m"

# Step 5: Connect to all machines to ensure that GlusterFS mount restarts after boot
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
echo 'localhost:/staging-gfs /mnt glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
mount.glusterfs localhost:/staging-gfs /mnt
chown -R root:docker /mnt
exit
EOF
echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m"
done

# OPTIONAL #
# Step 6: Add Portainer
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
mkdir /mnt/Portainer
curl -L https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Docker-Swarm/portainer-agent-stack.yml -o portainer-agent-stack.yml
docker stack deploy -c portainer-agent-stack.yml portainer
docker node ls
docker service ls
gluster pool list
exit
EOF
echo -e " \033[32;5mPortainer deployed\033[0m"
Docker-Swarm/swarm.sh (new file, 182 lines)
@@ -0,0 +1,182 @@
#!/bin/bash

echo -e " \033[33;5m __ _ _ ___ \033[0m"
echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m"
echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m"
echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m"
echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m"
echo -e " \033[33;5m |___/ \033[0m"

echo -e " \033[36;5m ___ _ ___ \033[0m"
echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m"
echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V V / _\` | '_| ' \ \033[0m"
echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m"
echo -e " \033[36;5m \033[0m"
echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m"
echo -e " \033[32;5m \033[0m"


#############################################
# YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
#############################################

# Set the IP addresses of the admin, managers, and workers nodes
admin=192.168.3.5
manager1=192.168.3.21
manager2=192.168.3.22
manager3=192.168.3.23
worker1=192.168.3.24
worker2=192.168.3.25

# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM)
workerHostname1=dockerSwarm-04
workerHostname2=dockerSwarm-05

# User of remote machines
user=ubuntu

# Interface used on remotes
interface=eth0

# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)

# Array of extra managers
managers=($manager2 $manager3)

# Array of worker nodes
workers=($worker1 $worker2)

# Array of all
all=($manager1 $manager2 $manager3 $worker1 $worker2)

#ssh certificate name variable
certName=id_rsa

#############################################
#            DO NOT EDIT BELOW              #
#############################################
# For testing purposes - in case time is wrong due to VM snapshots
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on

# Move SSH certs to ~/.ssh and change permissions
cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
chmod 600 /home/$user/.ssh/$certName
chmod 644 /home/$user/.ssh/$certName.pub

# Create SSH Config file to ignore checking (don't use in production!)
echo "StrictHostKeyChecking no" > ~/.ssh/config

#add ssh keys for all nodes
for node in "${all[@]}"; do
ssh-copy-id $user@$node
done

# Copy SSH keys to MN1 to copy tokens back later
scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh
scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh


# Install dependencies for each node (Docker, GlusterFS)
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
iptables -F
iptables -P INPUT ACCEPT
# Add Docker's official GPG key:
apt-get update
NEEDRESTART_MODE=a apt install ca-certificates curl gnupg -y
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg

# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y
systemctl start glusterd
systemctl enable glusterd
mkdir -p /gluster/volume1
exit
EOF
echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m"
done

# Step 1: Create Swarm on first node
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
docker swarm init --advertise-addr $manager1
docker swarm join-token manager | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > manager.txt
docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt
echo "StrictHostKeyChecking no" > ~/.ssh/config
ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin
scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager
scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker
exit
EOF
echo -e " \033[32;5mManager1 Completed\033[0m"

# Step 2: Set variables
managerToken=`cat manager`
workerToken=`cat worker`

# Step 3: Connect additional managers
for newnode in "${managers[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
docker swarm join \
--token $managerToken \
$manager1
exit
EOF
echo -e " \033[32;5m$newnode - Manager node joined successfully!\033[0m"
done

# Step 4: Connect additional worker
for newnode in "${workers[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
docker swarm join \
--token $workerToken \
$manager1
exit
EOF
echo -e " \033[32;5m$newnode - Worker node joined successfully!\033[0m"
done

# Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
gluster peer probe $manager1; gluster peer probe $manager2; gluster peer probe $manager3; gluster peer probe $worker1; gluster peer probe $worker2;
gluster volume create staging-gfs replica 5 $manager1:/gluster/volume1 $manager2:/gluster/volume1 $manager3:/gluster/volume1 $worker1:/gluster/volume1 $worker2:/gluster/volume1 force
gluster volume start staging-gfs
chmod 666 /var/run/docker.sock
docker node update --label-add worker=true $workerHostname1
docker node update --label-add worker=true $workerHostname2
exit
EOF
echo -e " \033[32;5mGlusterFS created\033[0m"

# Step 6: Connect to all machines to ensure that GlusterFS mount restarts after boot
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
echo 'localhost:/staging-gfs /mnt glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
mount.glusterfs localhost:/staging-gfs /mnt
chown -R root:docker /mnt
exit
EOF
echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m"
done

# OPTIONAL #
# Step 7: Add Portainer
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
mkdir /mnt/Portainer
curl -L https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Docker-Swarm/portainer-agent-stack.yml -o portainer-agent-stack.yml
docker stack deploy -c portainer-agent-stack.yml portainer
docker node ls
docker service ls
gluster pool list
exit
EOF
echo -e " \033[32;5mPortainer deployed\033[0m"
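Editor's note: a sketch of how these swarm scripts are typically kicked off, assuming you run them from the admin machine (admin=192.168.3.5) that holds the id_rsa key pair in the user's home directory, as the scripts expect:

```
# Run on the admin machine, not on a swarm node
chmod +x swarm.sh
./swarm.sh
# Afterwards, on manager1: docker node ls and docker service ls to verify the cluster
```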
@@ -3,49 +3,49 @@
 * Consumer server build
   * High End
     * Intel
-      * CPU: Intel Core i7-13700K (with iGPU) : [https://amzn.to/3E6DbUT](https://amzn.to/44wT8yz)
-      * Mobo: Z690D4U (if you can find one) or MSI MAG Z790 TOMAHAWK WIFI : [https://amzn.to/3OICGoL](https://amzn.to/44tser9)
-      * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3E3Gc8o](https://amzn.to/47S3Br2)
+      * CPU: Intel Core i7-13700K (with iGPU) : [https://amzn.to/46KzJeu](https://amzn.to/46KzJeu)
+      * Mobo: Z690D4U (if you can find one [https://amzn.to/3uG0Qdc](https://amzn.to/3uG0Qdc)) or MSI MAG Z790 TOMAHAWK WIFI : [https://amzn.to/48n68cr](https://amzn.to/48n68cr)
+      * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3R6VhMB](https://amzn.to/3R6VhMB)
     * PSU:
     * AMD
-      * CPU: AMD Ryzen 9 7900 : [https://amzn.to/45CDLoZ](https://amzn.to/47TqV7N)
-      * Mobo: ASRock B650D4U-2T/BCM (or B650D4U-2L2T/BCM for 10G) or ASRock X670E Steel Legend ATX : [https://amzn.to/3KPrRA8](https://amzn.to/3YTrMkI)
-      * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3E3Gc8o](https://amzn.to/47PgzWD)
+      * CPU: AMD Ryzen 9 7900 : [https://amzn.to/47GRdd1](https://amzn.to/47GRdd1)
+      * Mobo: ASRock B650D4U-2T/BCM (or B650D4U-2L2T/BCM for 10G) or ASRock X670E Steel Legend ATX : [https://amzn.to/3GvShUZ](https://amzn.to/3GvShUZ)
+      * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3R6VhMB](https://amzn.to/3R6VhMB)
   * Budget
     * Intel
-      * CPU: Intel Core i5-12400 : [https://amzn.to/3KKPhqA](https://amzn.to/3EjiG7m)
-      * Mobo: MSI MAG B660M MORTAR : [https://amzn.to/3P4HpSb](https://amzn.to/3sy1QPG)
-      * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3E3Gc8o](https://amzn.to/47PgzWD)
+      * CPU: Intel Core i5-12400 : [https://amzn.to/4aaUG5o](https://amzn.to/4aaUG5o)
+      * Mobo: MSI MAG B660M MORTAR : [https://amzn.to/3R4swjA](https://amzn.to/3R4swjA)
+      * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3teJeES](https://amzn.to/3teJeES)
     * AMD
-      * CPU: amd ryzen 5 5600 : [https://amzn.to/3QLToq0](https://amzn.to/3Ej9EYi)
-      * Mobo: MSI MAG B550 TOMAHAWK : [https://amzn.to/3OKh0bV](https://amzn.to/3OW3l1J)
-      * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3E3Gc8o](https://amzn.to/3Z2vIzN)
+      * CPU: AMD Ryzen 5 5600 : [https://amzn.to/3R8HKUD](https://amzn.to/3R8HKUD)
+      * Mobo: MSI MAG B550 TOMAHAWK : [https://amzn.to/3Rc0liz](https://amzn.to/3Rc0liz)
+      * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3teJeES](https://amzn.to/3teJeES)
   * PSU:
-    * Corsair HX: [https://amzn.to/3P4YfRN](https://amzn.to/3LoJveD)
+    * Corsair HX: [https://amzn.to/4ab2wvx](https://amzn.to/4ab2wvx)
   * GPU:
-    * Budget: Intel Arc a380: [https://amzn.to/47Fa60k](https://amzn.to/3OU9hrS)
+    * Budget: Intel Arc a380: [https://amzn.to/3RsEcOC](https://amzn.to/3RsEcOC)
 * All-in-One:
-  * Budget: Lenovo ThinkCentre : [https://amzn.to/3KLPdH1](https://amzn.to/3swN0c8)
-  * Premium: Intel NUC - pick generation to suit budget : [https://amzn.to/3YR0jQL](https://amzn.to/3KXW6VG)
+  * Budget: Lenovo ThinkCentre : [https://amzn.to/3TjGSiC](https://amzn.to/3TjGSiC)
+  * Premium: Intel NUC - pick generation to suit budget : [https://amzn.to/4aauE1O](https://amzn.to/4aauE1O)
 * Enterprise server
   * Server Form Factor: Dell r730
   * Workstation ATX: Epyc 7302p with Gigabyte or SuperMicro Mobo (Check eBay)
 * Switch
-  * Entry-level: Netgear GS108E (5/8/16 ports) : [https://amzn.to/3qCQBVz](https://amzn.to/3L25APA)
-  * Mid-level: Mikrotik CRS326-24G-2S+RM (or IN - non rack mount) (2x 10Gb SFP+) : [https://amzn.to/3P3BY76](https://amzn.to/3Piz0fd)
-  * Pro-sumer: Mikrotik CRS328-24P-4S+RM (POE, 1Gb, 10Gb SFP+) (£500) vs Unifi Professional 48 PoE (£1000) : [https://amzn.to/44lVhwC](https://amzn.to/3OYo3xI)
+  * Entry-level: Netgear GS108E (5/8/16 ports) : [https://amzn.to/3uJFUCe](https://amzn.to/3uJFUCe)
+  * Mid-level: Mikrotik CRS326-24G-2S+RM (or IN - non rack mount) (2x 10Gb SFP+) : [https://amzn.to/471cWLL](https://amzn.to/471cWLL)
+  * Pro-sumer: Mikrotik CRS328-24P-4S+RM (POE, 1Gb, 10Gb SFP+) (£500) vs Unifi Professional 48 PoE (£1000) : [https://amzn.to/3R8I8T5](https://amzn.to/3R8I8T5)
 * NIC
   * 1G: Intel i210 or i350t4v2
   * 10G: Mellanox Connect-X3 10Gb SFP+, Intel x520DA2 or t2
 * HBA:
   * LSI
 * SSD/HDD
-  * NAS: Toshiba MG Series (16TB), Seagate Ironwolf 16TB : [https://amzn.to/3ONcOs9](https://amzn.to/3qRXTVu)
-  * NVME: Firecuda 530 gen 4, or Samsung 970 EVO : [https://amzn.to/3E5rpKn](https://amzn.to/3KWnoMk)
-  * Access Point: Unifi U6 (choose model for situation) : [https://amzn.to/3E4x9UD](https://amzn.to/3qQjn5a)
+  * NAS: Toshiba MG Series (16TB), Seagate Ironwolf 16TB : [https://amzn.to/417HwSs](https://amzn.to/417HwSs)
+  * NVME: Firecuda 530 gen 4, or Samsung 970 EVO : [https://amzn.to/486pAtQ](https://amzn.to/486pAtQ)
+  * Access Point: Unifi U6 (choose model for situation) : [https://amzn.to/484NrKd](https://amzn.to/484NrKd)
 * Rack: TrippLite
-  * Patch Panel: TRENDnet 24-Port Cat6A Shielded 1U Patch Panel : [https://amzn.to/3QO0fzp](https://amzn.to/3PcU4U9)
-  * UPS: APC SmartUPS : [https://amzn.to/3QRuaqf](https://amzn.to/3sysW9v)
+  * Patch Panel: TRENDnet 24-Port Cat6A Shielded 1U Patch Panel : [https://amzn.to/4879mAD](https://amzn.to/4879mAD)
+  * UPS: APC SmartUPS : [https://amzn.to/46IfSfT](https://amzn.to/46IfSfT)
 * Cooling:
-  * Rack: AC Infinity CLOUDPLATE : [https://amzn.to/3QINupG](https://amzn.to/3QZq7bF)
-  * Fans: Nocuta : [https://amzn.to/3qxMcTT](https://amzn.to/3YU7t6M)https://amzn.to/3YU7t6M
+  * Rack: AC Infinity CLOUDPLATE : [https://amzn.to/3NeUFmX](https://amzn.to/3NeUFmX)
+  * Fans: Nocuta : [https://amzn.to/46NaAzZ](https://amzn.to/46NaAzZ)
Kubernetes/Create-VMS/create-vms.sh (new file, 301 lines)
@@ -0,0 +1,301 @@
#!/bin/bash

# for Debian this must be installed for Longhorn to work
# sudo apt-get install -y open-iscsi

###########################
#     DEFAULT VALUES      #
###########################
os_options=("Debian" "Ubuntu")
os="Debian"
# Proxmox path to the template folder
template_path="/var/lib/vz/template"
# Proxmox certificate path
cert_path="/root/.ssh"
# Number of VMs to be created
vm_number=3
# The first VM id, smallest id is 100
id=121
# Name prefix of the first VM
name=k3s

drive_name=local-zfs
agent=0 # TODO: Implement User Option for it
disk_size=20G
memory=2048
core=2

# IP for the first VM
ip=192.168.0.21
gateway=192.168.0.1

# ssh certificate name variable
cert_name=id_rsa

# User settings
user=$USER
password=password

ubuntu_url=https://cloud-images.ubuntu.com/lunar/current/lunar-server-cloudimg-amd64.img
ubuntu_filename=lunar-server-cloudimg-amd64.img

debian_url=https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2
debian_filename=debian-12-genericcloud-amd64.qcow2

os_url=https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2
os_filename=debian-12-genericcloud-amd64.qcow2

##################
#   Functions    #
##################
function run() {
get_user_variables
print_info # Prints information about what will be created based on defaults/user inputs
setup # Do not worry it asks for confirmation before the setup/installation starts
start_vms # You can choose to start all VMs if you want
#qemu_agent # Not implemented yet, you can choose to add qemu-agent to the installation image
}

function get_user_variables() {
echo -e -n "\e[36mWhich OS cloud image would you like to use?\n\e[0m"
PS3=""
select option in "${os_options[@]}"; do
# Check if the user selected an option
if [[ -n "$option" ]]; then
# Do something with the selected option
case $option in
"Debian") ;;
"Ubuntu") ;;
*)
echo -e "\e[31mInvalid option selected. Exiting...\e[0m"
exit
;;
esac
else
# No option was selected
echo -e "\e[31mNo option was selected. Exiting...\e[0m"
exit
fi
# Set the selected Operating system
os=$option
# Exit the select loop
break
done
echo -e "\e[36mHow many VM do you want to create? \e[0m"
read -e -p "" -i "$vm_number" vm_number
echo -e "\e[36mFirst VM ID? (minimum 100)\e[0m"
read -e -p "" -i $id id
echo -e "\e[36mVM name prefix? \e[0m"
read -e -p "" -i $name name
echo -e "\e[36mIP address? \e[0m"
read -e -p "" -i $ip ip

# Split the IP address into its parts using the '.' character as the delimiter.
ip_address_parts=(${ip//./ })
octet1=${ip_address_parts[0]}
octet2=${ip_address_parts[1]}
octet3=${ip_address_parts[2]}
octet4=${ip_address_parts[3]}

echo -e "\e[36mGateway? \e[0m"
read -e -p "" -i $gateway gateway
echo -e "\e[36mDisk Size? \e[0m"
read -e -p "" -i $disk_size disk_size
echo -e "\e[36mMemory Size? \e[0m"
read -e -p "" -i $memory memory
echo -e "\e[36mNumber of processor cores? \e[0m"
read -e -p "" -i $core core
echo -e "\e[36mUser name? \e[0m"
read -e -p "" -i $user user
echo -e "\e[36mUser password? \e[0m"
read -e -p "" -i $password password
echo -e "\e[36mCertification name? \e[0m"
read -e -p "" -i $cert_name cert_name
echo -e "\e[36mDrive name to store images? \e[0m"
read -e -p "" -i $drive_name drive_name
}

#
function qemu_agent() {
yesno=n
echo -e "\e[36mDo you want to add qemu agent to the VM images? (y/n) \e[0m"
read -e -p "" -i $yesno yesno
case $yesno in
[Yy]*)
# Install qemu agent packages for each VM
echo -e "\e[32mInstalling qemu agent packages.\e[0m"

for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi

# TODO: ssh into all VMs one by one and install the necessary qemu agent packages
done
;;
[Nn]*)
echo -e "\e[33mSkipping qemu agent installation.\e[0m"
;;
*) ;;
esac
}

function print_info() {
echo -e "\e[36m\nThe following Virtual Machines will be created:\e[0m"
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[32mVM ID: $(($id + $i - 1)), Name: $name-$idx, IP address: $octet1.$octet2.$octet3.$(($octet4 + $i - 1))\e[0m"
done
echo -e "\e[36m\nCommon VM parameters:\e[0m"
echo -e "\e[32mOS cloud image:\e[0m" "$os"
echo -e "\e[32mPublic Proxmox Certificate:\e[0m" "$cert_path/$cert_name.pub\n"
echo -e "\e[32mGateway:\e[0m" "$gateway"
echo -e "\e[32mDisk size:\e[0m" "$disk_size""B"
echo -e "\e[32mMemory size:\e[0m" "$memory""GB"
echo -e "\e[32mCPU cores:\e[0m" "$core"
echo -e "\e[32mDrive name:\e[0m" "$drive_name"
}

function setup() {
yesno=n
echo -e "\e[36mDo you want to proceed with the setup? (y/n) \e[0m"
read -e -p "" -i $yesno yesno
case $yesno in
[Yy]*)
get_os_image
create_vms
;;
[Nn]*)
echo -e "\e[31mInstallation aborted by user. No changes were made.\e[0m"
exit
;;
*) ;;
esac
}

function start_vms() {
yesno=n
echo -e "\e[36mDo you want to start up the Virtual Machines now? (y/n) \e[0m"
read -e -p "" -i $yesno yesno
case $yesno in
[Yy]*)
# Start VMs
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[33mStarting Virtual Machine $idx\e[0m"
qm start $(($id + $i - 1))
done
# Print VMs statuses
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[33mVirtual Machine $idx status: \e[0m"
qm status $(($id + $i - 1))
done
;;
[Nn]*)
exit
;;
*) ;;
esac
}

function get_os_image() {
case $os in
"Debian")
os_url=$debian_url
os_filename=$debian_filename
# Check if the directory exists.
if [ ! -d "$template_path/qcow" ]; then
mkdir $template_path/qcow
fi
cd $template_path/qcow
;;
"Ubuntu")
os_url=$ubuntu_url
os_filename=$ubuntu_filename
# Check if the directory exists.
if [ ! -d "$template_path/iso" ]; then
mkdir $template_path/iso
fi
cd $template_path/iso
;;
*)
echo -e "\e[31mInvalid option.\e[0m"
;;
esac

# Check if the os image file already exists.
# If not then download it.
if [ ! -f "$os_filename" ]; then
# Download the selected os cloud image
echo -e "\e[33mDownloading $os cloud image ...\e[0m"
wget $os_url
fi

}

# Only runs if you uncomment the function in `create_vms`. Please be careful
function destroy_existing_vms() {
# Stop and destroy Virtual Machine if it already exists
# TODO: Put loop and confirmation before doing anything
qm stop $(($id + $i - 1))
qm destroy $(($id + $i - 1)) --destroy-unreferenced-disks --purge
}

function create_vms() {
for ((i = 1; i <= $vm_number; i++)); do
# Stop and destroy Virtual Machine if it already exists.
# Be really careful with this, only uncomment if you know what you are doing!!!
#
# destroy_existing_vms
#
# #############################
# Create VM from the cloud image
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[33mCreating Virtual Machine: $idx\e[0m"
echo "VM ID: $(($id + $i - 1)), Name: $name-$idx, IP address: $octet1.$octet2.$octet3.$(($octet4 + $i - 1))"
qm create $(($id + $i - 1)) \
--memory $memory \
--core $core \
--numa 1 \
--name $name-$idx \
--net0 virtio,bridge=vmbr0 \
--balloon 0 \
--ipconfig0 gw=$gateway,ip=$octet1.$octet2.$octet3.$(($octet4 + $i - 1))/24 \
--cipassword $password \
--ciuser $user \
--ciupgrade 1 \
--sshkeys $cert_path/$cert_name.pub \
--agent=$agent

qm importdisk $(($id + $i - 1)) $os_filename $drive_name
qm set $(($id + $i - 1)) --scsihw virtio-scsi-pci --scsi0 $drive_name:vm-$(($id + $i - 1))-disk-0
qm disk resize $(($id + $i - 1)) scsi0 $disk_size
qm set $(($id + $i - 1)) --ide2 $drive_name:cloudinit
qm set $(($id + $i - 1)) --boot c --bootdisk scsi0
qm set $(($id + $i - 1)) --serial0 socket --vga serial0
done
}

#########################
#    Run the script     #
#########################
run
Kubernetes/Create-VMS/readme.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Simple script to create multiple Virtual Machines automatically

1. It asks you some questions about the Virtual Machines you want.
   - You can select a Debian or Ubuntu cloud image
2. Prints detailed info about the VMs that are going to be created.
3. Lets you confirm whether you want to continue.
4. You can choose to start all VMs at the end 🚀

Enjoy 🙂
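Editor's note: a minimal sketch of a typical invocation, assuming the script is copied to a Proxmox node (it relies on the qm CLI and the /var/lib/vz/template path, and the /root/.ssh key defaults suggest running as root):

```
# On the Proxmox host
chmod +x create-vms.sh
sudo ./create-vms.sh
# Answer the prompts (OS, VM count, first VM ID, IP, gateway, user, ...),
# confirm the printed summary, then optionally start the VMs when asked.
```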
Kubernetes/GPU-Passthrough/jellyfin.yaml (new file, 68 lines)
@@ -0,0 +1,68 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: jellyfin
    app.kubernetes.io/instance: jellyfin
    app.kubernetes.io/name: jellyfin
  name: jellyfin
  namespace: jellyfin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jellyfin
  template:
    metadata:
      labels:
        app: jellyfin
        app.kubernetes.io/name: jellyfin
    spec:
      nodeSelector:
        worker: "true"
      containers:
        - image: jellyfin/jellyfin
          imagePullPolicy: Always
          name: jellyfin
          resources:
            limits:
              gpu.intel.com/i915: "1" # requesting 1 GPU
          ports:
            - containerPort: 8096
              name: web
              protocol: TCP
          env:
            - name: TZ
              value: Europe/London
          volumeMounts:
            - mountPath: /config
              name: jellyfin
              subPath: config
            - mountPath: /cache
              name: jellyfin
              subPath: cache
      volumes:
        - name: jellyfin
          persistentVolumeClaim:
            claimName: jellyfin
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: jellyfin
  name: jellyfin
  namespace: jellyfin
spec:
  ports:
    - name: web-tcp
      port: 8096
      protocol: TCP
      targetPort: 8096
    - name: web-udp
      port: 8096
      protocol: UDP
      targetPort: 8096
  selector:
    app: jellyfin
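Editor's note: a minimal sketch of applying this manifest, assuming the jellyfin namespace and a PersistentVolumeClaim named jellyfin already exist (the PVC is referenced above but not defined in this file), and that a node carries the worker=true label and exposes the gpu.intel.com/i915 resource:

```
kubectl create namespace jellyfin        # skip if it already exists
kubectl apply -f jellyfin.yaml
kubectl -n jellyfin get pods -o wide     # the pod should land on a worker=true node
```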
Kubernetes/GPU-Passthrough/readme.md (new file, 26 lines)
@@ -0,0 +1,26 @@
# Create directory
```
mkdir -p /etc/rancher/rke2
```
# Create File for RKE2 - Config
```
sudo nano /etc/rancher/rke2/config.yaml
```
# Add values
```
token: <ADD-TOKEN>
server: https://<ADD-VIP>:9345
node-label:
  - worker=true
  - longhorn=true
```
# Install RKE2
```
sudo su
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh -
```
# Enable RKE2
```
systemctl enable rke2-agent.service
systemctl start rke2-agent.service
```
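Editor's note: once the agent service is running, a quick way to confirm the node registered with the labels set in config.yaml (a sketch, run from any machine with kubectl access to this cluster):

```
kubectl get nodes --show-labels | grep -E 'worker=true|longhorn=true'
```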
@@ -40,7 +40,7 @@ user=ubuntu
 interface=eth0
 
 # Set the virtual IP address (VIP)
-vip=192.168.1.50
+vip=192.168.3.50
 
 # Array of master nodes
 masters=($master2 $master3)
@@ -55,7 +55,7 @@ all=($master1 $master2 $master3 $worker1 $worker2)
 allnomaster1=($master2 $master3 $worker1 $worker2)
 
 #Loadbalancer IP range
-lbrange=192.168.1.61-192.168.1.79
+lbrange=192.168.3.60-192.168.3.80
 
 #ssh certificate name variable
 certName=id_rsa
@@ -92,17 +92,6 @@ else
 echo -e " \033[32;5mKubectl already installed\033[0m"
 fi
 
-# Install Docker to generate manifest and daemonset if not already present
-if ! command -v docker version &> /dev/null
-then
-echo -e " \033[31;5mDocker not found, installing\033[0m"
-curl -fsSL https://get.docker.com -o get-docker.sh
-sudo sh get-docker.sh
-wait $!
-else
-echo -e " \033[32;5mDocker already installed\033[0m"
-fi
-
 # Create SSH Config file to ignore checking (don't use in production!)
 echo "StrictHostKeyChecking no" > ~/.ssh/config
 
@@ -128,7 +117,7 @@ k3sup install \
 --tls-san $vip \
 --cluster \
 --k3s-version $k3sVersion \
---k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1" \
+--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
 --merge \
 --sudo \
 --local-path $HOME/.kube/config \
@@ -137,32 +126,23 @@ k3sup install \
 echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m"
 
 # Step 2: Install Kube-VIP for HA
 kubectl k3s-ha
 kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
 
-# Step 3: Generate Daemonset with Docker
-sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$KVVERSION manifest daemonset \
---interface $interface \
---address $vip \
---inCluster \
---taint \
---controlplane \
---services \
---arp \
---leaderElection | tee $HOME/kube-vip.yaml
+# Step 3: Download kube-vip
+curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip
+cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml
 
 # Step 4: Copy kube-vip.yaml to master1
 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml
 
 
 # Step 5: Connect to Master1 and move kube-vip.yaml
 
 ssh $user@$master1 -i ~/.ssh/$certName <<- EOF
 sudo mkdir -p /var/lib/rancher/k3s/server/manifests
 sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
 EOF
 
-# Step 6: Add new master nodes (servers)
+# Step 6: Add new master nodes (servers) & workers
 for newnode in "${masters[@]}"; do
 k3sup join \
 --ip $newnode \
@@ -172,11 +152,12 @@ for newnode in "${masters[@]}"; do
 --server \
 --server-ip $master1 \
 --ssh-key $HOME/.ssh/$certName \
---k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode" \
+--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
 --server-user $user
 echo -e " \033[32;5mMaster node joined successfully!\033[0m"
 done
 
 # add workers
 for newagent in "${workers[@]}"; do
 k3sup join \
 --ip $newagent \
@@ -184,26 +165,39 @@ for newagent in "${workers[@]}"; do
 --sudo \
 --k3s-version $k3sVersion \
 --server-ip $master1 \
---ssh-key $HOME/.ssh/$certName
+--ssh-key $HOME/.ssh/$certName \
+--k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\""
 echo -e " \033[32;5mAgent node joined successfully!\033[0m"
 done
 
 # Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider
 kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
 
-#IP range for loadbalancer services to use
-kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange
+# Step 8: Install Metallb
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
+# Download ipAddressPool and configure using lbrange above
+curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool
+cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
 
-# Step 8: Test with Nginx
+# Step 9: Test with Nginx
 kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default
 kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default
 
-echo -e " \033[32;5mWaiting 20s for K3S to sync and LoadBalancer to come online\033[0m"
+echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m"
 
+while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do
+sleep 1
+done
+
+# Step 10: Deploy IP Pools and l2Advertisement
+kubectl wait --namespace metallb-system \
+--for=condition=ready pod \
+--selector=component=controller \
+--timeout=120s
+kubectl apply -f ipAddressPool.yaml
+kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml
 
 kubectl get nodes
 kubectl get svc
 kubectl get pods --all-namespaces -o wide
@@ -47,7 +47,7 @@ spec:
 - name: vip_ddns
   value: "false"
 - name: svc_enable
-  value: "true"
+  value: "false"
 - name: svc_leasename
   value: plndr-svcs-lock
 - name: vip_leaderelection
@@ -20,6 +20,10 @@ echo -e " \033[32;5m \
 # YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
 #############################################
 
+# This is an updated version of the K3S script that installs Longhorn on the worker nodes.
+# The worker nodes are scaled to 3 for redundancy and HA
+# This has the added benefit of using local storage on worker nodes (faster)
+
 # Version of Kube-VIP to deploy
 KVVERSION="v0.6.3"
 
@@ -32,6 +36,7 @@ master2=192.168.3.22
 master3=192.168.3.23
 worker1=192.168.3.24
 worker2=192.168.3.25
+worker3=192.168.3.26
 
 # User of remote machines
 user=ubuntu
@@ -46,13 +51,13 @@ vip=192.168.3.50
 masters=($master2 $master3)
 
 # Array of worker nodes
-workers=($worker1 $worker2)
+workers=($worker1 $worker2 $worker3)
 
 # Array of all
-all=($master1 $master2 $master3 $worker1 $worker2)
+all=($master1 $master2 $master3 $worker1 $worker2 $worker3)
 
 # Array of all minus master
-allnomaster1=($master2 $master3 $worker1 $worker2)
+allnomaster1=($master2 $master3 $worker1 $worker2 $worker3)
 
 #Loadbalancer IP range
 lbrange=192.168.3.60-192.168.3.80
@@ -117,7 +122,7 @@ k3sup install \
 --tls-san $vip \
 --cluster \
 --k3s-version $k3sVersion \
---k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1" \
+--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
 --merge \
 --sudo \
 --local-path $HOME/.kube/config \
@@ -126,24 +131,23 @@ k3sup install \
 echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m"
 
 # Step 2: Install Kube-VIP for HA
 kubectl k3s-ha
 kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
 
 # Step 3: Download kube-vip
-curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip.yaml
+curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip
 cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml
 
 # Step 4: Copy kube-vip.yaml to master1
 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml
 
 
 # Step 5: Connect to Master1 and move kube-vip.yaml
 
 ssh $user@$master1 -i ~/.ssh/$certName <<- EOF
 sudo mkdir -p /var/lib/rancher/k3s/server/manifests
 sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
 EOF
 
-# Step 6: Add new master nodes (servers)
+# Step 6: Add new master nodes (servers) & workers
 for newnode in "${masters[@]}"; do
 k3sup join \
 --ip $newnode \
@@ -153,11 +157,12 @@ for newnode in "${masters[@]}"; do
 --server \
 --server-ip $master1 \
 --ssh-key $HOME/.ssh/$certName \
---k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode" \
+--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
 --server-user $user
 echo -e " \033[32;5mMaster node joined successfully!\033[0m"
 done
 
 # add workers
 for newagent in "${workers[@]}"; do
 k3sup join \
 --ip $newagent \
@@ -165,7 +170,8 @@ for newagent in "${workers[@]}"; do
 --sudo \
 --k3s-version $k3sVersion \
 --server-ip $master1 \
---ssh-key $HOME/.ssh/$certName
+--ssh-key $HOME/.ssh/$certName \
+--k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\""
 echo -e " \033[32;5mAgent node joined successfully!\033[0m"
 done
 
@@ -183,13 +189,17 @@ cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
 kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default
 kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default
 
-echo -e " \033[32;5mWaiting 20s for K3S to sync and LoadBalancer to come online\033[0m"
+echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m"
 
+while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do
+sleep 1
+done
+
 # Step 10: Deploy IP Pools and l2Advertisement
 kubectl wait --namespace metallb-system \
 --for=condition=ready pod \
 --selector=component=controller \
 --timeout=120s
 kubectl apply -f ipAddressPool.yaml
 kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml
 
@@ -198,3 +208,53 @@ kubectl get svc
 kubectl get pods --all-namespaces -o wide
 
 echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m"
+
+# Step 11: Install helm
+curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+chmod 700 get_helm.sh
+./get_helm.sh
+
+# Step 12: Add Rancher Helm Repository
+helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+kubectl create namespace cattle-system
+
+# Step 13: Install Cert-Manager
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml
+helm repo add jetstack https://charts.jetstack.io
+helm repo update
+helm install cert-manager jetstack/cert-manager \
+--namespace cert-manager \
+--create-namespace \
+--version v1.13.2
+kubectl get pods --namespace cert-manager
+
+# Step 14: Install Rancher
+helm install rancher rancher-latest/rancher \
+--namespace cattle-system \
+--set hostname=rancher.my.org \
+--set bootstrapPassword=admin
+kubectl -n cattle-system rollout status deploy/rancher
+kubectl -n cattle-system get deploy rancher
+
+# Step 15: Expose Rancher via Loadbalancer
+kubectl get svc -n cattle-system
+kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system
+kubectl get svc -n cattle-system
+
+# Profit: Go to Rancher GUI
+echo -e " \033[32;5mHit the url… and create your account\033[0m"
+echo -e " \033[32;5mBe patient as it downloads and configures a number of pods in the background to support the UI (can be 5-10mins)\033[0m"
+
+# Step 16: Install Longhorn (using modified Official to pin to Longhorn Nodes)
+echo -e " \033[32;5mInstalling Longhorn - It can take a while for all pods to deploy...\033[0m"
+kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/Longhorn/longhorn.yaml
+kubectl get pods \
+--namespace longhorn-system \
+--watch
+
+# Step 17: Print out confirmation
+
+kubectl get nodes
+kubectl get svc -n longhorn-system
+
+echo -e " \033[32;5mHappy Kubing! Access Longhorn through Rancher UI\033[0m"
@@ -14,7 +14,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 ---
 # Source: longhorn/templates/serviceaccount.yaml
 apiVersion: v1
@@ -25,7 +25,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 ---
 # Source: longhorn/templates/default-setting.yaml
 apiVersion: v1
@@ -36,7 +36,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
    app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 data:
   default-setting.yaml: |-
     system-managed-components-node-selector: longhorn=true
@@ -50,7 +50,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 data:
   storageclass.yaml: |
     kind: StorageClass
@@ -80,7 +80,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: backingimagedatasources.longhorn.io
 spec:
@@ -251,7 +251,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: backingimagemanagers.longhorn.io
 spec:
@@ -427,7 +427,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: backingimages.longhorn.io
 spec:
@@ -586,7 +586,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: backups.longhorn.io
 spec:
@@ -782,7 +782,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: backuptargets.longhorn.io
 spec:
@@ -965,7 +965,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: backupvolumes.longhorn.io
 spec:
@@ -1132,7 +1132,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: engineimages.longhorn.io
 spec:
@@ -1324,7 +1324,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: engines.longhorn.io
 spec:
@@ -1679,7 +1679,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: instancemanagers.longhorn.io
 spec:
@@ -1920,7 +1920,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: nodes.longhorn.io
 spec:
@@ -2164,7 +2164,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: orphans.longhorn.io
 spec:
@@ -2435,7 +2435,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: replicas.longhorn.io
 spec:
@@ -2652,7 +2652,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: settings.longhorn.io
 spec:
@@ -2743,7 +2743,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: sharemanagers.longhorn.io
 spec:
@@ -2858,7 +2858,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: snapshots.longhorn.io
 spec:
@@ -2985,7 +2985,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: supportbundles.longhorn.io
 spec:
@@ -3111,7 +3111,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: systembackups.longhorn.io
 spec:
@@ -3239,7 +3239,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: systemrestores.longhorn.io
 spec:
@@ -3341,7 +3341,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: volumes.longhorn.io
 spec:
@@ -3703,7 +3703,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     longhorn-manager: ""
   name: volumeattachments.longhorn.io
 spec:
@@ -3832,7 +3832,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 rules:
 - apiGroups:
   - apiextensions.k8s.io
@@ -3898,7 +3898,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -3916,7 +3916,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -3933,7 +3933,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     app: longhorn-manager
   name: longhorn-backend
   namespace: longhorn-system
@@ -3954,7 +3954,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     app: longhorn-ui
   name: longhorn-frontend
   namespace: longhorn-system
@@ -3975,7 +3975,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     app: longhorn-conversion-webhook
   name: longhorn-conversion-webhook
   namespace: longhorn-system
@@ -3996,7 +3996,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     app: longhorn-admission-webhook
   name: longhorn-admission-webhook
   namespace: longhorn-system
@@ -4017,7 +4017,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     app: longhorn-recovery-backend
   name: longhorn-recovery-backend
   namespace: longhorn-system
@@ -4038,7 +4038,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
   name: longhorn-engine-manager
   namespace: longhorn-system
 spec:
@@ -4054,7 +4054,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
   name: longhorn-replica-manager
   namespace: longhorn-system
 spec:
@@ -4070,7 +4070,7 @@ metadata:
   labels:
     app.kubernetes.io/name: longhorn
     app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/version: v1.5.1
+    app.kubernetes.io/version: v1.5.3
     app: longhorn-manager
   name: longhorn-manager
   namespace: longhorn-system
@@ -4083,12 +4083,12 @@ spec:
       labels:
         app.kubernetes.io/name: longhorn
        app.kubernetes.io/instance: longhorn
-        app.kubernetes.io/version: v1.5.1
+        app.kubernetes.io/version: v1.5.3
         app: longhorn-manager
     spec:
      containers:
      - name: longhorn-manager
        image: longhornio/longhorn-manager:v1.5.1
|
||||
image: longhornio/longhorn-manager:v1.5.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
@ -4097,17 +4097,17 @@ spec:
|
|||
- -d
|
||||
- daemon
|
||||
- --engine-image
|
||||
- "longhornio/longhorn-engine:v1.5.1"
|
||||
- "longhornio/longhorn-engine:v1.5.3"
|
||||
- --instance-manager-image
|
||||
- "longhornio/longhorn-instance-manager:v1.5.1"
|
||||
- "longhornio/longhorn-instance-manager:v1.5.3"
|
||||
- --share-manager-image
|
||||
- "longhornio/longhorn-share-manager:v1.5.1"
|
||||
- "longhornio/longhorn-share-manager:v1.5.3"
|
||||
- --backing-image-manager-image
|
||||
- "longhornio/backing-image-manager:v1.5.1"
|
||||
- "longhornio/backing-image-manager:v1.5.3"
|
||||
- --support-bundle-manager-image
|
||||
- "longhornio/support-bundle-kit:v0.0.25"
|
||||
- --manager-image
|
||||
- "longhornio/longhorn-manager:v1.5.1"
|
||||
- "longhornio/longhorn-manager:v1.5.3"
|
||||
- --service-account
|
||||
- longhorn-service-account
|
||||
ports:
|
||||
|
@ -4177,7 +4177,7 @@ metadata:
|
|||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.5.1
|
||||
app.kubernetes.io/version: v1.5.3
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
|
@ -4188,23 +4188,23 @@ spec:
|
|||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.5.1
|
||||
app.kubernetes.io/version: v1.5.3
|
||||
app: longhorn-driver-deployer
|
||||
spec:
|
||||
initContainers:
|
||||
- name: wait-longhorn-manager
|
||||
image: longhornio/longhorn-manager:v1.5.1
|
||||
image: longhornio/longhorn-manager:v1.5.3
|
||||
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
|
||||
containers:
|
||||
- name: longhorn-driver-deployer
|
||||
image: longhornio/longhorn-manager:v1.5.1
|
||||
image: longhornio/longhorn-manager:v1.5.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- longhorn-manager
|
||||
- -d
|
||||
- deploy-driver
|
||||
- --manager-image
|
||||
- "longhornio/longhorn-manager:v1.5.1"
|
||||
- "longhornio/longhorn-manager:v1.5.3"
|
||||
- --manager-url
|
||||
- http://longhorn-backend:9500/v1
|
||||
env:
|
||||
|
@ -4245,7 +4245,7 @@ metadata:
|
|||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.5.1
|
||||
app.kubernetes.io/version: v1.5.3
|
||||
app: longhorn-ui
|
||||
name: longhorn-ui
|
||||
namespace: longhorn-system
|
||||
|
@ -4259,7 +4259,7 @@ spec:
|
|||
labels:
|
||||
app.kubernetes.io/name: longhorn
|
||||
app.kubernetes.io/instance: longhorn
|
||||
app.kubernetes.io/version: v1.5.1
|
||||
app.kubernetes.io/version: v1.5.3
|
||||
app: longhorn-ui
|
||||
spec:
|
||||
affinity:
|
||||
|
@ -4276,7 +4276,7 @@ spec:
|
|||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- name: longhorn-ui
|
||||
image: longhornio/longhorn-ui:v1.5.1
|
||||
image: longhornio/longhorn-ui:v1.5.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name : nginx-cache
|
||||
|
|
11
Kubernetes/NetworkPolicies/allow-all-ingress.yaml
Normal file
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: allow-all-ingress
|
||||
spec:
|
||||
podSelector: {}
|
||||
ingress:
|
||||
- {}
|
||||
policyTypes:
|
||||
- Ingress
|
9
Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml
Normal file
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: default-deny-ingress
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Ingress
|
35
Kubernetes/NetworkPolicies/example.yaml
Normal file
|
@ -0,0 +1,35 @@
|
|||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: test-network-policy
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
role: db
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- from:
|
||||
- ipBlock:
|
||||
cidr: 172.17.0.0/16
|
||||
except:
|
||||
- 172.17.1.0/24
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
project: myproject
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
role: frontend
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 6379
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 10.0.0.0/24
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 5978
|
||||
|
17
Kubernetes/NetworkPolicies/namespace-example.yaml
Normal file
|
@ -0,0 +1,17 @@
|
|||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: egress-namespaces
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: myapp
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
- to:
|
||||
- namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: namespace
|
||||
operator: In
|
||||
values: ["frontend", "backend"]
|
24
Kubernetes/NetworkPolicies/networkpolicy-egress.yaml
Normal file
|
@ -0,0 +1,24 @@
|
|||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: allow-internet-only
|
||||
namespace: pihole
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
except:
|
||||
- 10.0.0.0/8
|
||||
- 192.168.0.0/16
|
||||
- 172.16.0.0/20
|
||||
- to:
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
kubernetes.io/metadata.name: "kube-system"
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
k8s-app: "kube-dns"
|
17
Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml
Normal file
|
@ -0,0 +1,17 @@
|
|||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: restrict-internal
|
||||
namespace: pihole
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
ingress:
|
||||
- from:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
except:
|
||||
- 10.0.0.0/8
|
||||
- 192.168.0.0/16
|
||||
- 172.16.0.0/20
|
20
Kubernetes/NetworkPolicies/port-example.yaml
Normal file
|
@ -0,0 +1,20 @@
|
|||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: multi-port-egress
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
role: db
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 10.0.0.0/24
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 32000
|
||||
endPort: 32768
|
||||
|
8
Kubernetes/RKE2/ipAddressPool
Normal file
|
@ -0,0 +1,8 @@
|
|||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: first-pool
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
addresses:
|
||||
- $lbrange
|
|
@ -1,59 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: kube-vip-ds
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: kube-vip-ds
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: kube-vip-ds
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- manager
|
||||
env:
|
||||
- name: vip_arp
|
||||
value: \"true\"
|
||||
- name: vip_interface
|
||||
value: $vipInterface
|
||||
- name: port
|
||||
value: \"6443\"
|
||||
- name: vip_cidr
|
||||
value: \"32\"
|
||||
- name: cp_enable
|
||||
value: \"true\"
|
||||
- name: cp_namespace
|
||||
value: kube-system
|
||||
- name: svc_enable
|
||||
value: \"true\"
|
||||
- name: vip_address
|
||||
value: $vipAddress
|
||||
image: ghcr.io/kube-vip/kube-vip:v0.5.11
|
||||
imagePullPolicy: Always
|
||||
name: kube-vip
|
||||
resources: {}
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
- SYS_TIME
|
||||
hostNetwork: true
|
||||
serviceAccountName: kube-vip
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
updateStrategy: {}
|
||||
status:
|
||||
currentNumberScheduled: 0
|
||||
desiredNumberScheduled: 0
|
||||
numberMisscheduled: 0
|
||||
numberReady: 0"
|
89
Kubernetes/RKE2/kube-vip
Normal file
|
@ -0,0 +1,89 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app.kubernetes.io/name: kube-vip-ds
|
||||
app.kubernetes.io/version: v0.6.3
|
||||
name: kube-vip-ds
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kube-vip-ds
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app.kubernetes.io/name: kube-vip-ds
|
||||
app.kubernetes.io/version: v0.6.3
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
- matchExpressions:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
operator: Exists
|
||||
containers:
|
||||
- args:
|
||||
- manager
|
||||
env:
|
||||
- name: vip_arp
|
||||
value: "true"
|
||||
- name: port
|
||||
value: "6443"
|
||||
- name: vip_interface
|
||||
value: $interface
|
||||
- name: vip_cidr
|
||||
value: "32"
|
||||
- name: cp_enable
|
||||
value: "true"
|
||||
- name: cp_namespace
|
||||
value: kube-system
|
||||
- name: vip_ddns
|
||||
value: "false"
|
||||
- name: svc_enable
|
||||
value: "false"
|
||||
- name: svc_leasename
|
||||
value: plndr-svcs-lock
|
||||
- name: vip_leaderelection
|
||||
value: "true"
|
||||
- name: vip_leasename
|
||||
value: plndr-cp-lock
|
||||
- name: vip_leaseduration
|
||||
value: "5"
|
||||
- name: vip_renewdeadline
|
||||
value: "3"
|
||||
- name: vip_retryperiod
|
||||
value: "1"
|
||||
- name: address
|
||||
value: $vip
|
||||
- name: prometheus_server
|
||||
value: :2112
|
||||
image: ghcr.io/kube-vip/kube-vip:v0.6.3
|
||||
imagePullPolicy: Always
|
||||
name: kube-vip
|
||||
resources: {}
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
hostNetwork: true
|
||||
serviceAccountName: kube-vip
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
updateStrategy: {}
|
||||
status:
|
||||
currentNumberScheduled: 0
|
||||
desiredNumberScheduled: 0
|
||||
numberMisscheduled: 0
|
||||
numberReady: 0
|
||||
|
8
Kubernetes/RKE2/l2Advertisement.yaml
Normal file
|
@ -0,0 +1,8 @@
|
|||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: example
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
ipAddressPools:
|
||||
- first-pool
|
|
@ -95,7 +95,10 @@ done
|
|||
# create RKE2's self-installing manifest dir
|
||||
sudo mkdir -p /var/lib/rancher/rke2/server/manifests
|
||||
# Install the kube-vip deployment into rke2's self-installing manifest folder
|
||||
curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/k3s | vipAddress=$vip vipInterface=$interface sh | sudo tee /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
|
||||
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip
|
||||
cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml
|
||||
sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
|
||||
|
||||
# Find/Replace all k3s entries to represent rke2
|
||||
sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
|
||||
# copy kube-vip.yaml to home directory
|
||||
|
@ -159,8 +162,6 @@ kubectl get nodes
|
|||
# Step 5: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider
|
||||
kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
|
||||
#IP range for loadbalancer services to use
|
||||
kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange
|
||||
|
||||
# Step 6: Add other Masternodes, note we import the token we extracted from step 3
|
||||
for newnode in "${masters[@]}"; do
|
||||
|
@ -199,13 +200,31 @@ for newnode in "${workers[@]}"; do
|
|||
systemctl start rke2-agent.service
|
||||
exit
|
||||
EOF
|
||||
echo -e " \033[32;5mMaster node joined successfully!\033[0m"
|
||||
echo -e " \033[32;5mWorker node joined successfully!\033[0m"
|
||||
done
|
||||
|
||||
kubectl get nodes
|
||||
|
||||
# Step 8: Install Rancher (Optional - Delete if not required)
|
||||
# Step 8: Install Metallb
|
||||
echo -e " \033[32;5mDeploying Metallb\033[0m"
|
||||
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
|
||||
# Download ipAddressPool and configure using lbrange above
|
||||
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool
|
||||
cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
|
||||
|
||||
# Step 9: Deploy IP Pools and l2Advertisement
|
||||
echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m"
|
||||
kubectl wait --namespace metallb-system \
|
||||
--for=condition=ready pod \
|
||||
--selector=component=controller \
|
||||
--timeout=1800s
|
||||
kubectl apply -f ipAddressPool.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml
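# (Optional sketch, not part of the original script) Sanity-check that MetalLB registered
# the pool and L2 advertisement applied above; the CRD names are MetalLB's own:
# kubectl get ipaddresspools.metallb.io -n metallb-system
# kubectl get l2advertisements.metallb.io -n metallb-system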
|
||||
|
||||
# Step 10: Install Rancher (Optional - Delete if not required)
|
||||
#Install Helm
|
||||
echo -e " \033[32;5mInstalling Helm\033[0m"
|
||||
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
|
||||
chmod 700 get_helm.sh
|
||||
./get_helm.sh
|
||||
|
@ -215,6 +234,7 @@ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
|
|||
kubectl create namespace cattle-system
|
||||
|
||||
# Install Cert-Manager
|
||||
echo -e " \033[32;5mDeploying Cert-Manager\033[0m"
|
||||
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
|
@ -225,6 +245,7 @@ helm install cert-manager jetstack/cert-manager \
|
|||
kubectl get pods --namespace cert-manager
|
||||
|
||||
# Install Rancher
|
||||
echo -e " \033[32;5mDeploying Rancher\033[0m"
|
||||
helm install rancher rancher-latest/rancher \
|
||||
--namespace cattle-system \
|
||||
--set hostname=rancher.my.org \
|
||||
|
|
75
Kubernetes/SMB/deployment.yaml
Normal file
|
@ -0,0 +1,75 @@
|
|||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: jellyfin
|
||||
app.kubernetes.io/instance: jellyfin
|
||||
app.kubernetes.io/name: jellyfin
|
||||
name: jellyfin
|
||||
namespace: jellyfin
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: jellyfin
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: jellyfin
|
||||
app.kubernetes.io/name: jellyfin
|
||||
spec:
|
||||
nodeSelector:
|
||||
worker: "true"
|
||||
containers:
|
||||
- image: jellyfin/jellyfin
|
||||
imagePullPolicy: Always
|
||||
name: jellyfin
|
||||
ports:
|
||||
- containerPort: 8096
|
||||
name: web
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: TZ
|
||||
value: Europe/London
|
||||
volumeMounts:
|
||||
- mountPath: "/Audiobooks"
|
||||
readOnly: false
|
||||
name: smb
|
||||
subPath: Audiobooks
|
||||
- mountPath: "/Films"
|
||||
readOnly: false
|
||||
name: smb
|
||||
subPath: Films
|
||||
- mountPath: "/TVShows"
|
||||
readOnly: false
|
||||
name: smb
|
||||
subPath: TVShows
|
||||
- mountPath: "/Music"
|
||||
readOnly: false
|
||||
name: smb
|
||||
subPath: Music
|
||||
volumes:
|
||||
- name: smb
|
||||
persistentVolumeClaim:
|
||||
claimName: pvc-jellyfin-smb
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: jellyfin
|
||||
name: jellyfin
|
||||
namespace: jellyfin
|
||||
spec:
|
||||
ports:
|
||||
- name: web-tcp
|
||||
port: 8096
|
||||
protocol: TCP
|
||||
targetPort: 8096
|
||||
- name: web-udp
|
||||
port: 8096
|
||||
protocol: UDP
|
||||
targetPort: 8096
|
||||
selector:
|
||||
app: jellyfin
|
27
Kubernetes/SMB/pv-smb.yaml
Normal file
|
@ -0,0 +1,27 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
annotations:
|
||||
pv.kubernetes.io/provisioned-by: smb.csi.k8s.io
|
||||
name: pv-jellyfin-smb
|
||||
spec:
|
||||
capacity:
|
||||
storage: 100Gi
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: smb
|
||||
mountOptions:
|
||||
- dir_mode=0777
|
||||
- file_mode=0777
|
||||
csi:
|
||||
driver: smb.csi.k8s.io
|
||||
readOnly: false
|
||||
# volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name}
|
||||
# make sure this value is unique for every share in the cluster
|
||||
volumeHandle: jellyfin
|
||||
volumeAttributes:
|
||||
source: "//192.168.6.2/FreeNAS" # Change this to your SMB IP and share name
|
||||
nodeStageSecretRef:
|
||||
name: smbcreds
|
||||
namespace: default
|
14
Kubernetes/SMB/pvc-smb.yaml
Normal file
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: pvc-jellyfin-smb
|
||||
namespace: jellyfin
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
volumeName: pv-jellyfin-smb
|
||||
storageClassName: smb
|
20
Kubernetes/SMB/readme.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
# Install CSI driver
|
||||
```
|
||||
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/v1.13.0/deploy/install-driver.sh | bash -s v1.13.0 --
|
||||
```
|
||||
|
||||
# Create SMB creds
|
||||
```
|
||||
kubectl create secret generic smbcreds --from-literal username=USERNAME --from-literal password="PASSWORD"
|
||||
```
|
||||
|
||||
# Create storage class
|
||||
```
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/deploy/example/storageclass-smb.yaml
|
||||
```
|
||||
|
||||
# Check status
|
||||
```
|
||||
kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-controller
|
||||
kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-node
|
||||
```
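# Apply the example manifests (sketch)
A minimal sketch, not part of the original readme, for applying the files in this folder; it assumes the file names shown above and that the smbcreds secret from the earlier step lives in the default namespace (as the PV's nodeStageSecretRef expects):
```
kubectl create namespace jellyfin
kubectl apply -f pv-smb.yaml
kubectl apply -f pvc-smb.yaml
kubectl apply -f deployment.yaml
kubectl -n jellyfin get pods -w
```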
|
|
@ -109,6 +109,6 @@ spec:
|
|||
targetPort: 53
|
||||
selector:
|
||||
app: pihole
|
||||
externalTrafficPolicy: Cluster
|
||||
externalTrafficPolicy: Local
|
||||
loadBalancerIP: 192.168.3.67 # this is your DNS IP, NOT THE GUI!
|
||||
type: LoadBalancer
|
||||
|
|
59
Kubernetes/Upgrade/readme.md
Normal file
|
@ -0,0 +1,59 @@
|
|||
# Recommendations Before Upgrading
|
||||
1. Snapshot / Backup your VMs!
|
||||
2. Backup data and volumes if necessary
|
||||
3. Drain nodes / scale down deployments
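For the drain step, a minimal sketch (the node name is a placeholder; adjust the flags to suit your workloads):
```
kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data
# upgrade that node, then let it take workloads again
kubectl uncordon <node-name>
```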
|
||||
|
||||
# Upgrade Rancher
|
||||
```
|
||||
helm upgrade rancher rancher-latest/rancher \
|
||||
--namespace cattle-system \
|
||||
--set hostname=rancher.my.org \
|
||||
```
|
||||
# Upgrade RKE2 (Each node, not Admin!)
|
||||
```
|
||||
sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=latest sh -
|
||||
```
|
||||
then servers:
|
||||
```
|
||||
sudo systemctl restart rke2-server
|
||||
```
|
||||
or agents
|
||||
```
|
||||
sudo systemctl restart rke2-agent
|
||||
```
|
||||
# Upgrade K3S (Each node, not Admin!)
|
||||
```
|
||||
sudo curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest <EXISTING_K3S_ENV> sh -s - <EXISTING_K3S_ARGS>
|
||||
```
|
||||
then servers:
|
||||
```
|
||||
sudo systemctl restart k3s
|
||||
```
|
||||
or agents
|
||||
```
|
||||
sudo systemctl restart k3s-agent
|
||||
```
|
||||
|
||||
# Upgrade Longhorn
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.5.3/deploy/longhorn.yaml
|
||||
```
|
||||
|
||||
# Upgrade Metallb
|
||||
1. Change version on the delete command to the version you are currently running (e.g., v0.13.11)
|
||||
2. Change version on the apply to the new version (e.g., v0.13.12)
|
||||
3. Ensure your Lbrange is still the one you want (check ipAddressPool.yaml)
|
||||
```
|
||||
kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.13.11/config/manifests/metallb-native.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
|
||||
kubectl apply -f ipAddressPool.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml
|
||||
```
|
||||
|
||||
# Upgrade Kube-VIP
|
||||
1. Delete the daemonset in Rancher or use kubectl delete
|
||||
2. Redeploy the daemonset with updated values (check kube-vip file)
|
||||
```
|
||||
kubectl delete -f kube-vip
|
||||
kubectl apply -f kube-vip
|
||||
```
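To confirm the new versions rolled out, a quick check (a sketch; the namespaces match the defaults used elsewhere in this repo):
```
kubectl -n kube-system get ds kube-vip-ds -o wide
kubectl -n metallb-system get pods -o wide
kubectl -n longhorn-system get pods -o wide
```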
|
44
Ollama/docker-compose.yml
Normal file
|
@ -0,0 +1,44 @@
|
|||
version: '3.6'
|
||||
|
||||
services:
|
||||
ollama:
|
||||
# Uncomment below for GPU support
|
||||
# deploy:
|
||||
# resources:
|
||||
# reservations:
|
||||
# devices:
|
||||
# - driver: nvidia
|
||||
# count: 1
|
||||
# capabilities:
|
||||
# - gpu
|
||||
volumes:
|
||||
- ollama:/root/.ollama
|
||||
# Uncomment below to expose Ollama API outside the container stack
|
||||
# ports:
|
||||
# - 11434:11434
|
||||
container_name: ollama
|
||||
pull_policy: always
|
||||
tty: true
|
||||
restart: unless-stopped
|
||||
image: ollama/ollama:latest
|
||||
|
||||
ollama-webui:
|
||||
build:
|
||||
context: .
|
||||
args:
|
||||
OLLAMA_API_BASE_URL: '/ollama/api'
|
||||
dockerfile: Dockerfile
|
||||
image: ollama-webui:latest
|
||||
container_name: ollama-webui
|
||||
depends_on:
|
||||
- ollama
|
||||
ports:
|
||||
- 3000:8080
|
||||
environment:
|
||||
- "OLLAMA_API_BASE_URL=http://ollama:11434/api"
|
||||
extra_hosts:
|
||||
- host.docker.internal:host-gateway
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
ollama: {}
|
5
Ollama/readme.md
Normal file
|
@ -0,0 +1,5 @@
|
|||
1. Clone the repo from: https://github.com/ollama-webui/ollama-webui
|
||||
2. Tweak the docker-compose to your liking
|
||||
3. Run the container: sudo docker compose up -d
|
||||
|
||||
Let it build :)
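A condensed sketch of those steps as shell commands (the clone location is up to you; the docker-compose.yml from this folder is assumed to be the one you run):
```
git clone https://github.com/ollama-webui/ollama-webui
cd ollama-webui
# drop in / merge the docker-compose.yml from this folder, then:
sudo docker compose up -d
# the web UI is published on port 3000 once the build finishes
```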
|
|
@ -41,8 +41,7 @@ services:
|
|||
environment:
|
||||
TZ: 'Europe/London'
|
||||
WEBPASSWORD: 'password'
|
||||
DNS1: '172.70.9.2#5053'
|
||||
DNS2: 'no'
|
||||
PIHOLE_DNS_: '172.70.9.2#5053'
|
||||
DNSMASQ_LISTENING: 'all'
|
||||
VIRTUAL_HOST: pihole.yourdomain.com
|
||||
# Volumes store your data between container upgrades
|
||||
|
|
49
Proxmox-NAS/config.yml
Normal file
|
@ -0,0 +1,49 @@
|
|||
auth:
|
||||
- user: foo
|
||||
group: foo
|
||||
uid: 1000
|
||||
gid: 1000
|
||||
password: bar
|
||||
# - user: baz
|
||||
# group: xxx
|
||||
# uid: 1100
|
||||
# gid: 1200
|
||||
# password_file: /run/secrets/baz_password
|
||||
|
||||
global:
|
||||
- "force user = foo"
|
||||
- "force group = foo"
|
||||
|
||||
share:
|
||||
- name: public
|
||||
comment: Public
|
||||
path: /samba/public
|
||||
browsable: yes
|
||||
readonly: no
|
||||
guestok: yes
|
||||
veto: no
|
||||
recycle: yes
|
||||
# - name: share
|
||||
# path: /samba/share
|
||||
# browsable: yes
|
||||
# readonly: no
|
||||
# guestok: yes
|
||||
# writelist: foo
|
||||
# veto: no
|
||||
# - name: foo
|
||||
# path: /samba/foo
|
||||
# browsable: yes
|
||||
# readonly: no
|
||||
# guestok: no
|
||||
# validusers: foo
|
||||
# writelist: foo
|
||||
# veto: no
|
||||
# hidefiles: /_*/
|
||||
# - name: foo-baz
|
||||
# path: /samba/foo-baz
|
||||
# browsable: yes
|
||||
# readonly: no
|
||||
# guestok: no
|
||||
# validusers: foo,baz
|
||||
# writelist: foo,baz
|
||||
# veto: no
|
28
Proxmox-NAS/docker-compose.yaml
Normal file
|
@ -0,0 +1,28 @@
|
|||
name: samba
|
||||
|
||||
services:
|
||||
samba:
|
||||
image: crazymax/samba
|
||||
container_name: samba
|
||||
network_mode: host
|
||||
volumes:
|
||||
- "./data:/data" # Contains cache, configuration and runtime data
|
||||
- "/smb:/samba/public"
|
||||
# - "./share:/samba/share" - optional additional share - see config.yml for permissions
|
||||
# - "./foo:/samba/foo" - optional additional share - see config.yml for permissions
|
||||
# - "./foo-baz:/samba/foo-baz" - optional additional share - see config.yml for permissions
|
||||
environment:
|
||||
- "TZ=Europe/London"
|
||||
# - "CONFIG_FILE=/your-location" this can be anywhere you want. Default is /data
|
||||
# - "SAMBA_WORKGROUP=WORKGROUP" change to your workgroup, default it WORKGROUP
|
||||
# - "SAMBA_SERVER_STRING=some string" is the equivalent of the NT Description field
|
||||
- "SAMBA_LOG_LEVEL=0"
|
||||
# - "SAMBA_FOLLOW_SYMLINKS=NO" default is yes
|
||||
# - "SAMBA_WIDE_LINKS=NO" default is yes
|
||||
# - "SAMBA_HOSTS_ALLOW=0.0.0.0/0" default 127.0.0.0/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
|
||||
# - "SAMBA_INTERFACES=some-interface" default all
|
||||
# - "WSDD2_ENABLE=1" default is 0
|
||||
# - "WSDD2_HOSTNAME=string" Override hostname (default to host or container name)
|
||||
# - "WSDD2_NETBIOS_NAME=some-name" Set NetBIOS name (default to hostname)
|
||||
# - "WSDD2_INTERFANCE=interface-name" Reply only on this interface
|
||||
restart: always
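A minimal bring-up and client-side check (a sketch: `<host-ip>` is a placeholder and smbclient is assumed to be installed on the client; user foo / password bar come from config.yml above):
```
docker compose up -d
docker logs -f samba                 # watch the shares load
smbclient -L //<host-ip> -U foo      # list shares from a client machine
```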
|
0
Unbound/a-records.conf
Normal file
90
Unbound/docker-compose-vpn.yaml
Normal file
|
@ -0,0 +1,90 @@
|
|||
version: '3'
|
||||
|
||||
networks:
|
||||
dns_net:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.23.0.0/16
|
||||
proxy:
|
||||
external: true
|
||||
|
||||
services:
|
||||
gluetun:
|
||||
image: qmcgaw/gluetun
|
||||
networks:
|
||||
dns_net:
|
||||
ipv4_address: 172.23.0.9
|
||||
container_name: gluetun
|
||||
# line above must be uncommented to allow external containers to connect.
|
||||
# See https://github.com/qdm12/gluetun-wiki/blob/main/setup/connect-a-container-to-gluetun.md#external-container-to-gluetun
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
devices:
|
||||
- /dev/net/tun:/dev/net/tun
|
||||
ports:
|
||||
- 6881:6881
|
||||
- 6881:6881/udp
|
||||
volumes:
|
||||
- /home/ubuntu/docker/gluetun:/gluetun
|
||||
environment:
|
||||
# See https://github.com/qdm12/gluetun-wiki/tree/main/setup#setup
|
||||
- VPN_SERVICE_PROVIDER=nordvpn
|
||||
- VPN_TYPE=wireguard
|
||||
# OpenVPN:
|
||||
# - OPENVPN_USER=
|
||||
# - OPENVPN_PASSWORD=
|
||||
# Wireguard:
|
||||
- WIREGUARD_PRIVATE_KEY=<YOUR_PRIVATE_KEY> # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/providers/nordvpn.md#obtain-your-wireguard-private-key
|
||||
- WIREGUARD_ADDRESSES=10.5.0.2/32
|
||||
# Timezone for accurate log times
|
||||
- TZ=Europe/London
|
||||
# Server list updater
|
||||
# See https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list
|
||||
- UPDATER_PERIOD=24h
|
||||
pihole:
|
||||
container_name: pihole
|
||||
hostname: pihole
|
||||
image: pihole/pihole:latest
|
||||
networks:
|
||||
dns_net:
|
||||
ipv4_address: 172.23.0.7
|
||||
proxy:
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "85:80/tcp"
|
||||
#- "443:443/tcp"
|
||||
environment:
|
||||
- TZ=Europe/London
|
||||
- WEBPASSWORD=password
|
||||
- "PIHOLE_DNS_=172.23.0.9#5053" # gluetun's dns_net address; unbound shares its network namespace
|
||||
volumes:
|
||||
- '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/'
|
||||
- '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
restart: unless-stopped
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.pihole.entrypoints=http"
|
||||
- "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)"
|
||||
- "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https"
|
||||
- "traefik.http.routers.pihole.middlewares=pihole-https-redirect"
|
||||
- "traefik.http.routers.pihole-secure.entrypoints=https"
|
||||
- "traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)"
|
||||
- "traefik.http.routers.pihole-secure.tls=true"
|
||||
- "traefik.http.routers.pihole-secure.service=pihole"
|
||||
- "traefik.http.services.pihole.loadbalancer.server.port=80"
|
||||
- "traefik.docker.network=proxy"
|
||||
unbound:
|
||||
container_name: unbound
|
||||
image: mvance/unbound:latest
|
||||
# no 'networks:' block here: unbound runs inside gluetun's network namespace
# (see network_mode below) and is reached via gluetun's dns_net address
|
||||
network_mode: "service:gluetun"
|
||||
volumes:
|
||||
- /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound
|
||||
# ports cannot be published on a service that uses network_mode: "service:gluetun";
# publish 5053 on the gluetun service instead if you need host access to unbound
|
||||
restart: unless-stopped
|
59
Unbound/docker-compose.yaml
Normal file
|
@ -0,0 +1,59 @@
|
|||
version: '3'
|
||||
|
||||
networks:
|
||||
dns_net:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.23.0.0/16
|
||||
proxy:
|
||||
external: true
|
||||
|
||||
services:
|
||||
pihole:
|
||||
container_name: pihole
|
||||
hostname: pihole
|
||||
image: pihole/pihole:latest # remember to change this if you're using rpi
|
||||
networks:
|
||||
dns_net:
|
||||
ipv4_address: 172.23.0.7
|
||||
proxy:
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "85:80/tcp"
|
||||
#- "443:443/tcp"
|
||||
environment:
|
||||
TZ: 'Europe/London'
|
||||
WEBPASSWORD: 'password'
|
||||
PIHOLE_DNS_: '172.23.0.8#5053'
|
||||
volumes:
|
||||
- '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/'
|
||||
- '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
restart: unless-stopped
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.pihole.entrypoints=http"
|
||||
- "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)"
|
||||
- "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https"
|
||||
- "traefik.http.routers.pihole.middlewares=pihole-https-redirect"
|
||||
- "traefik.http.routers.pihole-secure.entrypoints=https"
|
||||
- "traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)"
|
||||
- "traefik.http.routers.pihole-secure.tls=true"
|
||||
- "traefik.http.routers.pihole-secure.service=pihole"
|
||||
- "traefik.http.services.pihole.loadbalancer.server.port=80"
|
||||
- "traefik.docker.network=proxy"
|
||||
unbound:
|
||||
container_name: unbound
|
||||
image: mvance/unbound:latest # remember to change this if you're using rpi
|
||||
networks:
|
||||
dns_net:
|
||||
ipv4_address: 172.23.0.8
|
||||
volumes:
|
||||
- /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound
|
||||
ports:
|
||||
- "5053:5053/tcp"
|
||||
- "5053:5053/udp"
|
||||
healthcheck:
|
||||
test: ["NONE"]
|
||||
restart: unless-stopped
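A quick resolution check once the stack is up (a sketch; `<docker-host-ip>` stands for the host's LAN address and dig is assumed to be installed there):
```
docker compose up -d
dig @<docker-host-ip> jimsgarage.co.uk   # answered by Pi-hole, which forwards to unbound at 172.23.0.8#5053
```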
|
54
Unbound/forward-records.conf
Normal file
|
@ -0,0 +1,54 @@
|
|||
forward-zone:
|
||||
# Forward all queries (except those in cache and local zone) to
|
||||
# upstream recursive servers
|
||||
name: "."
|
||||
# Queries to this forward zone use TLS
|
||||
forward-tls-upstream: yes
|
||||
|
||||
# https://dnsprivacy.org/wiki/display/DP/DNS+Privacy+Test+Servers
|
||||
|
||||
## Cloudflare
|
||||
#forward-addr: 1.1.1.1@853#cloudflare-dns.com
|
||||
#forward-addr: 1.0.0.1@853#cloudflare-dns.com
|
||||
#forward-addr: 2606:4700:4700::1111@853#cloudflare-dns.com
|
||||
#forward-addr: 2606:4700:4700::1001@853#cloudflare-dns.com
|
||||
|
||||
## Cloudflare Malware
|
||||
# forward-addr: 1.1.1.2@853#security.cloudflare-dns.com
|
||||
# forward-addr: 1.0.0.2@853#security.cloudflare-dns.com
|
||||
# forward-addr: 2606:4700:4700::1112@853#security.cloudflare-dns.com
|
||||
# forward-addr: 2606:4700:4700::1002@853#security.cloudflare-dns.com
|
||||
|
||||
## Cloudflare Malware and Adult Content
|
||||
# forward-addr: 1.1.1.3@853#family.cloudflare-dns.com
|
||||
# forward-addr: 1.0.0.3@853#family.cloudflare-dns.com
|
||||
# forward-addr: 2606:4700:4700::1113@853#family.cloudflare-dns.com
|
||||
# forward-addr: 2606:4700:4700::1003@853#family.cloudflare-dns.com
|
||||
|
||||
## CleanBrowsing Security Filter
|
||||
# forward-addr: 185.228.168.9@853#security-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 185.228.169.9@853#security-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 2a0d:2a00:1::2@853#security-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 2a0d:2a00:2::2@853#security-filter-dns.cleanbrowsing.org
|
||||
|
||||
## CleanBrowsing Adult Filter
|
||||
# forward-addr: 185.228.168.10@853#adult-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 185.228.169.11@853#adult-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 2a0d:2a00:1::1@853#adult-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 2a0d:2a00:2::1@853#adult-filter-dns.cleanbrowsing.org
|
||||
|
||||
## CleanBrowsing Family Filter
|
||||
# forward-addr: 185.228.168.168@853#family-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 185.228.169.168@853#family-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 2a0d:2a00:1::@853#family-filter-dns.cleanbrowsing.org
|
||||
# forward-addr: 2a0d:2a00:2::@853#family-filter-dns.cleanbrowsing.org
|
||||
|
||||
## Quad9
|
||||
forward-addr: 9.9.9.9@853#dns.quad9.net
|
||||
forward-addr: 149.112.112.112@853#dns.quad9.net
|
||||
forward-addr: 2620:fe::fe@853#dns.quad9.net
|
||||
forward-addr: 2620:fe::9@853#dns.quad9.net
|
||||
|
||||
## getdnsapi.net
|
||||
# forward-addr: 185.49.141.37@853#getdnsapi.net
|
||||
# forward-addr: 2a04:b900:0:100::37@853#getdnsapi.net
|
0
Unbound/srv-records.conf
Normal file
387
Unbound/unbound.conf
Normal file
|
@ -0,0 +1,387 @@
|
|||
server:
|
||||
###########################################################################
|
||||
# BASIC SETTINGS
|
||||
###########################################################################
|
||||
# Time to live maximum for RRsets and messages in the cache. If the maximum
|
||||
# kicks in, responses to clients still get decrementing TTLs based on the
|
||||
# original (larger) values. When the internal TTL expires, the cache item
|
||||
# has expired. Can be set lower to force the resolver to query for data
|
||||
# often, and not trust (very large) TTL values.
|
||||
cache-max-ttl: 86400
|
||||
|
||||
# Time to live minimum for RRsets and messages in the cache. If the minimum
|
||||
# kicks in, the data is cached for longer than the domain owner intended,
|
||||
# and thus less queries are made to look up the data. Zero makes sure the
|
||||
# data in the cache is as the domain owner intended, higher values,
|
||||
# especially more than an hour or so, can lead to trouble as the data in
|
||||
# the cache does not match up with the actual data any more.
|
||||
cache-min-ttl: 300
|
||||
|
||||
# Set the working directory for the program.
|
||||
directory: "/opt/unbound/etc/unbound"
|
||||
|
||||
# Enable or disable whether IPv4 queries are answered or issued.
|
||||
# Default: yes
|
||||
do-ip4: yes
|
||||
|
||||
# Enable or disable whether IPv6 queries are answered or issued.
|
||||
# If disabled, queries are not answered on IPv6, and queries are not sent
|
||||
# on IPv6 to the internet nameservers. With this option you can disable the
|
||||
# IPv6 transport for sending DNS traffic, it does not impact the contents
|
||||
# of the DNS traffic, which may have IPv4 (A) and IPv6 (AAAA) addresses in
|
||||
# it.
|
||||
# Default: yes
|
||||
# May be set to yes if you have IPv6 connectivity
|
||||
do-ip6: yes
|
||||
|
||||
# Enable or disable whether TCP queries are answered or issued.
|
||||
# Default: yes
|
||||
do-tcp: yes
|
||||
|
||||
# Enable or disable whether UDP queries are answered or issued.
|
||||
# Default: yes
|
||||
do-udp: yes
|
||||
|
||||
# RFC 6891. Number of bytes size to advertise as the EDNS reassembly buffer
|
||||
# size. This is the value put into datagrams over UDP towards peers.
|
||||
# The actual buffer size is determined by msg-buffer-size (both for TCP and
|
||||
# UDP). Do not set higher than that value.
|
||||
# Default is 1232 which is the DNS Flag Day 2020 recommendation.
|
||||
# Setting to 512 bypasses even the most stringent path MTU problems, but
|
||||
# is seen as extreme, since the amount of TCP fallback generated is
|
||||
# excessive (probably also for this resolver, consider tuning the outgoing
|
||||
# tcp number).
|
||||
edns-buffer-size: 1232
|
||||
|
||||
# Listen to for queries from clients and answer from this network interface
|
||||
# and port.
|
||||
interface: 0.0.0.0@5053
|
||||
# interface: ::0
|
||||
port: 53
|
||||
|
||||
# If enabled, prefer IPv6 transport for sending DNS queries to internet
|
||||
# nameservers.
|
||||
# Default: yes
|
||||
# You want to leave this to no unless you have *native* IPv6. With 6to4 and
|
||||
# Teredo tunnels your web browser should favor IPv4 for the same reasons
|
||||
prefer-ip6: no
|
||||
|
||||
# Rotates RRSet order in response (the pseudo-random number is taken from
|
||||
# the query ID, for speed and thread safety).
|
||||
rrset-roundrobin: yes
|
||||
|
||||
# Drop user privileges after binding the port.
|
||||
username: "_unbound"
|
||||
|
||||
###########################################################################
|
||||
# LOGGING
|
||||
###########################################################################
|
||||
|
||||
# Do not print log lines to inform about local zone actions
|
||||
log-local-actions: no
|
||||
|
||||
# Do not print one line per query to the log
|
||||
log-queries: no
|
||||
|
||||
# Do not print one line per reply to the log
|
||||
log-replies: no
|
||||
|
||||
# Do not print log lines that say why queries return SERVFAIL to clients
|
||||
log-servfail: no
|
||||
|
||||
# If you want to log to a file, use:
|
||||
# logfile: /opt/unbound/etc/unbound/unbound.log
|
||||
# Set log location (using /dev/null further limits logging)
|
||||
logfile: /dev/null
|
||||
|
||||
# Set logging level
|
||||
# Level 0: No verbosity, only errors.
|
||||
# Level 1: Gives operational information.
|
||||
# Level 2: Gives detailed operational information including short information per query.
|
||||
# Level 3: Gives query level information, output per query.
|
||||
# Level 4: Gives algorithm level information.
|
||||
# Level 5: Logs client identification for cache misses.
|
||||
verbosity: 0
|
||||
|
||||
###########################################################################
|
||||
# PERFORMANCE SETTINGS
|
||||
###########################################################################
|
||||
# https://nlnetlabs.nl/documentation/unbound/howto-optimise/
|
||||
# https://nlnetlabs.nl/news/2019/Feb/05/unbound-1.9.0-released/
|
||||
|
||||
# Number of slabs in the infrastructure cache. Slabs reduce lock contention
|
||||
# by threads. Must be set to a power of 2.
|
||||
infra-cache-slabs: 4
|
||||
|
||||
# Number of incoming TCP buffers to allocate per thread. Default
|
||||
# is 10. If set to 0, or if do-tcp is "no", no TCP queries from
|
||||
# clients are accepted. For larger installations increasing this
|
||||
# value is a good idea.
|
||||
incoming-num-tcp: 10
|
||||
|
||||
# Number of slabs in the key cache. Slabs reduce lock contention by
|
||||
# threads. Must be set to a power of 2. Setting (close) to the number
|
||||
# of cpus is a reasonable guess.
|
||||
key-cache-slabs: 4
|
||||
|
||||
# Number of bytes size of the message cache.
|
||||
# Unbound recommendation is to Use roughly twice as much rrset cache memory
|
||||
# as you use msg cache memory.
|
||||
msg-cache-size: 142768128
|
||||
|
||||
# Number of slabs in the message cache. Slabs reduce lock contention by
|
||||
# threads. Must be set to a power of 2. Setting (close) to the number of
|
||||
# cpus is a reasonable guess.
|
||||
msg-cache-slabs: 4
|
||||
|
||||
# The number of queries that every thread will service simultaneously. If
|
||||
# more queries arrive that need servicing, and no queries can be jostled
|
||||
# out (see jostle-timeout), then the queries are dropped.
|
||||
# This is best set at half the number of the outgoing-range.
|
||||
# This Unbound instance was compiled with libevent so it can efficiently
|
||||
# use more than 1024 file descriptors.
|
||||
num-queries-per-thread: 4096
|
||||
|
||||
# The number of threads to create to serve clients.
|
||||
# This is set dynamically at run time to effectively use available CPUs
|
||||
# resources
|
||||
num-threads: 3
|
||||
|
||||
# Number of ports to open. This number of file descriptors can be opened
|
||||
# per thread.
|
||||
# This Unbound instance was compiled with libevent so it can efficiently
|
||||
# use more than 1024 file descriptors.
|
||||
outgoing-range: 8192
|
||||
|
||||
# Number of bytes size of the RRset cache.
|
||||
# Use roughly twice as much rrset cache memory as msg cache memory
|
||||
rrset-cache-size: 285536256
|
||||
|
||||
# Number of slabs in the RRset cache. Slabs reduce lock contention by
|
||||
# threads. Must be set to a power of 2.
|
||||
rrset-cache-slabs: 4
|
||||
|
||||
# Do not insert authority/additional sections into response messages when
|
||||
# those sections are not required. This reduces response size
|
||||
# significantly, and may avoid TCP fallback for some responses. This may
|
||||
# cause a slight speedup.
|
||||
minimal-responses: yes
|
||||
|
||||
# Prefetch message cache elements before they expire, to keep the cache
# up to date. This lowers latency at the expense of a little more CPU
# usage and upstream traffic.
|
||||
prefetch: yes
|
||||
|
||||
# Fetch the DNSKEYs earlier in the validation process, when a DS record is
|
||||
# encountered. This lowers the latency of requests at the expense of little
|
||||
# more CPU usage.
|
||||
prefetch-key: yes
|
||||
|
||||
# Have unbound attempt to serve old responses from cache with a TTL of 0 in
|
||||
# the response without waiting for the actual resolution to finish. The
|
||||
# actual resolution answer ends up in the cache later on.
|
||||
serve-expired: yes
|
||||
|
||||
# If not 0, then set the SO_RCVBUF socket option to get more buffer space on
|
||||
# UDP port 53 incoming queries. So that short spikes on busy servers do not
|
||||
# drop packets (see counter in netstat -su). Otherwise, the number of bytes
|
||||
# to ask for, try "4m" on a busy server.
|
||||
# The OS caps it at a maximum, on linux Unbound needs root permission to
|
||||
# bypass the limit, or the admin can use sysctl net.core.rmem_max.
|
||||
# Default: 0 (use system value)
|
||||
# For example: sysctl -w net.core.rmem_max=4194304
|
||||
# To persist reboots, edit /etc/sysctl.conf to include:
|
||||
# net.core.rmem_max=4194304
|
||||
# Larger socket buffer. OS may need config.
|
||||
# Ensure kernel buffer is large enough to not lose messages in traffic spikes
|
||||
#so-rcvbuf: 4m
|
||||
|
||||
# Open dedicated listening sockets for incoming queries for each thread and
|
||||
# try to set the SO_REUSEPORT socket option on each socket. May distribute
|
||||
# incoming queries to threads more evenly.
|
||||
so-reuseport: yes
|
||||
|
||||
# If not 0, then set the SO_SNDBUF socket option to get more buffer space
|
||||
# on UDP port 53 outgoing queries.
|
||||
# Specify the number of bytes to ask for, try "4m" on a very busy server.
|
||||
# The OS caps it at a maximum, on linux Unbound needs root permission to
|
||||
# bypass the limit, or the admin can use sysctl net.core.wmem_max.
|
||||
# For example: sysctl -w net.core.wmem_max=4194304
|
||||
# To persist reboots, edit /etc/sysctl.conf to include:
|
||||
# net.core.wmem_max=4194304
|
||||
# Default: 0 (use system value)
|
||||
# Larger socket buffer. OS may need config.
|
||||
# Ensure kernel buffer is large enough to not lose messages in traffic spikes
|
||||
#so-sndbuf: 4m
|
||||
|
||||
###########################################################################
|
||||
# PRIVACY SETTINGS
|
||||
###########################################################################
|
||||
|
||||
# RFC 8198. Use the DNSSEC NSEC chain to synthesize NXDOMAIN and other
|
||||
# denials, using information from previous NXDOMAIN answers. In other
|
||||
# words, use cached NSEC records to generate negative answers within a
|
||||
# range and positive answers from wildcards. This increases performance,
|
||||
# decreases latency and resource utilization on both authoritative and
|
||||
# recursive servers, and increases privacy. Also, it may help increase
|
||||
# resilience to certain DoS attacks in some circumstances.
|
||||
aggressive-nsec: yes
|
||||
|
||||
# Extra delay for timeouted UDP ports before they are closed, in msec.
|
||||
# This prevents very delayed answer packets from the upstream (recursive)
|
||||
# servers from bouncing against closed ports and setting off all sort of
|
||||
# close-port counters, with eg. 1500 msec. When timeouts happen you need
|
||||
# extra sockets, it checks the ID and remote IP of packets, and unwanted
|
||||
# packets are added to the unwanted packet counter.
|
||||
delay-close: 10000
|
||||
|
||||
# Prevent the unbound server from forking into the background as a daemon
|
||||
do-daemonize: no
|
||||
|
||||
# Add localhost to the do-not-query-address list.
|
||||
do-not-query-localhost: no
|
||||
|
||||
# Number of bytes size of the aggressive negative cache.
|
||||
neg-cache-size: 4M
|
||||
|
||||
# Send minimum amount of information to upstream servers to enhance
|
||||
# privacy (best privacy).
|
||||
qname-minimisation: yes
|
||||
|
||||
###########################################################################
|
||||
# SECURITY SETTINGS
|
||||
###########################################################################
|
||||
# Only give access to recursion clients from LAN IPs
|
||||
access-control: 127.0.0.1/32 allow
|
||||
access-control: 192.168.0.0/16 allow
|
||||
access-control: 172.16.0.0/12 allow
|
||||
access-control: 10.0.0.0/8 allow
|
||||
access-control: fc00::/7 allow
|
||||
access-control: ::1/128 allow
|
||||
|
||||
# File with trust anchor for one zone, which is tracked with RFC5011
|
||||
# probes.
|
||||
auto-trust-anchor-file: "var/root.key"
|
||||
|
||||
# Enable chroot (i.e, change apparent root directory for the current
|
||||
# running process and its children)
|
||||
chroot: "/opt/unbound/etc/unbound"
|
||||
|
||||
# Deny queries of type ANY with an empty response.
|
||||
deny-any: yes
|
||||
|
||||
# Harden against algorithm downgrade when multiple algorithms are
|
||||
# advertised in the DS record.
|
||||
harden-algo-downgrade: yes
|
||||
|
||||
# RFC 8020. returns nxdomain to queries for a name below another name that
|
||||
# is already known to be nxdomain.
|
||||
harden-below-nxdomain: yes
|
||||
|
||||
# Require DNSSEC data for trust-anchored zones, if such data is absent, the
|
||||
# zone becomes bogus. If turned off you run the risk of a downgrade attack
|
||||
# that disables security for a zone.
|
||||
harden-dnssec-stripped: yes
|
||||
|
||||
# Only trust glue if it is within the servers authority.
|
||||
harden-glue: yes
|
||||
|
||||
# Ignore very large queries.
|
||||
harden-large-queries: yes
|
||||
|
||||
# Perform additional queries for infrastructure data to harden the referral
|
||||
# path. Validates the replies if trust anchors are configured and the zones
|
||||
# are signed. This enforces DNSSEC validation on nameserver NS sets and the
|
||||
# nameserver addresses that are encountered on the referral path to the
|
||||
# answer. Experimental option.
|
||||
harden-referral-path: no
|
||||
|
||||
# Ignore very small EDNS buffer sizes from queries.
|
||||
harden-short-bufsize: yes
|
||||
|
||||
# If enabled the HTTP header User-Agent is not set. Use with caution
|
||||
# as some webserver configurations may reject HTTP requests lacking
|
||||
# this header. If needed, it is better to explicitly set
|
||||
# the http-user-agent.
|
||||
hide-http-user-agent: no
|
||||
|
||||
# Refuse id.server and hostname.bind queries
|
||||
hide-identity: yes
|
||||
|
||||
# Refuse version.server and version.bind queries
|
||||
hide-version: yes
|
||||
|
||||
# Set the HTTP User-Agent header for outgoing HTTP requests. If
|
||||
# set to "", the default, then the package name and version are
|
||||
# used.
|
||||
http-user-agent: "DNS"
|
||||
|
||||
# Report this identity rather than the hostname of the server.
|
||||
identity: "DNS"
|
||||
|
||||
# These private network addresses are not allowed to be returned for public
|
||||
# internet names. Any occurrence of such addresses are removed from DNS
|
||||
# answers. Additionally, the DNSSEC validator may mark the answers bogus.
|
||||
# This protects against DNS Rebinding
|
||||
private-address: 10.0.0.0/8
|
||||
private-address: 172.16.0.0/12
|
||||
private-address: 192.168.0.0/16
|
||||
private-address: 169.254.0.0/16
|
||||
private-address: fd00::/8
|
||||
private-address: fe80::/10
|
||||
private-address: ::ffff:0:0/96
|
||||
|
||||
# Enable ratelimiting of queries (per second) sent to nameserver for
|
||||
# performing recursion. More queries are turned away with an error
|
||||
# (servfail). This stops recursive floods (e.g., random query names), but
|
||||
# not spoofed reflection floods. Cached responses are not rate limited by
|
||||
# this setting. Experimental option.
|
||||
ratelimit: 1000
|
||||
|
||||
# Use this certificate bundle for authenticating connections made to
|
||||
# outside peers (e.g., auth-zone urls, DNS over TLS connections).
|
||||
tls-cert-bundle: /etc/ssl/certs/ca-certificates.crt
|
||||
|
||||
# Set the total number of unwanted replies to keep track of in every thread.
|
||||
# When it reaches the threshold, a defensive action of clearing the rrset
|
||||
# and message caches is taken, hopefully flushing away any poison.
|
||||
# Unbound suggests a value of 10 million.
|
||||
unwanted-reply-threshold: 10000
|
||||
|
||||
# Use 0x20-encoded random bits in the query to foil spoof attempts. This
|
||||
# perturbs the lowercase and uppercase of query names sent to authority
|
||||
# servers and checks if the reply still has the correct casing.
|
||||
# This feature is an experimental implementation of draft dns-0x20.
|
||||
# Experimental option.
|
||||
# Don't use capitalization randomization as it is known to cause DNSSEC issues
|
||||
# see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378
|
||||
use-caps-for-id: yes
|
||||
|
||||
# Help protect users that rely on this validator for authentication from
|
||||
# potentially bad data in the additional section. Instruct the validator to
|
||||
# remove data from the additional section of secure messages that are not
|
||||
# signed properly. Messages that are insecure, bogus, indeterminate or
|
||||
# unchecked are not affected.
|
||||
val-clean-additional: yes
|
||||
|
||||
###########################################################################
|
||||
# FORWARD ZONE
|
||||
###########################################################################
|
||||
|
||||
#include: /opt/unbound/etc/unbound/forward-records.conf
|
||||
|
||||
###########################################################################
|
||||
# LOCAL ZONE
|
||||
###########################################################################
|
||||
|
||||
# Include file for local-data and local-data-ptr
|
||||
#include: /opt/unbound/etc/unbound/a-records.conf
|
||||
#include: /opt/unbound/etc/unbound/srv-records.conf
|
||||
|
||||
###########################################################################
|
||||
# WILDCARD INCLUDE
|
||||
###########################################################################
|
||||
#include: "/opt/unbound/etc/unbound/*.conf"
|
||||
|
||||
remote-control:
|
||||
control-enable: no
|
62
Unifi-Controller/docker-compose.yaml
Normal file
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
version: "2.1"
|
||||
services:
|
||||
unifi-network-application:
|
||||
image: lscr.io/linuxserver/unifi-network-application:latest
|
||||
container_name: unifi-network-application
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Etc/UTC
|
||||
- MONGO_USER=unifi
|
||||
- MONGO_PASS=5nHgg3G0cH9d
|
||||
- MONGO_HOST=unifi-db
|
||||
- MONGO_PORT=27017
|
||||
- MONGO_DBNAME=unifi
|
||||
- MEM_LIMIT=1024 #optional
|
||||
- MEM_STARTUP=1024 #optional
|
||||
# - MONGO_TLS= #optional
|
||||
# - MONGO_AUTHSOURCE= #optional
|
||||
volumes:
|
||||
- /home/ubuntu/docker/unifi-controller:/config
|
||||
ports:
|
||||
- 8443:8443
|
||||
- 3478:3478/udp
|
||||
- 10001:10001/udp
|
||||
- 8080:8080
|
||||
- 1900:1900/udp #optional
|
||||
- 8843:8843 #optional
|
||||
- 8880:8880 #optional
|
||||
- 6789:6789 #optional
|
||||
- 5514:5514/udp #optional
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.unifi.entrypoints=http"
|
||||
- "traefik.http.routers.unifi.rule=Host(`unifi.jimsgarage.co.uk`)"
|
||||
- "traefik.http.middlewares.unifi-https-redirect.redirectscheme.scheme=https"
|
||||
- "traefik.http.routers.unifi.middlewares=unifi-https-redirect"
|
||||
- "traefik.http.routers.unifi-secure.entrypoints=https"
|
||||
- "traefik.http.routers.unifi-secure.rule=Host(`unifi.jimsgarage.co.uk`)"
|
||||
- "traefik.http.routers.unifi-secure.tls=true"
|
||||
- "traefik.http.routers.unifi-secure.service=unifi"
|
||||
- "traefik.http.services.unifi.loadbalancer.server.port=8443"
|
||||
- "traefik.http.services.unifi.loadbalancer.server.scheme=https"
|
||||
- "traefik.docker.network=proxy"
|
||||
networks:
|
||||
proxy:
|
||||
unifi:
|
||||
restart: unless-stopped
|
||||
unifi-db:
|
||||
image: docker.io/mongo:4.4
|
||||
container_name: unifi-db
|
||||
volumes:
|
||||
- /home/ubuntu/docker/unifi-controller-db:/data/db
|
||||
- /home/ubuntu/docker-compose/unifi-controller/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:ro
|
||||
networks:
|
||||
unifi:
|
||||
restart: unless-stopped
|
||||
|
||||
networks:
|
||||
proxy:
|
||||
external: true
|
||||
unifi:
|
2
Unifi-Controller/init-mongo.js
Normal file
|
@ -0,0 +1,2 @@
|
|||
db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi"}]});
|
||||
db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi_stat"}]});
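MONGO_PASS in docker-compose.yaml has to match the pwd values above. A hedged sketch for swapping in a fresh password across both local files (the sed expressions assume unmodified copies; review before running):
```
NEWPASS=$(openssl rand -hex 16)
sed -i "s/5nHgg3G0cH9d/$NEWPASS/g" docker-compose.yaml init-mongo.js
```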
|