#!/bin/bash
#
# swarm-3-nodes.sh — provision a 3-node Docker Swarm (1 manager + 2 workers)
# from an admin machine over SSH, install Docker + GlusterFS on every node,
# build a replicated GlusterFS volume mounted at /mnt, and (optionally)
# deploy Portainer on the manager.
#
# NOTE(review): this file was recovered from a collapsed diff. The interiors
# of several ssh here-documents were truncated in the recovered source and
# have been reconstructed below; every heredoc marked "reconstructed" must be
# verified against the original before this script is run.

echo -e " \033[33;5m    __  _                     _____                           \033[0m"
echo -e " \033[33;5m    \ \(_)_ __ ___  ___( )__   / _ \__ _ _ __ __ _  __ _  ___  \033[0m"
echo -e " \033[33;5m     \ \ | '_ \` _ \/ __|/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m"
echo -e " \033[33;5m  /\_/ / | | | | | \__ \ \__ \/ /_\\ (_| | | | (_| | (_| |  __/ \033[0m"
echo -e " \033[33;5m  \___/|_|_| |_| |_|___/ \____/\__,_|_|  \__,_|\__, |\___|     \033[0m"
echo -e " \033[33;5m                                               |___/           \033[0m"

echo -e " \033[36;5m  ___          _             ___                           \033[0m"
echo -e " \033[36;5m | \ \  ___ __| |_____ _ _  / __|_ __ ____ _ _ _ _ __     \033[0m"
echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V  V / _\` | '_| '  \    \033[0m"
echo -e " \033[36;5m |___/\___/\__|_\_\___|_|   |___/\_/\_/\__,_|_| |_|_|_|   \033[0m"
echo -e " \033[36;5m                                                          \033[0m"
echo -e " \033[32;5m             https://youtube.com/@jims-garage             \033[0m"
echo -e " \033[32;5m                                                          \033[0m"


#############################################
# YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
#############################################

# Set the IP addresses of the admin, managers, and workers nodes
admin=192.168.3.5
manager1=192.168.3.21
manager2=192.168.3.22
manager3=192.168.3.23
worker1=192.168.3.24
worker2=192.168.3.25

# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM)
workerHostname1=dockerSwarm-04
workerHostname2=dockerSwarm-05

# User of remote machines
user=ubuntu

# Interface used on remotes
# NOTE(review): not referenced in the visible body of this 3-node variant;
# presumably carried over from the 5-node/keepalived script — confirm.
interface=eth0

# Set the virtual IP address (VIP)
# NOTE(review): unused in this 3-node variant as recovered — confirm.
vip=192.168.3.50

# Array of all manager nodes (NOTE(review): unused in the visible body)
allmanagers=("$manager1" "$manager2" "$manager3")

# Array of additional manager nodes (NOTE(review): unused in the visible body)
managers=("$manager2" "$manager3")

# Array of worker nodes
workers=("$worker1" "$worker2")

# Array of the 3 nodes that make up this swarm (1 manager + 2 workers).
# manager2/manager3 are deliberately not part of this variant.
all=("$manager1" "$worker1" "$worker2")

# Array of all nodes minus manager1
# NOTE(review): includes manager2/manager3, which are not in `all` for this
# 3-node variant; unused in the visible body — confirm before relying on it.
allnomanager1=("$manager2" "$manager3" "$worker1" "$worker2")

# Loadbalancer IP range (NOTE(review): unused in the visible body)
lbrange=192.168.3.60-192.168.3.80

# ssh certificate name variable
certName=id_rsa

#############################################
#           DO NOT EDIT BELOW               #
#############################################

# For testing purposes - in case time is wrong due to VM snapshots
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on

# Move SSH certs to ~/.ssh and change permissions
cp "/home/$user/$certName" "/home/$user/$certName.pub" "/home/$user/.ssh/"
chmod 600 "/home/$user/.ssh/$certName"
chmod 644 "/home/$user/.ssh/$certName.pub"

# Create SSH config file to skip host-key checking (don't use in production!)
echo "StrictHostKeyChecking no" > ~/.ssh/config

# Add ssh keys for all nodes
for node in "${all[@]}"; do
  ssh-copy-id "$user@$node"
done

# Copy SSH keys to manager1 so it can push the join tokens back to the admin
# machine later (copies the PRIVATE key to a remote host — acceptable for a
# lab, not for production).
scp -i "/home/$user/.ssh/$certName" "/home/$user/$certName" "$user@$manager1:~/.ssh"
scp -i "/home/$user/.ssh/$certName" "/home/$user/$certName.pub" "$user@$manager1:~/.ssh"

# Install dependencies on each node (Docker, GlusterFS).
# NOTE(review): installing docker-ce requires Docker's apt repository to be
# configured first; those setup commands were truncated from the recovered
# source — TODO: restore the repo/GPG-key setup inside this heredoc.
for newnode in "${all[@]}"; do
  ssh "$user@$newnode" -i ~/.ssh/"$certName" sudo su <<EOF
  apt-get update
  NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
  NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y
  systemctl start glusterd
  systemctl enable glusterd
  mkdir -p /gluster/volume1
  exit
EOF
  echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m"
done

# Step 1: Create the swarm on the first node and export both join tokens to
# the admin machine. Variables expand on the ADMIN side before the heredoc is
# sent — that is intentional.
# NOTE(review): the `docker swarm init` / manager-token lines were truncated
# in the recovered source and have been reconstructed — verify.
ssh -tt "$user@$manager1" -i ~/.ssh/"$certName" sudo su <<EOF
docker swarm init --advertise-addr $manager1
docker swarm join-token manager | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > manager.txt
docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt
echo "StrictHostKeyChecking no" > ~/.ssh/config
ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin
scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager
scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker
exit
EOF
echo -e " \033[32;5mManager1 Completed\033[0m"

# Step 2: Read the join tokens that manager1 pushed back.
# (managerToken is unused in this 3-node variant — no extra managers join.)
managerToken=$(cat manager)
workerToken=$(cat worker)

# Step 4: Join the worker nodes to the swarm.
# NOTE(review): the heredoc interior was truncated in the recovered source;
# the join command below is reconstructed — verify port/address form.
for newnode in "${workers[@]}"; do
  ssh -tt "$user@$newnode" -i ~/.ssh/"$certName" sudo su <<EOF
  docker swarm join --token $workerToken $manager1
  exit
EOF
  echo -e " \033[32;5m$newnode - Worker node joined successfully!\033[0m"
done

# Step 5: Create the replicated GlusterFS volume across all 3 nodes (run on
# manager1) and label the workers.
# NOTE(review): this whole step was truncated in the recovered source and has
# been reconstructed — verify before use.
ssh -tt "$user@$manager1" -i ~/.ssh/"$certName" sudo su <<EOF
gluster peer probe $manager1; gluster peer probe $worker1; gluster peer probe $worker2;
gluster volume create staging-gfs replica 3 $manager1:/gluster/volume1 $worker1:/gluster/volume1 $worker2:/gluster/volume1 force
gluster volume start staging-gfs
chmod 666 /var/run/docker.sock
docker node update --label-add worker=true $workerHostname1
docker node update --label-add worker=true $workerHostname2
exit
EOF
echo -e " \033[32;5mGlusterFS created\033[0m"

# Step 6: On every node, mount the Gluster volume now and persist the mount
# across reboots via /etc/fstab.
for newnode in "${all[@]}"; do
  ssh -tt "$user@$newnode" -i ~/.ssh/"$certName" sudo su <<EOF
  echo 'localhost:/staging-gfs /mnt glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
  mount.glusterfs localhost:/staging-gfs /mnt
  chown -R root:docker /mnt
  exit
EOF
  echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m"
done

# OPTIONAL #
# Step 7: Add Portainer.
# NOTE(review): the heredoc interior was cut off at the end of the recovered
# source and has been reconstructed — verify the stack URL/commands.
ssh -tt "$user@$manager1" -i ~/.ssh/"$certName" sudo su <<EOF
mkdir -p /mnt/Portainer
curl -L https://downloads.portainer.io/ce2-19/portainer-agent-stack.yml -o portainer-agent-stack.yml
docker stack deploy -c portainer-agent-stack.yml portainer
docker node ls
docker service ls
exit
EOF
echo -e " \033[32;5mPortainer deployed\033[0m"