Mirror of https://github.com/JamesTurland/JimsGarage.git (synced 2024-11-21 23:40:19 +00:00)
update
parent 0b56372246, commit b644046889
3 changed files with 44 additions and 26 deletions
Docker-Swarm/portainer-agent-stack.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
version: '3.2'

services:
  agent:
    image: portainer/agent:2.19.4
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - agent_network
    deploy:
      mode: global
      placement:
        constraints: [node.platform.os == linux]

  portainer:
    image: portainer/portainer-ce:2.19.4
    command: -H tcp://tasks.agent:9001 --tlsskipverify
    ports:
      - "9443:9443"
      - "9000:9000"
      - "8000:8000"
    volumes:
      - /mnt/Portainer:/data
    networks:
      - agent_network
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.role == manager]

networks:
  agent_network:
    driver: overlay
    attachable: true
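For context, a stack file like this is deployed from a swarm manager. A minimal sketch, assuming the swarm already exists and that the /mnt/Portainer bind path is present on whichever manager the replica lands on (the script below places it on the shared GlusterFS mount):

sudo mkdir -p /mnt/Portainer
docker stack deploy -c portainer-agent-stack.yml portainer
docker stack services portainer   # both services should reach their desired replica counts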
@@ -38,9 +38,6 @@ user=ubuntu
# Interface used on remotes
interface=eth0

# Set the virtual IP address (VIP)
vip=192.168.3.50

# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)

@@ -53,12 +50,6 @@ workers=($worker1 $worker2)
# Array of all
all=($manager1 $worker1 $worker2)

# Array of all minus manager1
allnomanager1=($manager2 $manager3 $worker1 $worker2)

#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80

#ssh certificate name variable
certName=id_rsa

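The certName variable only names the key pair under ~/.ssh that every ssh and scp call in the script relies on. One common way to prepare such a key on the admin machine, shown here as an assumption rather than part of the script:

ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N ""
ssh-copy-id -i ~/.ssh/id_rsa.pub $user@$manager1   # repeat for each node in the arrays above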
@@ -132,9 +123,7 @@ echo -e " \033[32;5mManager1 Completed\033[0m"
managerToken=`cat manager`
workerToken=`cat worker`



# Step 4: Connect additional worker
# Step 3: Connect additional worker
for newnode in "${workers[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
docker swarm join \
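The manager and worker files read here hold join tokens captured earlier on manager1. A hedged sketch of how such tokens are typically produced and consumed, assuming manager1 listens on the default swarm port 2377:

docker swarm join-token -q manager > manager              # on manager1
docker swarm join-token -q worker > worker                # on manager1
docker swarm join --token $workerToken $manager1:2377     # what each worker effectively runs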
@@ -145,7 +134,7 @@ EOF
echo -e " \033[32;5m$newnode - Worker node joined successfully!\033[0m"
done

# Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only
# Step 4: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
gluster peer probe $manager1; gluster peer probe $worker1; gluster peer probe $worker2;
gluster volume create staging-gfs replica 3 $manager1:/gluster/volume1 $worker1:/gluster/volume1 $worker2:/gluster/volume1 force
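Before anything writes to the new volume it is worth confirming the peers and bricks are healthy. A quick check on manager1, assuming the gluster CLI installed in the dependency step:

gluster peer status               # each peer should report "Peer in Cluster (Connected)"
gluster volume info staging-gfs   # should show a replicated volume with all three bricks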
@@ -157,7 +146,7 @@ exit
EOF
echo -e " \033[32;5mGlusterFS created\033[0m"

# Step 6: Connect to all machines to ensure that GlusterFS mount restarts after boot
# Step 5: Connect to all machines to ensure that GlusterFS mount restarts after boot
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
echo 'localhost:/staging-gfs /mnt glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
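The fstab entry only takes effect at the next boot. A quick way to apply and verify it immediately, assuming the GlusterFS client is present on the node:

mount /mnt       # mounts using the new fstab entry
df -h /mnt       # should report localhost:/staging-gfs as the filesystem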
@@ -169,7 +158,7 @@ EOF
done

# OPTIONAL #
# Step 7: Add Portainer
# Step 6: Add Portainer
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
curl -L https://downloads.portainer.io/ce2-19/portainer-agent-stack.yml -o portainer-agent-stack.yml
docker stack deploy -c portainer-agent-stack.yml portainer
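Once the stack is up, a short sanity check from manager1, assuming the ports from the stack file above:

docker stack ps portainer   # expect one portainer replica on a manager and one agent task per node
# then browse to https://<manager-ip>:9443 to create the admin user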
@@ -38,14 +38,11 @@ user=ubuntu
# Interface used on remotes
interface=eth0

# Set the virtual IP address (VIP)
vip=192.168.3.50

# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)

# Array of manager nodes
managers=($manager2 $manager3)
# Array of extra managers
$managers=($manager2 $manager3)

# Array of worker nodes
workers=($worker1 $worker2)
@@ -53,12 +50,6 @@ workers=($worker1 $worker2)
# Array of all
all=($manager1 $manager2 $manager3 $worker1 $worker2)

# Array of all minus manager1
allnomanager1=($manager2 $manager3 $worker1 $worker2)

#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80

#ssh certificate name variable
certName=id_rsa

@@ -90,6 +81,8 @@ scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.s
# Install dependencies for each node (Docker, GlusterFS)
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
iptables -F
iptables -P INPUT ACCEPT
# Add Docker's official GPG key:
apt-get update
NEEDRESTART_MODE=a apt install ca-certificates curl gnupg -y
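The hunk is cut off right after the GPG-key comment. For reference only, the standard Docker apt-repository setup that such a comment usually introduces looks roughly like this (based on Docker's Ubuntu install documentation, not necessarily verbatim from this script):

install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update && apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin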