James Turland 2024-01-11 21:29:24 +00:00
commit 34dfa32741
12 changed files with 534 additions and 31 deletions

View file

@@ -0,0 +1,38 @@
version: '3.2'
services:
agent:
image: portainer/agent:2.19.4
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
networks:
- agent_network
deploy:
mode: global
placement:
constraints: [node.platform.os == linux]
portainer:
image: portainer/portainer-ce:2.19.4
command: -H tcp://tasks.agent:9001 --tlsskipverify
ports:
- "9443:9443"
- "9000:9000"
- "8000:8000"
volumes:
- type: bind
source: /mnt/Portainer
target: /data
networks:
- agent_network
deploy:
mode: replicated
replicas: 1
placement:
constraints: [node.role == manager]
networks:
agent_network:
driver: overlay
attachable: true
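
This stack file can also be deployed by hand from a manager node (the swarm scripts below automate this step, including creating the /mnt/Portainer bind-mount directory):

```
docker stack deploy -c portainer-agent-stack.yml portainer
```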

View file

@@ -38,9 +38,6 @@ user=ubuntu
# Interface used on remotes
interface=eth0
# Set the virtual IP address (VIP)
vip=192.168.3.50
# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)
@@ -53,12 +50,6 @@ workers=($worker1 $worker2)
# Array of all
all=($manager1 $worker1 $worker2)
# Array of all minus manager1
allnomanager1=($manager2 $manager3 $worker1 $worker2)
#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80
#ssh certificate name variable
certName=id_rsa
@@ -132,9 +123,7 @@ echo -e " \033[32;5mManager1 Completed\033[0m"
managerToken=`cat manager`
workerToken=`cat worker`
# Step 4: Connect additional worker
# Step 3: Connect additional worker
for newnode in "${workers[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
docker swarm join \
@@ -145,7 +134,7 @@ EOF
echo -e " \033[32;5m$newnode - Worker node joined successfully!\033[0m"
done
# Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only
# Step 4: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
gluster peer probe $manager1; gluster peer probe $worker1; gluster peer probe $worker2;
gluster volume create staging-gfs replica 3 $manager1:/gluster/volume1 $worker1:/gluster/volume1 $worker2:/gluster/volume1 force
@@ -157,7 +146,7 @@ exit
EOF
echo -e " \033[32;5mGlusterFS created\033[0m"
# Step 6: Connect to all machines to ensure that GlusterFS mount restarts after boot
# Step 5: Connect to all machines to ensure that GlusterFS mount restarts after boot
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
echo 'localhost:/staging-gfs /mnt glusterfs defaults,_netdev,backupvolfile-server=localhost 0 0' >> /etc/fstab
@@ -169,9 +158,10 @@ EOF
done
# OPTIONAL #
# Step 7: Add Portainer
# Step 6: Add Portainer
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
curl -L https://downloads.portainer.io/ce2-19/portainer-agent-stack.yml -o portainer-agent-stack.yml
mkdir /mnt/Portainer
curl -L https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Docker-Swarm/portainer-agent-stack.yml -o portainer-agent-stack.yml
docker stack deploy -c portainer-agent-stack.yml portainer
docker node ls
docker service ls
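
Once the script finishes, a quick sanity check on any node confirms the GlusterFS volume and swarm services came up (standard commands, shown as a sketch):

```
df -hT /mnt                      # should list a fuse.glusterfs mount on /mnt
gluster volume info staging-gfs  # volume should be Started with 3 bricks
```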

View file

@@ -38,13 +38,10 @@ user=ubuntu
# Interface used on remotes
interface=eth0
# Set the virtual IP address (VIP)
vip=192.168.3.50
# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)
# Array of manager nodes
# Array of extra managers
managers=($manager2 $manager3)
# Array of worker nodes
@@ -53,12 +50,6 @@ workers=($worker1 $worker2)
# Array of all
all=($manager1 $manager2 $manager3 $worker1 $worker2)
# Array of all minus manager1
allnomanager1=($manager2 $manager3 $worker1 $worker2)
#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80
#ssh certificate name variable
certName=id_rsa
@@ -90,6 +81,8 @@ scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.s
# Install dependencies for each node (Docker, GlusterFS)
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
iptables -F
iptables -P INPUT ACCEPT
# Add Docker's official GPG key:
apt-get update
NEEDRESTART_MODE=a apt install ca-certificates curl gnupg -y
@@ -178,7 +171,8 @@ done
# OPTIONAL #
# Step 7: Add Portainer
ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <<EOF
curl -L https://downloads.portainer.io/ce2-19/portainer-agent-stack.yml -o portainer-agent-stack.yml
mkdir /mnt/Portainer
curl -L https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Docker-Swarm/portainer-agent-stack.yml -o portainer-agent-stack.yml
docker stack deploy -c portainer-agent-stack.yml portainer
docker node ls
docker service ls
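
Note that the `iptables -F` and `iptables -P INPUT ACCEPT` lines added above only change the running ruleset and are lost on reboot. A minimal sketch for persisting them on Ubuntu, assuming an open INPUT policy is really what you want:

```
apt install iptables-persistent -y
netfilter-persistent save
```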

View file

@@ -0,0 +1,301 @@
#!/bin/bash
# for Debian this must be installed for Longhorn to work
# sudo apt-get install -y open-iscsi
###########################
# DEFAULT VALUES #
###########################
os_options=("Debian" "Ubuntu")
os="Debian"
# Proxmox path to the template folder
template_path="/var/lib/vz/template"
# Proxmox certificate path
cert_path="/root/.ssh"
# Number of VMs to be created
vm_number=3
# The first VM id, smallest id is 100
id=121
# Name prefix of the first VM
name=k3s
drive_name=local-zfs
agent=0 # TODO: Implement User Option for it
disk_size=20G
memory=2048
core=2
# IP for the first VM
ip=192.168.0.21
gateway=192.168.0.1
# ssh certificate name variable
cert_name=id_rsa
# User settings
user=$USER
password=password
ubuntu_url=https://cloud-images.ubuntu.com/lunar/current/lunar-server-cloudimg-amd64.img
ubuntu_filename=lunar-server-cloudimg-amd64.img
debian_url=https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2
debian_filename=debian-12-genericcloud-amd64.qcow2
os_url=https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2
os_filename=debian-12-genericcloud-amd64.qcow2
##################
# Functions #
##################
function run() {
get_user_variables
print_info # Prints information about what will be created based on defaults/user inputs
setup # Do not worry it asks for confirmation before the setup/installation starts
start_vms # You can choose to start all VMs if you want
#qemu_agent # Not implemented yet, you can choose to add qemu-agent to the installation image
}
function get_user_variables() {
echo -e -n "\e[36mWhich OS cloud image would you like to use?\n\e[0m"
PS3=""
select option in "${os_options[@]}"; do
# Check if the user selected an option
if [[ -n "$option" ]]; then
# Do something with the selected option
case $option in
"Debian") ;;
"Ubuntu") ;;
*)
echo -e "\e[31mInvalid option selected. Exiting...\e[0m"
exit
;;
esac
else
# No option was selected
echo -e "\e[31mNo option was selected. Exiting...\e[0m"
exit
fi
# Set the selected Operating system
os=$option
# Exit the select loop
break
done
echo -e "\e[36mHow many VM do you want to create? \e[0m"
read -e -p "" -i "$vm_number" vm_number
echo -e "\e[36mFirst VM ID? (minimum 100)\e[0m"
read -e -p "" -i $id id
echo -e "\e[36mVM name prefix? \e[0m"
read -e -p "" -i $name name
echo -e "\e[36mIP address? \e[0m"
read -e -p "" -i $ip ip
# Split the IP address into its parts using the '.' character as the delimiter.
ip_address_parts=(${ip//./ })
octet1=${ip_address_parts[0]}
octet2=${ip_address_parts[1]}
octet3=${ip_address_parts[2]}
octet4=${ip_address_parts[3]}
echo -e "\e[36mGateway? \e[0m"
read -e -p "" -i $gateway gateway
echo -e "\e[36mDisk Size? \e[0m"
read -e -p "" -i $disk_size disk_size
echo -e "\e[36mMemory Size? \e[0m"
read -e -p "" -i $memory memory
echo -e "\e[36mNumber of processor cores? \e[0m"
read -e -p "" -i $core core
echo -e "\e[36mUser name? \e[0m"
read -e -p "" -i $user user
echo -e "\e[36mUser password? \e[0m"
read -e -p "" -i $password password
echo -e "\e[36mCertification name? \e[0m"
read -e -p "" -i $cert_name cert_name
echo -e "\e[36mDrive name to store images? \e[0m"
read -e -p "" -i $drive_name drive_name
}
#
function qemu_agent() {
yesno=n
echo -e "\e[36mDo you want to add qemu agent to the VM images? (y/n) \e[0m"
read -e -p "" -i $yesno yesno
case $yesno in
[Yy]*)
# Install qemu agent packages for each VM
echo -e "\e[32mInstalling qemu agent packages.\e[0m"
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
# TODO: ssh into all VMs one by one and install the necessary qemu agent packages
done
;;
[Nn]*)
echo -e "\e[33mSkipping qemu agent installation.\e[0m"
;;
*) ;;
esac
}
function print_info() {
echo -e "\e[36m\nThe following Virtual Machines will be created:\e[0m"
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[32mVM ID: $(($id + $i - 1)), Name: $name-$idx, IP address: $octet1.$octet2.$octet3.$(($octet4 + $i - 1))\e[0m"
done
echo -e "\e[36m\nCommon VM parameters:\e[0m"
echo -e "\e[32mOS cloud image:\e[0m" "$os"
echo -e "\e[32mPublic Proxmox Certificate:\e[0m" "$cert_path/$cert_name.pub\n"
echo -e "\e[32mGateway:\e[0m" "$gateway"
echo -e "\e[32mDisk size:\e[0m" "$disk_size""B"
echo -e "\e[32mMemory size:\e[0m" "$memory""GB"
echo -e "\e[32mCPU cores:\e[0m" "$core"
echo -e "\e[32mDrive name:\e[0m" "$drive_name"
}
function setup() {
yesno=n
echo -e "\e[36mDo you want to proceed with the setup? (y/n) \e[0m"
read -e -p "" -i $yesno yesno
case $yesno in
[Yy]*)
get_os_image
create_vms
;;
[Nn]*)
echo -e "\e[31mInstallation aborted by user. No changes were made.\e[0m"
exit
;;
*) ;;
esac
}
function start_vms() {
yesno=n
echo -e "\e[36mDo you want to start up the Virtual Machines now? (y/n) \e[0m"
read -e -p "" -i $yesno yesno
case $yesno in
[Yy]*)
# Start VMs
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[33mStarting Virtual Machine $idx\e[0m"
qm start $(($id + $i - 1))
done
# Print VMs statuses
for ((i = 1; i <= $vm_number; i++)); do
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[33mVirtual Machine $idx status: \e[0m"
qm status $(($id + $i - 1))
done
;;
[Nn]*)
exit
;;
*) ;;
esac
}
function get_os_image() {
case $os in
"Debian")
os_url=$debian_url
os_filename=$debian_filename
# Check if the directory exists.
if [ ! -d "$template_path/qcow" ]; then
mkdir $template_path/qcow
fi
cd $template_path/qcow
;;
"Ubuntu")
os_url=$ubuntu_url
os_filename=$ubuntu_filename
# Check if the directory exists.
if [ ! -d "$template_path/iso" ]; then
mkdir $template_path/iso
fi
cd $template_path/iso
;;
*)
echo -e "\e[31Invalid option.\e[0m"
;;
esac
# Check if the os image file already exists.
# If not then download it.
if [ ! -f "$os_filename" ]; then
# Download the selected os cloud image
echo -e "\e[33mDownloading $os cloud image ...\e[0m"
wget $os_url
fi
}
# Only runs if you uncomment the function in `create_vms`. Please be careful
function destroy_existing_vms() {
# Stop and destroy Virtual Machine if it already exists
# TODO: Put loop and confirmation before doing anything
qm stop $(($id + $i - 1))
qm destroy $(($id + $i - 1)) --destroy-unreferenced-disks --purge
}
function create_vms() {
for ((i = 1; i <= $vm_number; i++)); do
# Stop and destroy Virtual Machine if it already exists.
# Be really careful with this; only uncomment it if you know what you are doing!!!
#
# destroy_existing_vms
#
# #############################
# Create VM from the cloud image
if [[ $i -le 9 ]]; then
idx="0$i"
else
idx=$i
fi
echo -e "\e[33mCreating Virtual Machine: $idx\e[0m"
echo "VM ID: $(($id + $i - 1)), Name: $name-$idx, IP address: $octet1.$octet2.$octet3.$(($octet4 + $i - 1))"
qm create $(($id + $i - 1)) \
--memory $memory \
--cores $core \
--numa 1 \
--name $name-$idx \
--net0 virtio,bridge=vmbr0 \
--balloon 0 \
--ipconfig0 gw=$gateway,ip=$octet1.$octet2.$octet3.$(($octet4 + $i - 1))/24 \
--cipassword $password \
--ciuser $user \
--ciupgrade 1 \
--sshkeys $cert_path/$cert_name.pub \
--agent=$agent
qm importdisk $(($id + $i - 1)) $os_filename $drive_name
qm set $(($id + $i - 1)) --scsihw virtio-scsi-pci --scsi0 $drive_name:vm-$(($id + $i - 1))-disk-0
qm disk resize $(($id + $i - 1)) scsi0 $disk_size
qm set $(($id + $i - 1)) --ide2 $drive_name:cloudinit
qm set $(($id + $i - 1)) --boot c --bootdisk scsi0
qm set $(($id + $i - 1)) --serial0 socket --vga serial0
done
}
#########################
# Run the script #
#########################
run
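
The script is interactive and runs on the Proxmox host itself as root; the filename below is an assumption, as this commit does not name the script:

```
chmod +x create-vms.sh
./create-vms.sh
```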

View file

@@ -0,0 +1,9 @@
# Simple script to create multiple Virtual Machines automatically
1. It asks you a few questions about the Virtual Machines you want.
- You can select a Debian or Ubuntu image
2. Prints detailed info about the VMs that are going to be created.
3. Lets you confirm whether you want to continue
4. You can choose to start all VMs at the end 🚀
Enjoy 🙂

View file

@@ -117,7 +117,7 @@ k3sup install \
--tls-san $vip \
--cluster \
--k3s-version $k3sVersion \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1" \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--merge \
--sudo \
--local-path $HOME/.kube/config \
@@ -152,7 +152,7 @@ for newnode in "${masters[@]}"; do
--server \
--server-ip $master1 \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode" \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--server-user $user
echo -e " \033[32;5mMaster node joined successfully!\033[0m"
done
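
The added `--node-taint node-role.kubernetes.io/master=true:NoSchedule` keeps ordinary workloads off the master nodes. After the nodes join, the taint can be verified with standard kubectl (a sketch):

```
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
```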

View file

@@ -0,0 +1,59 @@
# Recommendations Before Upgrading
1. Snapshot / Backup your VMs!
2. Backup data and volumes if necessary
3. Drain nodes / scale down deployments
# Upgrade Rancher
```
helm upgrade rancher rancher-latest/rancher \
--namespace cattle-system \
--set hostname=rancher.my.org
```
# Upgrade RKE2 (Each node, not Admin!)
```
curl -sfL https://get.rke2.io | sudo INSTALL_RKE2_CHANNEL=latest sh -
```
then servers:
```
sudo systemctl restart rke2-server
```
or agents
```
sudo systemctl restart rke2-agent
```
# Upgrade K3S (Each node, not Admin!)
```
curl -sfL https://get.k3s.io | sudo INSTALL_K3S_CHANNEL=latest <EXISTING_K3S_ENV> sh -s - <EXISTING_K3S_ARGS>
```
then servers:
```
sudo systemctl restart k3s
```
or agents
```
sudo systemctl restart k3s-agent
```
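
`<EXISTING_K3S_ENV>` and `<EXISTING_K3S_ARGS>` stand for whatever the node was originally installed with; leave them as they are on your system. Purely as a hypothetical example, a server installed with the flags used elsewhere in this repo would be upgraded with:

```
curl -sfL https://get.k3s.io | sudo INSTALL_K3S_CHANNEL=latest sh -s - server --disable traefik --disable servicelb
```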
# Upgrade Longhorn
```
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.5.3/deploy/longhorn.yaml
```
# Upgrade Metallb
1. Change version on the delete command to the version you are currently running (e.g., v0.13.11)
2. Change version on the apply to the new version (e.g., v0.13.12)
3. Ensure your load-balancer IP range (lbrange) is still the one you want (check ipAddressPool.yaml)
```
kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.13.11/config/manifests/metallb-native.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
kubectl apply -f ipAddressPool.yaml
kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml
```
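
If you no longer have ipAddressPool.yaml to hand, a minimal sketch of the pool manifest (the pool name is an assumption; the range matches the lbrange used elsewhere in this repo):

```
kubectl apply -f - <<'EOF'
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool        # name assumed; match your existing pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.3.60-192.168.3.80
EOF
```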
# Upgrade Kube-VIP
1. Delete the daemonset in Rancher or use kubectl delete
2. Redeploy the daemonset with updated values (check kube-vip file)
```
kubectl delete -f kube-vip
kubectl apply -f kube-vip
```

Ollama/docker-compose.yml Normal file
View file

@@ -0,0 +1,44 @@
version: '3.6'
services:
ollama:
# Uncomment below for GPU support
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities:
# - gpu
volumes:
- ollama:/root/.ollama
# Uncomment below to expose Ollama API outside the container stack
# ports:
# - 11434:11434
container_name: ollama
pull_policy: always
tty: true
restart: unless-stopped
image: ollama/ollama:latest
ollama-webui:
build:
context: .
args:
OLLAMA_API_BASE_URL: '/ollama/api'
dockerfile: Dockerfile
image: ollama-webui:latest
container_name: ollama-webui
depends_on:
- ollama
ports:
- 3000:8080
environment:
- "OLLAMA_API_BASE_URL=http://ollama:11434/api"
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
volumes:
ollama: {}
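
Once the stack is built and running (see the readme below), the web UI is on http://localhost:3000 per the port mapping above. A quick smoke test against the ollama container (the model name is only an example):

```
sudo docker exec -it ollama ollama pull llama2
sudo docker exec -it ollama ollama run llama2 "Say hello"
```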

Ollama/readme.md Normal file
View file

@@ -0,0 +1,5 @@
1. Clone the repo from: https://github.com/ollama-webui/ollama-webui
2. Tweak the docker-compose to your liking
3. Run the container: sudo docker compose up -d
Let it build :)

View file

@@ -41,8 +41,7 @@ services:
environment:
TZ: 'Europe/London'
WEBPASSWORD: 'password'
DNS1: '172.70.9.2#5053'
DNS2: 'no'
PIHOLE_DNS_: '172.70.9.2#5053'
DNSMASQ_LISTENING: 'all'
VIRTUAL_HOST: pihole.yourdomain.com
# Volumes store your data between container upgrades
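
PIHOLE_DNS_ supersedes the deprecated DNS1/DNS2 variables in recent Pi-hole images; multiple upstreams now go in the one variable, separated by semicolons. To confirm resolution works through the new upstream (a sketch; substitute your Pi-hole host's IP):

```
dig @192.168.1.10 example.com +short
```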

View file

@@ -0,0 +1,62 @@
---
version: "2.1"
services:
unifi-network-application:
image: lscr.io/linuxserver/unifi-network-application:latest
container_name: unifi-network-application
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
- MONGO_USER=unifi
- MONGO_PASS=5nHgg3G0cH9d
- MONGO_HOST=unifi-db
- MONGO_PORT=27017
- MONGO_DBNAME=unifi
- MEM_LIMIT=1024 #optional
- MEM_STARTUP=1024 #optional
# - MONGO_TLS= #optional
# - MONGO_AUTHSOURCE= #optional
volumes:
- /home/ubuntu/docker/unifi-controller:/config
ports:
- 8443:8443
- 3478:3478/udp
- 10001:10001/udp
- 8080:8080
- 1900:1900/udp #optional
- 8843:8843 #optional
- 8880:8880 #optional
- 6789:6789 #optional
- 5514:5514/udp #optional
labels:
- "traefik.enable=true"
- "traefik.http.routers.unifi.entrypoints=http"
- "traefik.http.routers.unifi.rule=Host(`unifi.jimsgarage.co.uk`)"
- "traefik.http.middlewares.unifi-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.unifi.middlewares=unifi-https-redirect"
- "traefik.http.routers.unifi-secure.entrypoints=https"
- "traefik.http.routers.unifi-secure.rule=Host(`unifi.jimsgarage.co.uk`)"
- "traefik.http.routers.unifi-secure.tls=true"
- "traefik.http.routers.unifi-secure.service=unifi"
- "traefik.http.services.unifi.loadbalancer.server.port=8443"
- "traefik.http.services.unifi.loadbalancer.server.scheme=https"
- "traefik.docker.network=proxy"
networks:
proxy:
unifi:
restart: unless-stopped
unifi-db:
image: docker.io/mongo:4.4
container_name: unifi-db
volumes:
- /home/ubuntu/docker/unifi-controller-db:/data/db
- /home/ubuntu/docker-compose/unifi-controller/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:ro
networks:
unifi:
restart: unless-stopped
networks:
proxy:
external: true
unifi:

View file

@@ -0,0 +1,2 @@
db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi"}]});
db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi_stat"}]});