Compare commits
156 commits
2.1.15-rc1
...
main
Commits (SHA1), in the order listed:
3e70be3394, 37e5caf1fe, 0ea418212e, 370cbc6393, 7bc27c191e, 381c3f8999, 625edd42b5, 600600f3e2, f2a700a07c, 75e3687a56,
ac24903f76, eb29dad1aa, e4eda2a164, e6787fcb0b, 35a8cb008d, 196aa685a9, 3cc0b8d933, 978c34242a, 0e2831b878, a461d448f3,
5908ac8d4b, 542be67a7d, 91689ef278, 4ac3f9606a, e8191b0550, e5f13eb27c, 63c8d71f91, 98eae4bb57, 9124abee7f, 70575b5cb0,
95c6b9af0b, f4af397b02, 630d607fbd, 28987a5371, 30c8f86aa9, 22668468e8, 61c80261de, 9590abd6ec, 13b331815b, 1416a1297c,
967d81ffdf, 5ed7455276, a98411e5dd, 607a06a6a9, 7fdf7ba3f1, d279af60c4, 88a4d59efe, c9ef887b4d, e6bdb09f8f, e33c4e01e0,
4d3dd86ed1, 010207e2f9, cdfddb7062, ff9bb79330, 32b36364e8, 15be682790, 4b764a9efe, bb6cec769d, 7289061523, e2a40c2ce2,
93d2ea6593, 169cdfab48, 26059e70b9, d81300fa97, da45cd1734, f2c6c07bcb, abd13ebc09, 4e7e8fcd13, 365c1c0df2, db594448ea,
7664e19ade, 62e1815759, 938433c1f2, a7f29711ab, f0dbd6e61b, b89fd4196e, d270b4e02c, 34e1c42a8a, c3d8435b03, 35057af946,
aaeb88a1cf, 3771aff40f, 8783f0dedc, a076b23dea, ca1281393d, 6e00523c93, aa909e0987, 32c5ad72c1, f127b6089f, b4b3d46112,
49edc43bc9, 3f5dcdce61, acb8a831d3, 5528e054cd, 0220641c0c, 2f7e4bf546, 2d9362a916, c905d1d078, 4e0aa3adf1, 5828170473,
e9a9fb440b, fec0cb8dbd, 7d44dda04b, 9b70ebeffb, 24908db799, 9e6a0b9007, 059ea4fa3e, 31ff31f873, 1f274842ab, 2805445628,
750ed0d219, 6a6c34be0b, c401ec7304, 4458caf20b, f4c51a7218, 4649a8dfab, c2e8434a11, 1776fbbd8a, 162497748a, adedc027df,
e716b2ced6, 8bf861e52f, 84ca12d507, 35bb2d93f6, e5a264556c, c07d492543, fa95f84b69, ff2bf71946, bcb76af3b9, 050594159b,
77db8bafe7, 1c93d07940, 2f5f3134bd, 64d065af6e, 4cde39a975, c112c13768, 4932058283, 31eb1c0326, 46989e7b08, e3f84b2dd0,
0a78f5d6d2, 28bd2c8cc6, e99496a3f3, fb62d50a42, 813c926323, b755df59ed, d1aca3c0b8, a36d172e4a, d8a45cbc3f, 95b13a067b,
6bb26d1508, a1155bcefd, cf9b2298a2, 29fb9d6f09, cfb8c792bb, aebfa0421c
32 changed files with 867 additions and 449 deletions
.github/workflows/docker.yml (vendored, 43 changes)

@@ -11,7 +11,7 @@ name: Build image to DockerHub
on:
push:
branches: [main]
branches: [main, dev] # Include dev branch
paths:
- "docker/*/Dockerfile"

@@ -38,8 +38,7 @@ jobs:
needs: setup
runs-on: ubuntu-latest
env:
# You can set it to choice where download from
MEDIA_FROM: "source"
MEDIA_FROM: "source" # You can set it to choice where download from
strategy:
matrix: ${{fromJson(needs.setup.outputs.matrix)}}
steps:

@@ -53,23 +52,31 @@ jobs:
APP=${{ matrix.app }}
TAG=$(grep 'LABEL version' "docker/$APP/Dockerfile" | cut -d'"' -f2 | xargs)
echo $APP version is $TAG
if [[ "$TAG" == *"-"* ]]; then
TAGS="$TAG"

# Determine the channel based on the branch and TAG
if [[ $GITHUB_REF == *"refs/heads/dev"* ]]; then
echo "CHANNEL=dev" >> $GITHUB_ENV
TAGS="$TAG" # Use the TAG directly for dev
else
echo "CHANNEL=release" >> $GITHUB_ENV
IFS='.' read -ra PARTS <<< "$TAG"
TAGS="latest"
TAG_PART=""
for i in "${!PARTS[@]}"; do
if [ "$i" -eq 0 ]; then
TAG_PART="${PARTS[$i]}"
else
TAG_PART="${TAG_PART}.${PARTS[$i]}"
fi
TAGS="${TAGS},${TAG_PART}"
done
if [[ "$TAG" == *"-"* ]]; then
echo "CHANNEL=rc" >> $GITHUB_ENV
TAGS="$TAG"
else
echo "CHANNEL=release" >> $GITHUB_ENV
IFS='.' read -ra PARTS <<< "$TAG"
TAGS="latest"
TAG_PART=""
for i in "${!PARTS[@]}"; do
if [ "$i" -eq 0 ]; then
TAG_PART="${PARTS[$i]}"
else
TAG_PART="${TAG_PART}.${PARTS[$i]}"
fi
TAGS="${TAGS},${TAG_PART}"
done
fi
fi

echo "Building and pushing Docker image for $APP with tags: $TAGS"
echo "TAGS=$TAGS" >> $GITHUB_ENV
echo "APP=$APP" >> $GITHUB_ENV

@@ -159,4 +166,4 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
repository: websoft9dev/${{env.APP}}
readme-filepath: ${{env.README}}
if: needs.setup.outputs.matrix != ''
if: needs.setup.outputs.matrix != ''
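For reference, a minimal sketch of what the cumulative tag expansion above produces for a hypothetical version label such as 2.1.15 (the loop is copied from the workflow step; only the sample value is assumed):

```bash
#!/usr/bin/env bash
# Illustrative only: reproduce the release-channel tag expansion for TAG=2.1.15.
TAG="2.1.15"

IFS='.' read -ra PARTS <<< "$TAG"
TAGS="latest"
TAG_PART=""
for i in "${!PARTS[@]}"; do
  if [ "$i" -eq 0 ]; then
    TAG_PART="${PARTS[$i]}"
  else
    TAG_PART="${TAG_PART}.${PARTS[$i]}"
  fi
  TAGS="${TAGS},${TAG_PART}"
done

echo "$TAGS"   # prints: latest,2,2.1,2.1.15
```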
.github/workflows/release.yml (vendored, 11 changes)

@@ -5,6 +5,7 @@ on:
push:
branches:
- main
- dev
paths:
- "version.json"

@@ -24,9 +25,13 @@ jobs:
version_core=${version%%-*}
echo "VERSION=$version" >> $GITHUB_OUTPUT
echo "VERSION_CORE=$version_core" >> $GITHUB_OUTPUT
if [[ $version == *-* ]]; then
echo "rc release version"

if [[ $GITHUB_REF == *"refs/heads/dev"* ]]; then
echo "dev branch detected"
echo "CHANNEL=dev" >> $GITHUB_OUTPUT
elif [[ $version == *-* ]]; then
echo "rc release version"
echo "CHANNEL=rc" >> $GITHUB_OUTPUT
else
echo "release version"
echo "CHANNEL=release" >> $GITHUB_OUTPUT

@@ -71,6 +76,7 @@ jobs:
destination-dir: ./${{ steps.convert_version.outputs.CHANNEL }}/websoft9

- name: Create Github Release
if: github.ref == 'refs/heads/main' # Only trigger on the main branch
uses: softprops/action-gh-release@v1
with:
files: |

@@ -84,6 +90,7 @@ jobs:

pages:
name: Build Github Pages
if: github.ref == 'refs/heads/main' # Only trigger on the main branch
permissions:
contents: read
pages: write
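A minimal sketch of the channel decision that the step above now makes (illustrative; GITHUB_REF and version stand in for the values available during the workflow run):

```bash
#!/usr/bin/env bash
# Illustrative only: the release-channel decision from the step above.
GITHUB_REF="refs/heads/dev"     # assumed sample value
version="2.1.15-rc1"            # assumed sample value from version.json

if [[ $GITHUB_REF == *"refs/heads/dev"* ]]; then
  CHANNEL=dev        # any push to the dev branch
elif [[ $version == *-* ]]; then
  CHANNEL=rc         # pre-release version such as 2.1.15-rc1
else
  CHANNEL=release    # plain version such as 2.1.15
fi

echo "CHANNEL=$CHANNEL"
```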
README.md (12 changes)

@@ -1,7 +1,16 @@

[](http://www.gnu.org/licenses/gpl-3.0)
[](https://github.com/websoft9/websoft9)
[](https://github.com/websoft9/websoft9)
[](https://github.com/websoft9/websoft9)
[](https://github.com/websoft9/websoft9)

**Certified Deployment on Major Cloud Platforms with business support**
| [](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/websoft9inc.websoft9) | [](https://aws.amazon.com/marketplace/pp/prodview-5jziwpvx4puq4) | [](https://marketplace.alibabacloud.com/products/201072001/sgcmjj00034378.html) | [](https://marketplace.huaweicloud.com/intl/contents/bf4480ae-d0af-422c-b246-e2ec67743f4e) |
| ---- | ---- | ---- | ---- |
| [](https://market.aliyun.com/products/53690006/cmjj00048735.html?userCode=yetrmi9y) | [](https://marketplace.huaweicloud.com/contents/29458a42-64b7-4637-aa7c-8bfddea1fb72#productid=OFFI1005787756558913536) | | |

# What is Websoft9?

@@ -53,7 +62,6 @@ Need root privileges user to install Websoft9, if you use no-root user you can `
# Install by default
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh

# Install Websoft9 with parameters
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh --port 9000 --channel release --path "/data/websoft9/source" --version "latest"
```
@@ -1,26 +1,26 @@
[nginx_proxy_manager]
base_url = http://websoft9-proxy:81/api
user_name = admin@mydomain.com
user_pwd = 9KCRwcL10saLCGaZ
user_pwd = LMFuCnajkQhK3zeb
nike_name = admin
listen_port = 80
listen_port = 443

[gitea]
base_url = http://websoft9-git:3000/api/v1
user_name = websoft9
user_email = admin@mydomain.com
user_pwd = kk95qed0Fxt8
user_pwd = 93HDu6tUWeGx

[portainer]
base_url = http://websoft9-deployment:9000/api
user_name = admin
user_pwd = FptfbwA1TpUr
user_pwd = 93sX)LLHKJY$

[api_key]
key = cc9223b3055471a6f4f9654e08371816a9637ba1c57383617b0684b92ac7b2f4

[domain]
wildcard_domain =
wildcard_domain =

[cockpit]
port = 9000
apphub/src/external/portainer_api.py (vendored, 10 changes)

@@ -1,3 +1,4 @@
from datetime import datetime
import json
import threading

@@ -215,6 +216,10 @@ class PortainerAPI:
"repositoryAuthentication": True,
"RepositoryUsername": usr_name,
"RepositoryPassword": usr_password,
"env":[{
"name": "DEPLOY_TIME",
"value": "-"+datetime.now().strftime("%Y%m%d%H%M%S")
}]
},
)

@@ -372,7 +377,10 @@ class PortainerAPI:
path=f"stacks/{stackID}/git/redeploy",
params={"endpointId": endpointId},
json={
"env":[],
"env":[{
"name": "DEPLOY_TIME",
"value": "-"+datetime.now().strftime("%Y%m%d%H%M%S")
}],
"prune":False,
"RepositoryReferenceName":"",
"RepositoryAuthentication":True,
|
@@ -429,15 +429,17 @@ class AppManger:

# Verify the app is web app
is_web_app = envHelper.get_value("W9_URL")
url_with_port = envHelper.get_value("W9_URL_WITH_PORT")
# url_with_port = envHelper.get_value("W9_URL_WITH_PORT")
w9_url_with_replace = envHelper.get_value("W9_URL_REPLACE")

if is_web_app is not None:
if url_with_port is None:
if w9_url_with_replace is None:
envHelper.set_value("W9_URL", domain_names[0])
else:
try:
ipaddress.ip_address(domain_names[0])
envHelper.set_value("W9_URL", domain_names[0] + ":" + envHelper.get_value("W9_HTTP_PORT_SET"))
#envHelper.set_value("W9_URL", domain_names[0] + ":" + envHelper.get_value("W9_HTTP_PORT_SET"))
envHelper.set_value("W9_URL", domain_names[0] + ":" + (envHelper.get_value("W9_HTTP_PORT_SET") or envHelper.get_value("W9_HTTPS_PORT_SET")))
except ValueError:
envHelper.set_value("W9_URL", domain_names[0])

@@ -599,6 +601,24 @@ class AppManger:
portainerManager.redeploy_stack(stack_id,endpointId,pull_image,user_name,user_pwd)
logger.access(f"Redeployed app: [{app_id}]")

app_info = self.get_app_by_id(app_id,endpointId)
forward_ports = [domain.get("forward_port") for domain in app_info.domain_names]

proxy_ids = [domain.get("id") for domain in app_info.domain_names]

if forward_ports:
http_port = app_info.env.get("W9_HTTP_PORT")
https_port = app_info.env.get("W9_HTTPS_PORT")

forward_port = http_port if http_port else https_port

forward_ports_str = [str(port) for port in forward_ports]

if not all(port == forward_port for port in forward_ports_str):
for proxy_id in proxy_ids:
ProxyManager().update_proxy_port_by_app(proxy_id, forward_port)
logger.access(f"Updated proxy port: {forward_port} for app: {app_id}")

def uninstall_app(self,app_id:str,purge_data:bool,endpointId:int = None):
"""
Uninstall app

@@ -1082,7 +1102,7 @@ class AppManger:
# Get the w9_url and w9_url_replace
w9_url_replace = next((element.get("w9_url_replace") for element in app_info.domain_names if element.get("id") == proxy_id), None)
w9_url = next((element.get("w9_url") for element in app_info.domain_names if element.get("id") == proxy_id), None)

# validate w9_url_replace is true
if w9_url_replace:
domain_names = host.get("domain_names",None)

@@ -1093,7 +1113,8 @@ class AppManger:
if w9_url in domain_names:
new_w9_url = None
if len(app_proxys) == 1 and app_proxys[0].get("id") == proxy_id:
new_w9_url = client_host
# If w9_url_with_port exists and its value is true
new_w9_url = client_host+":"+ (app_info.env.get("W9_HTTP_PORT_SET") or app_info.env.get("W9_HTTPS_PORT_SET"))
elif len(app_proxys) > 1:
# Get the first proxy_host
proxy_host = next((proxy for proxy in app_proxys if proxy.get("id") != proxy_id), None)

@@ -1228,4 +1249,4 @@ class AppManger:
raise e
except Exception as e:
logger.error(f"Update the git repo env file error:{e}")
raise CustomException()
raise CustomException()
|
|
@ -205,6 +205,44 @@ class ProxyManager:
|
|||
logger.error(f"Update proxy host:{proxy_id} error:{e}")
|
||||
raise CustomException()
|
||||
|
||||
def update_proxy_port_by_app(self, proxy_id: int, forward_port: int):
|
||||
"""
|
||||
Update a proxy host's forward port
|
||||
|
||||
Args:
|
||||
proxy_id (int): Proxy id
|
||||
forward_port (int): Forward port
|
||||
|
||||
Returns:
|
||||
dict: Proxy host
|
||||
"""
|
||||
# Get proxy host by id
|
||||
req_json = self.get_proxy_host_by_id(proxy_id)
|
||||
try:
|
||||
if req_json is None:
|
||||
raise CustomException(
|
||||
status_code=400,
|
||||
message=f"Invalid Request",
|
||||
details=f"Proxy host:{proxy_id} not found"
|
||||
)
|
||||
# update forward_port
|
||||
req_json["forward_port"] = forward_port
|
||||
# delete useless keys from req_json(because the req_json is from get_proxy_host_by_id and update_proxy_host need less keys)
|
||||
keys_to_delete = ["id", "created_on", "modified_on", "owner_user_id", "enabled", "certificate", "owner", "access_list", "use_default_location", "ipv6"]
|
||||
for key in keys_to_delete:
|
||||
req_json.pop(key, None)
|
||||
|
||||
response = self.nginx.update_proxy_host(proxy_id=proxy_id, json=req_json)
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
self._handler_nginx_error(response)
|
||||
except CustomException as e:
|
||||
raise e
|
||||
except Exception as e:
|
||||
logger.error(f"Update proxy host:{proxy_id} error:{e}")
|
||||
raise CustomException()
|
||||
|
||||
def get_proxy_host_by_app(self,app_id:str):
|
||||
"""
|
||||
Get proxy host by app
|
||||
|
@ -295,4 +333,4 @@ class ProxyManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Get proxy host by id:{proxy_id} error:{e}")
|
||||
raise CustomException()
|
||||
|
||||
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
### enhancement:
|
||||
- fix update Websoft9 at install.sh #539
|
||||
- Build and push js file to CloudFlare worker #538
|
||||
- Create dockerhub proxy URL based on Cloudflare #536
|
||||
- add Dockerfile build function for apphub #534
|
||||
- Upgrade access to the Websoft9 platform from HTTP to HTTPS #566
|
||||
- cockpit upgrade, port changes to 9090 when Linux upgrades #556
|
||||
- upgrade error at CentOS Stream #554
|
||||
- Optimize install_docker.sh #547
|
||||
- Nginxproxymanage add named volume nginx_custom #527
|
||||
- Nginx how to dynamically apply proxy #516
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# docs: https://cockpit-project.org/guide/latest/cockpit.conf.5.html
|
||||
|
||||
[WebService]
|
||||
AllowUnencrypted = true
|
||||
#AllowUnencrypted = true
|
||||
LoginTitle= Websoft9 - Linux AppStore
|
|
@ -1,4 +1,4 @@
|
|||
APPHUB_VERSION=0.1.5-rc1
|
||||
APPHUB_VERSION=0.1.9
|
||||
DEPLOYMENT_VERSION=2.20.3
|
||||
GIT_VERSION=1.21.9
|
||||
PROXY_VERSION=2.11.3
|
||||
PROXY_VERSION=2.11.3
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
# This file can running at actions
|
||||
# MEDIA_VERSION and LIBRARY_VERSION will trigger its release
|
||||
# modify time: 202410291513, you can modify here to trigger Docker Build action
|
||||
# modify time: 202501021450, you can modify here to trigger Docker Build action
|
||||
|
||||
|
||||
FROM python:3.10-slim-bullseye
|
||||
LABEL maintainer="Websoft9<help@websoft9.com>"
|
||||
LABEL version="0.1.5-rc1"
|
||||
LABEL version="0.1.9"
|
||||
|
||||
WORKDIR /websoft9
|
||||
|
||||
|
@ -39,6 +39,7 @@ RUN apt update && apt install -y --no-install-recommends curl git jq cron iprout
|
|||
cp -r ./w9source/apphub/src/config ./config && \
|
||||
cp -r ./w9source/docker/apphub/script ./script && \
|
||||
curl -o ./script/update_zip.sh $SOURCE_GITHUB_PAGES/scripts/update_zip.sh && \
|
||||
curl -o /websoft9/version.json $SOURCE_GITHUB_PAGES/version.json && \
|
||||
pip install --no-cache-dir --upgrade -r apphub/requirements.txt && \
|
||||
pip install -e ./apphub && \
|
||||
# Clean cache and install files
|
||||
|
@ -46,6 +47,9 @@ RUN apt update && apt install -y --no-install-recommends curl git jq cron iprout
|
|||
apt clean && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc /usr/share/doc-base
|
||||
|
||||
# Create a file named migration_flag
|
||||
RUN touch /websoft9/migration_flag
|
||||
|
||||
# supervisor
|
||||
COPY config/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
|
||||
COPY config/logging_config.yaml /etc/supervisor/conf.d/logging_config.yaml
|
||||
|
|
|
@ -6,7 +6,11 @@ export PATH
|
|||
|
||||
set -e
|
||||
|
||||
bash /websoft9/script/migration.sh
|
||||
# execute migration script when container create
|
||||
if [ -f /websoft9/migration_flag ]; then
|
||||
bash /websoft9/script/migration.sh
|
||||
rm -f /websoft9/migration_flag
|
||||
fi
|
||||
|
||||
try_times=5
|
||||
supervisord
|
||||
|
|
|
@ -3,10 +3,9 @@
|
|||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - start to migrate config.ini"
|
||||
|
||||
migrate_ini() {
|
||||
|
||||
# Define file paths, use template ini and syn exsit items from target ini
|
||||
# Combine source_ini to target ini
|
||||
export target_ini="$1"
|
||||
export template_ini="$2"
|
||||
export source_ini="$2"
|
||||
|
||||
python3 - <<EOF
|
||||
import configparser
|
||||
|
@ -14,36 +13,44 @@ import os
|
|||
import sys
|
||||
|
||||
target_ini = os.environ['target_ini']
|
||||
template_ini = os.environ['template_ini']
|
||||
source_ini = os.environ['source_ini']
|
||||
|
||||
# Create two config parsers
|
||||
target_parser = configparser.ConfigParser()
|
||||
template_parser = configparser.ConfigParser()
|
||||
source_parser = configparser.ConfigParser()
|
||||
|
||||
try:
|
||||
|
||||
target_parser.read(target_ini)
|
||||
template_parser.read(template_ini)
|
||||
source_parser.read(source_ini)
|
||||
except configparser.MissingSectionHeaderError:
|
||||
print("Error: The provided files are not valid INI files.")
|
||||
sys.exit(1)
|
||||
|
||||
# use target_parser to override template_parser
|
||||
# use target_parser to override source_parser
|
||||
for section in target_parser.sections():
|
||||
if template_parser.has_section(section):
|
||||
if source_parser.has_section(section):
|
||||
for key, value in target_parser.items(section):
|
||||
if template_parser.has_option(section, key):
|
||||
template_parser.set(section, key, value)
|
||||
if source_parser.has_option(section, key):
|
||||
source_parser.set(section, key, value)
|
||||
|
||||
|
||||
with open(target_ini, 'w') as f:
|
||||
template_parser.write(f)
|
||||
source_parser.write(f)
|
||||
EOF
|
||||
}
|
||||
|
||||
# Special migration
|
||||
post_migration(){
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Set listen_port to nginx_proxy_manager"
|
||||
config_file="/websoft9/config/config.ini"
|
||||
listen_port=$(grep -Po '^\s*listen_port\s*=\s*\K[0-9]+' "$config_file")
|
||||
apphub setconfig --section nginx_proxy_manager --key listen_port --value "$listen_port"
|
||||
}
|
||||
|
||||
migrate_ini "/websoft9/apphub/src/config/config.ini" "/websoft9/config/config.ini"
|
||||
migrate_ini "/websoft9/apphub/src/config/system.ini" "/websoft9/config/system.ini"
|
||||
post_migration
|
||||
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Success to update config.ini"
|
||||
|
|
|
@ -1,9 +1,20 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Compare remote version and local version." | tee -a /var/log/supervisord.log
|
||||
channel=release
|
||||
|
||||
if [ -f /websoft9/version.json ]; then
|
||||
version=$(cat /websoft9/version.json | jq -r .version)
|
||||
if [[ $version == *rc* ]]; then
|
||||
channel=dev
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "channel is $channel"
|
||||
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Compare remote version and local version." | tee -a /var/log/supervisord.log
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Download remote packages and replace local data." | tee -a /var/log/supervisord.log
|
||||
bash /websoft9/script/update_zip.sh --package_name "media-latest.zip" --sync_to "/websoft9/media"
|
||||
bash /websoft9/script/update_zip.sh --package_name "library-latest.zip" --sync_to "/websoft9/library"
|
||||
|
||||
bash /websoft9/script/update_zip.sh --channel $channel --package_name "media-latest.zip" --sync_to "/websoft9/media"
|
||||
bash /websoft9/script/update_zip.sh --channel $channel --package_name "library-latest.zip" --sync_to "/websoft9/library"
|
||||
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Success to update library and media."
|
|
@ -1,4 +1,4 @@
|
|||
# modify time: 202407291102, you can modify here to trigger Docker Build action
|
||||
# modify time: 202412111429, you can modify here to trigger Docker Build action
|
||||
# step1: Build entrypoint execute program init_portainer by golang
|
||||
|
||||
FROM golang:latest AS builder
|
||||
|
@ -16,4 +16,4 @@ LABEL maintainer="websoft9<help@websoft9.com>"
|
|||
LABEL version="2.20.3"
|
||||
COPY --from=builder /init_portainer /
|
||||
|
||||
ENTRYPOINT ["/init_portainer"]
|
||||
ENTRYPOINT ["/init_portainer"]
|
||||
|
|
|
@ -22,7 +22,9 @@ const (
|
|||
retryDelay = 5 * time.Second
|
||||
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789@$()_"
|
||||
credentialFilePath = "/data/credential"
|
||||
initFlagFilePath = "/data/init.flag"
|
||||
initCheckURL = portainerURL + "/users/admin/check"
|
||||
waitTimeout = 60 * time.Second
|
||||
waitInterval = 2 * time.Second
|
||||
)
|
||||
|
||||
type Credentials struct {
|
||||
|
@ -31,26 +33,22 @@ type Credentials struct {
|
|||
}
|
||||
|
||||
func main() {
|
||||
// Check whether the initialization flag file exists
|
||||
if _, err := os.Stat(initFlagFilePath); err == nil {
|
||||
log.Println("Initialization has already been completed by another instance.")
|
||||
startPortainer()
|
||||
// Start Portainer and wait for it to become ready
|
||||
cmd, err := startAndWaitForPortainer(os.Args[1:]...)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to start and wait for Portainer: %v", err)
|
||||
}
|
||||
|
||||
// Check whether Portainer has already been initialized
|
||||
if isPortainerInitialized() {
|
||||
log.Println("Portainer is already initialized.")
|
||||
// Wait for the Portainer process to exit
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Fatalf("Portainer process exited with error: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Start Portainer
|
||||
// cmd := exec.Command("/portainer")
|
||||
cmd := exec.Command("/portainer", os.Args[1:]...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatalf("Failed to start Portainer: %v", err)
|
||||
}
|
||||
|
||||
// Wait for Portainer to start
|
||||
waitForPortainer()
|
||||
|
||||
// Initialize Portainer
|
||||
adminUsername := "admin"
|
||||
adminPassword := generateRandomPassword(12)
|
||||
|
@ -65,10 +63,6 @@ func main() {
|
|||
log.Fatalf("Failed to initialize local endpoint: %v", err)
|
||||
} else {
|
||||
fmt.Println("Portainer initialization completed successfully.")
|
||||
// Create the initialization flag file
|
||||
if err := ioutil.WriteFile(initFlagFilePath, []byte("initialized"), 0644); err != nil {
|
||||
log.Fatalf("Failed to create initialization flag file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -79,39 +73,31 @@ func main() {
|
|||
}
|
||||
}
|
||||
|
||||
func startPortainer() {
|
||||
cmd := exec.Command("/portainer")
|
||||
func startAndWaitForPortainer(args ...string) (*exec.Cmd, error) {
|
||||
cmd := exec.Command("/portainer", args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatalf("Failed to start Portainer: %v", err)
|
||||
return nil, fmt.Errorf("failed to start Portainer: %w", err)
|
||||
}
|
||||
|
||||
// Wait for the Portainer process to exit
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Fatalf("Portainer process exited with error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForPortainer() {
|
||||
timeout := time.Duration(60) * time.Second
|
||||
start := time.Now()
|
||||
timeout := time.After(waitTimeout)
|
||||
ticker := time.NewTicker(waitInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
resp, err := http.Get(portainerURL + "/system/status")
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
fmt.Println("Portainer is up!")
|
||||
break
|
||||
select {
|
||||
case <-timeout:
|
||||
return nil, fmt.Errorf("timeout waiting for Portainer")
|
||||
case <-ticker.C:
|
||||
resp, err := http.Get(portainerURL + "/system/status")
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
fmt.Println("Portainer is up……!")
|
||||
return cmd, nil
|
||||
}
|
||||
fmt.Println("Waiting for Portainer...")
|
||||
}
|
||||
|
||||
if time.Since(start) > timeout {
|
||||
fmt.Println("Timeout waiting for Portainer")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("Waiting for Portainer...")
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -224,15 +210,7 @@ func writeCredentialsToFile(password string) error {
|
|||
func retryRequest(method, url, contentType string, body *bytes.Buffer) (*http.Response, error) {
|
||||
client := &http.Client{}
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
var req *http.Request
|
||||
var err error
|
||||
|
||||
if body != nil {
|
||||
req, err = http.NewRequest(method, url, bytes.NewBuffer(body.Bytes()))
|
||||
} else {
|
||||
req, err = http.NewRequest(method, url, nil)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, url, body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating request: %w", err)
|
||||
}
|
||||
|
@ -247,4 +225,23 @@ func retryRequest(method, url, contentType string, body *bytes.Buffer) (*http.Re
|
|||
time.Sleep(retryDelay)
|
||||
}
|
||||
return nil, fmt.Errorf("max retries reached")
|
||||
}
|
||||
}
|
||||
|
||||
func isPortainerInitialized() bool {
|
||||
resp, err := http.Get(initCheckURL)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to check Portainer initialization status: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
return true
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return false
|
||||
}
|
||||
|
||||
log.Fatalf("Unexpected response status: %d", resp.StatusCode)
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# modify time: 202407251422, you can modify here to trigger Docker Build action
|
||||
# modify time: 202412211783, you can modify here to trigger Docker Build action
|
||||
# from Dockerfile: https://github.com/NginxProxyManager/nginx-proxy-manager/blob/develop/docker/Dockerfile
|
||||
# from image: https://hub.docker.com/r/jc21/nginx-proxy-manager
|
||||
|
||||
|
@ -7,7 +7,12 @@ FROM jc21/nginx-proxy-manager:2.11.3
|
|||
LABEL maintainer="Websoft9<help@websoft9.com>"
|
||||
LABEL version="2.11.3"
|
||||
|
||||
COPY ./config/initproxy.conf /data/nginx/default_host/initproxy.conf
|
||||
COPY README.md /data/nginx/README.md
|
||||
RUN mkdir /data/nginx/custom
|
||||
RUN mkdir -p /etc/websoft9
|
||||
COPY ./config/http.conf /data/nginx/custom/http.conf
|
||||
COPY ./config/landing/ /etc/websoft9/landing
|
||||
COPY ./config/initproxy.conf /etc/websoft9/initproxy.conf
|
||||
COPY ./init_nginx.sh /app/init_nginx.sh
|
||||
RUN chmod +x /app/init_nginx.sh
|
||||
|
||||
|
@ -30,4 +35,4 @@ RUN proxy_line=("proxy_set_header Upgrade \$http_upgrade;" "proxy_set_header Con
|
|||
fi; \
|
||||
done
|
||||
|
||||
ENTRYPOINT [ "/app/init_nginx.sh" ]
|
||||
ENTRYPOINT [ "/app/init_nginx.sh" ]
|
||||
|
|
|
@ -4,3 +4,4 @@ From official Nginx Proxy Manager image, and:
|
|||
|
||||
- Copy the initproxy.conf file to the nginx directory to initialize the custom configuration
|
||||
- Initialize username and password through environment variables
|
||||
- Add landing page designed by [figma](https://www.figma.com/)
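For reference, the credential mentioned above is written by init_nginx.sh to /data/credential inside the proxy container as JSON with username and password fields; a minimal sketch for reading it back (the container name websoft9-proxy follows the config.ini shown earlier and may differ in your setup):

```bash
# Illustrative only: print the generated Nginx Proxy Manager admin credential.
docker exec websoft9-proxy cat /data/credential | jq -r '.username, .password'
```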
|
docker/proxy/config/http.conf (new file, 2 lines)

@@ -0,0 +1,2 @@
limit_req_zone $binary_remote_addr zone=w9_limit_req_zone:10m rate=30r/s;
limit_conn_zone $binary_remote_addr zone=w9_limit_conn_zone:10m;
@ -1,14 +1,20 @@
|
|||
# ------------------------------------------------------------
|
||||
# domain.com
|
||||
# ------------------------------------------------------------
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
|
||||
server_name ~\.?[0-9a-zA-Z]$;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
server {
|
||||
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
listen 443 ssl default_server;
|
||||
listen [::]:443 ssl default_server;
|
||||
|
||||
server_name ~\.?[0-9a-zA-Z]$;
|
||||
|
||||
ssl_certificate /data/custom_ssl/0-self-signed.cert;
|
||||
ssl_certificate_key /data/custom_ssl/0-self-signed.key;
|
||||
|
||||
access_log /data/logs/proxy-host-1_access.log proxy;
|
||||
error_log /data/logs/proxy-host-1_error.log warn;
|
||||
|
||||
|
@ -17,6 +23,8 @@ server {
|
|||
}
|
||||
|
||||
location / {
|
||||
root /data/nginx/default_www/landing;
|
||||
index index.html
|
||||
# Proxy!
|
||||
include conf.d/include/proxy.conf;
|
||||
}
|
||||
|
|
docker/proxy/config/landing/assets/bootstrap.min.css (new file, vendored, 7 changes): file diff suppressed because one or more lines are too long.
docker/proxy/config/landing/assets/logo.svg (new file, 76 lines)

@@ -0,0 +1,76 @@
(SVG source of the Websoft9 logo: XML header, Adobe Illustrator metadata, gradient definitions, and vector path data.)
After Width: | Height: | Size: 7.3 KiB |
BIN docker/proxy/config/landing/assets/websoft9-appstore.png (new binary file, not shown). After Width: | Height: | Size: 213 KiB
113
docker/proxy/config/landing/index.html
Normal file
113
docker/proxy/config/landing/index.html
Normal file
|
@ -0,0 +1,113 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<!-- Required meta tags-->
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
|
||||
<meta http-equiv="X-UA-Compatible" content="ie=edge">
|
||||
<meta name="author" content="Websoft9">
|
||||
<link rel="icon" href="favicon.ico" />
|
||||
<!-- Title Page-->
|
||||
<title>Websoft9 Applications Hosting Platform</title>
|
||||
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
const domain = window.location.hostname;
|
||||
const linkElement = document.getElementById('myLink');
|
||||
linkElement.href = `http://${domain}:9000`;
|
||||
});
|
||||
</script>
|
||||
|
||||
<!-- english tags-->
|
||||
<meta name="description" content="Websoft9 is a Self-Hosting Applications platform that can deploy multiple applications in your own cloud infrastructure.">
|
||||
<meta name="keywords" content="Cloud computing, runtime environment, Docker, auto-deployment, Self-Hosting, install WordPress, Panel, PaaS, Iac, GitOps">
|
||||
<!-- Chinese tags-->
|
||||
<meta name="description" content="Websoft9(微聚云)是一个企业级的开源软件聚合与多应用托管平台,旨在让任何人都能轻松地在自己的基础设施中部署和管理软件。" lang="zh">
|
||||
<meta name="keywords" content="云计算,运行环境,自托管,自动部署,Docker,部署 WordPress,服务器面板,自动化部署,PaaS,IaC, GitOps" lang="zh">
|
||||
|
||||
<link rel="stylesheet" href="assets/bootstrap.min.css">
|
||||
<style>
|
||||
.logo {
|
||||
width: 100px;
|
||||
margin-left: 20px;
|
||||
}
|
||||
|
||||
.header {
|
||||
padding: 20px 0;
|
||||
background-color: #f8f9fa;
|
||||
border-bottom: 1px solid #dee2e6;
|
||||
}
|
||||
|
||||
.content {
|
||||
padding: 40px 0;
|
||||
}
|
||||
|
||||
.sub-block {
|
||||
margin-bottom: 30px;
|
||||
text-align: left; /* left-align the text */
|
||||
}
|
||||
|
||||
.btn-custom {
|
||||
background-color: #086ad8;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.btn-custom:hover {
|
||||
background-color: #0056b3;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.link-container {
|
||||
display: flex;
|
||||
flex-wrap: wrap; /* allow items to wrap */
|
||||
gap: 0; /* no gap between items */
|
||||
}
|
||||
.link-container a {
|
||||
margin: 0; /* make sure there is no outer margin */
|
||||
padding: 0 10px; /* adjust the inner padding as needed */
|
||||
}
|
||||
|
||||
.welcome-title {
|
||||
font-family: Arial, Helvetica, sans-serif; /* prefer Arial and Helvetica */
|
||||
font-size: 32px; /* heading font size */
|
||||
}
|
||||
</style>
|
||||
|
||||
</head>
|
||||
|
||||
|
||||
<body>
|
||||
<div class="container-fluid">
|
||||
<!-- First block: header -->
|
||||
<div class="header w-100 d-flex justify-content-between align-items-center">
|
||||
<div class="container">
|
||||
<div class="d-flex justify-content-between align-items-center">
|
||||
<img src="assets/logo.svg" alt="Logo" class="logo">
|
||||
<div class="link-container">
|
||||
<a target="_blank" href="https://support.websoft9.com/docs/next/helpdesk/#contact" class="text-dark">Support</a>
|
||||
<a target="_blank" href="https://support.websoft9.com/docs" class="text-dark">Documentation</a>
|
||||
<a target="_blank" href="https://www.websoft9.com" class="text-dark">Website</a>
|
||||
<a target="_blank" href="https://github.com/Websoft9/websoft9" class="text-dark">Github</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Second block: content -->
|
||||
<div class="content">
|
||||
<div class="container" style="display: flex;">
|
||||
<div class="sub-block" style="flex: 1; padding: 20px;">
|
||||
<h1 class="welcome-title">Welcome to Websoft9 Applications Hosting Platform</h1>
|
||||
<p>GitOps-driven, multi-application hosting for cloud servers and home servers, one-click deployment of 200+ open source apps.</p>
|
||||
<a id="myLink" class="btn btn-custom" >Access Websoft9 Console</a>
|
||||
</div>
|
||||
<div class="image-block" style="flex: 1; padding: 20px;">
|
||||
<img src="assets/websoft9-appstore.png" alt="Description of image" style="width: 100%; height: auto;">
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
|
@ -1,31 +1,40 @@
|
|||
#!/bin/bash
|
||||
|
||||
# 设置密码目录
|
||||
# Define variables
|
||||
credential_path="/data/credential"
|
||||
|
||||
# 检查是否已经存在密码文件
|
||||
# Migrating initproxy.conf file
|
||||
if [ ! -d /data/nginx/default_host ]; then mkdir -p /data/nginx/default_host; fi
|
||||
cp -f /etc/websoft9/initproxy.conf /data/nginx/default_host/initproxy.conf
|
||||
[ -f /etc/websoft9/initproxy.conf ] && rm -f /data/nginx/proxy_host/initproxy.conf
|
||||
|
||||
# Deploy Websoft9 landing pages
|
||||
if [ ! -d /data/nginx/default_www/landing ]; then
|
||||
mkdir -p /data/nginx/default_www/
|
||||
cp -r /etc/websoft9/landing /data/nginx/default_www/
|
||||
else
|
||||
echo "/data/nginx/default_www/landing already exists."
|
||||
fi
|
||||
|
||||
# If credential file then create it and init credential for NPM
|
||||
# Reload NPM docker image Environments
|
||||
|
||||
if [ ! -f "$credential_path" ]; then
|
||||
# 设置用户名和生成随机密码
|
||||
# Set init credential
|
||||
INITIAL_ADMIN_EMAIL="admin@mydomain.com"
|
||||
INITIAL_ADMIN_PASSWORD=$(openssl rand -base64 16 | tr -d '/+' | cut -c1-16)
|
||||
|
||||
# 设置环境变量
|
||||
export INITIAL_ADMIN_EMAIL
|
||||
export INITIAL_ADMIN_PASSWORD
|
||||
|
||||
# 写入密码文件
|
||||
# Write credential to file
|
||||
mkdir -p "$(dirname "$credential_path")"
|
||||
credential_json="{\"username\":\"$INITIAL_ADMIN_EMAIL\",\"password\":\"$INITIAL_ADMIN_PASSWORD\"}"
|
||||
echo "$credential_json" > "$credential_path"
|
||||
else
|
||||
# 从密码文件中读取用户名和密码
|
||||
INITIAL_ADMIN_EMAIL=$(jq -r '.username' "$credential_path")
|
||||
INITIAL_ADMIN_PASSWORD=$(jq -r '.password' "$credential_path")
|
||||
echo "{\"username\":\"$INITIAL_ADMIN_EMAIL\",\"password\":\"$INITIAL_ADMIN_PASSWORD\"}" > "$credential_path"
|
||||
|
||||
# 设置环境变量
|
||||
export INITIAL_ADMIN_EMAIL
|
||||
export INITIAL_ADMIN_PASSWORD
|
||||
else
|
||||
read -r INITIAL_ADMIN_EMAIL INITIAL_ADMIN_PASSWORD < <(jq -r '.username + " " + .password' "$credential_path")
|
||||
fi
|
||||
|
||||
# 启动 Nginx
|
||||
# Reload NPM docker image Environments
|
||||
export INITIAL_ADMIN_EMAIL
|
||||
export INITIAL_ADMIN_PASSWORD
|
||||
|
||||
# Start NPM
|
||||
exec /init
|
||||
|
|
install/VMimage/OracleLinux.md (new file, 34 lines)

@@ -0,0 +1,34 @@
# Oracle Linux

## How to create VM image?

You should download an [Oracle Linux image](https://yum.oracle.com/oracle-linux-templates.html) to OSS/S3 directly; don't try to build an image from an ISO manually set up with KVM/VMware/VirtualBox.

## Cloud image requirements

- Kernel:
  - Unbreakable Enterprise Kernel (UEK) (√)
  - Red Hat Compatible Kernel (RHCK)
- OS disk resizes automatically
- Users can use either a password or a key when creating a VM or resetting the password
- OS boot method on Cloud: UEFI-Preferred
- Logical disk partition: LVM
- File system type: xfs (√), [btrfs](https://blogs.oracle.com/linux/post/btrfs-on-oracle-linuxefficiently-backup-and-recover-systems)
- Software: cloud-init, the Cloud provider's agent, virtio, NVMe
- Other config: https://github.com/Websoft9/mcloud/blob/master/ansible/roles/desktop/tasks/image.yml
- Applications: Desktop or Docker/Podman
- Other repositories
```
yum install -y oraclelinux-developer-release-e* oracle-nodejs-release-e* oracle-epel-release-e*; fi
python3 and pip at OracleLinux7?
```

## Upgrade Oracle Linux

You can use [leapp](https://docs.oracle.com/en/learn/ol-linux-leapp) to upgrade between major versions, e.g. Oracle Linux 8 > Oracle Linux 9.
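A minimal sketch of such an upgrade with leapp (package and command names follow Oracle's leapp documentation linked above; verify them against that guide before running):

```bash
# Illustrative only: in-place upgrade of Oracle Linux 8 to Oracle Linux 9.
sudo dnf install -y leapp-upgrade
sudo leapp preupgrade   # writes a report to /var/log/leapp/leapp-report.txt
sudo leapp upgrade      # stages the upgrade
sudo reboot             # reboots into the upgrade environment to finish
```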
## Test your Cloud private image

Some cloud providers offer tools for testing your image:

- [Alibaba Cloud sersi](https://help.aliyun.com/zh/ecs/user-guide/check-whether-an-image-meets-the-import-requirements)
|
|
@ -18,7 +18,7 @@ export PATH
|
|||
#
|
||||
# $ sudo bash install.sh --port 9001
|
||||
#
|
||||
# --channel <release|dev>
|
||||
# --channel <release|rc|dev>
|
||||
# Use the --channel option to install a release (production) or dev distribution. The default is release, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --channel release
|
||||
|
@ -43,12 +43,19 @@ export PATH
|
|||
#
|
||||
# $ sudo bash install.sh --devto "/data/dev/mycode"
|
||||
#
|
||||
# --execute_mode <auto|install|upgrade>
|
||||
# Use the --execute_mode option to tell the script whether to install or upgrade Websoft9. The default value is auto,
|
||||
# and the script will automatically check whether it needs to install or upgrade, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --execute_mode "upgrade"
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# Default values for the parameters
|
||||
version="latest"
|
||||
channel="release"
|
||||
execute_mode="auto"
|
||||
path="/data/websoft9/source"
|
||||
apps=""
|
||||
mirrors="https://dockerhub.websoft9.com"
|
||||
|
@ -119,6 +126,15 @@ while [[ $# -gt 0 ]]; do
|
|||
devto="$1"
|
||||
shift
|
||||
;;
|
||||
--execute_mode)
|
||||
shift
|
||||
if [[ $1 == --* ]]; then
|
||||
echo "Missing value for --execute_mode"
|
||||
exit 1
|
||||
fi
|
||||
execute_mode="$1"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo "Unknown parameter: $1"
|
||||
exit 1
|
||||
|
@ -126,6 +142,12 @@ while [[ $# -gt 0 ]]; do
|
|||
esac
|
||||
done
|
||||
|
||||
# Check that the current user is root (or has switched to root via sudo); if not, exit 1
|
||||
if [ $(id -u) -ne 0 ]; then
|
||||
echo "You must be the root user to run this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -n "$port" ]; then
|
||||
export port
|
||||
else
|
||||
|
@ -135,15 +157,18 @@ fi
|
|||
|
||||
starttime=$(date +%s)
|
||||
|
||||
# Check whether this is an install or an upgrade
|
||||
if systemctl cat websoft9 >/dev/null 2>&1 && systemctl cat cockpit >/dev/null 2>&1 && sudo docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q '^websoft9-apphub'; then
|
||||
echo "execute_mode=upgrade"
|
||||
export execute_mode="upgrade"
|
||||
else
|
||||
echo "execute_mode=install"
|
||||
export execute_mode="install"
|
||||
# Automatically determine whether to install or upgrade when $execute_mode is auto
|
||||
if [ "$execute_mode" = "auto" ]; then
|
||||
if sudo systemctl cat websoft9 >/dev/null 2>&1 && sudo systemctl cat cockpit >/dev/null 2>&1 && sudo docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q '^websoft9-apphub'; then
|
||||
echo "execute_mode=upgrade"
|
||||
export execute_mode="upgrade"
|
||||
else
|
||||
echo "execute_mode=install"
|
||||
export execute_mode="install"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Print the parameter values
|
||||
echo -e "\n------ Welcome to install Websoft9, it will take 3-5 minutes ------"
|
||||
echo -e "\nYour installation parameters are as follows: "
|
||||
|
@ -154,6 +179,7 @@ echo "--path: $path"
|
|||
echo "--apps: $apps"
|
||||
echo "--mirrors: $mirrors"
|
||||
echo "--devto: $devto"
|
||||
echo "--execute_mode: $execute_mode"
|
||||
|
||||
echo -e "\nYour OS: "
|
||||
cat /etc/os-release | head -n 3 2>/dev/null
|
||||
|
@ -276,6 +302,35 @@ install_tools(){
|
|||
fi
|
||||
}
|
||||
|
||||
download_artifact() {
|
||||
local artifact_url="$1"
|
||||
local source_zip="$2"
|
||||
local max_attempts="$3"
|
||||
|
||||
for ((i=1; i<=max_attempts; i++)); do
|
||||
wget -P /tmp "$artifact_url/$source_zip"
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Downloaded successfully using wget on attempt $i."
|
||||
return 0
|
||||
else
|
||||
echo "Attempt $i failed using wget."
|
||||
fi
|
||||
done
|
||||
|
||||
for ((i=1; i<=max_attempts; i++)); do
|
||||
curl -o /tmp/"$source_zip" "$artifact_url/$source_zip"
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Downloaded successfully using curl on attempt $i."
|
||||
return 0
|
||||
else
|
||||
echo "Attempt $i failed using curl."
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Failed to download source package after $((max_attempts * 2)) attempts."
|
||||
return 1
|
||||
}
|
||||
|
||||
download_source_and_checkimage() {
|
||||
echo_prefix_source=$'\n[Download Source] - '
|
||||
echo "$echo_prefix_source Download Websoft9 source code from $artifact_url/$source_zip"
|
||||
|
@ -283,7 +338,7 @@ download_source_and_checkimage() {
|
|||
find . -type f -name "websoft9*.zip*" -exec rm -f {} \;
|
||||
rm -rf /tmp/$source_unzip
|
||||
|
||||
wget -P /tmp "$artifact_url/$source_zip"
|
||||
download_artifact "$artifact_url" "$source_zip" 10
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to download source package."
|
||||
exit 1
|
||||
|
@ -298,10 +353,6 @@ download_source_and_checkimage() {
|
|||
|
||||
# install docker
|
||||
bash /tmp/$source_unzip/install/install_docker.sh
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "install_docker failed with error $?. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd /tmp/$source_unzip/docker
|
||||
docker compose pull
|
||||
|
@ -462,7 +513,7 @@ install_systemd() {
|
|||
echo "$echo_prefix_systemd Install Systemd service"
|
||||
|
||||
if [ ! -d "$systemd_path" ]; then
|
||||
sudo mkdir -p "$systemd_path"
|
||||
sudo mkdir -p "$systemd_path"
|
||||
fi
|
||||
|
||||
sudo cp -r $install_path/systemd/script/* "$systemd_path"
|
||||
|
@ -484,7 +535,7 @@ install_systemd() {
|
|||
exit 1
|
||||
fi
|
||||
|
||||
sudo systemctl start websoft9
|
||||
sudo systemctl restart websoft9
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to start Systemd service."
|
||||
exit 1
|
||||
|
@ -500,14 +551,14 @@ download_source_and_checkimage | tee -a $log_path
|
|||
|
||||
install_backends | tee -a $log_path
|
||||
|
||||
install_systemd | tee -a $log_path
|
||||
|
||||
bash $install_path/install/install_cockpit.sh | tee -a $log_path
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "install_cockpit failed with error $?. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
install_systemd | tee -a $log_path
|
||||
|
||||
bash $install_path/install/install_plugins.sh | tee -a $log_path
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "install_plugins failed with error $?. Exiting."
|
||||
|
@ -523,4 +574,4 @@ endtime=$(date +%s)
|
|||
runtime=$((endtime-starttime))
|
||||
echo "Script execution time: $runtime seconds"
|
||||
echo -e "\n-- Install success! ------"
|
||||
echo "Access Websoft9 console by: http://Internet IP:$(grep ListenStream /lib/systemd/system/cockpit.socket | cut -d= -f2) and using Linux user for login"
|
||||
echo "Access Websoft9 console by: http://Internet IP:$(grep ListenStream /lib/systemd/system/cockpit.socket | cut -d= -f2) and using Linux user for login"
|
||||
|
|
|
@ -2,212 +2,206 @@
|
|||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
export PATH
|
||||
|
||||
# Install and upgrade Docker for most Linux distributions
|
||||
# This script is intended from https://get.docker.com and add below:
|
||||
#
|
||||
# - install or update Docker
|
||||
# - support Redhat, CentOS-Stream, OracleLinux, AmazonLinux
|
||||
#
|
||||
# 1. download the script
|
||||
#
|
||||
# $ curl -fsSL https://websoft9.github.io/websoft9/install/install_docker.sh -o install_docker.sh
|
||||
#
|
||||
# 2. verify the script's content
|
||||
#
|
||||
# $ cat install_docker.sh
|
||||
#
|
||||
# 3. run the script with --dry-run to verify the steps it executes
|
||||
#
|
||||
# $ sh install_docker.sh --dry-run
|
||||
#
|
||||
# 4. run the script either as root, or using sudo to perform the installation.
|
||||
#
|
||||
# $ sudo sh install_docker.sh
|
||||
# Download docker install script
|
||||
download_docker_script() {
|
||||
local urls=("https://get.docker.com" "https://getdocker.websoft9.com")
|
||||
local output="get-docker.sh"
|
||||
local retries=10
|
||||
local timeout=5
|
||||
|
||||
|
||||
# It must be exported, otherwise Rocky Linux cannot use it in the yum command
|
||||
export docker_packages="docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"
|
||||
echo_prefix_docker=$'\n[Docker] - '
|
||||
|
||||
docker_exist() {
|
||||
# Check whether the `docker` command exists
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "docker command not exist"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check whether the Docker service is running
|
||||
systemctl is-active docker.service &> /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Docker service is not running, trying to start it..."
|
||||
systemctl start docker.service
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to start Docker service."
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
Install_Docker(){
|
||||
local mirror=$1
|
||||
local timeout=$2
|
||||
local repo_url=$3
|
||||
|
||||
echo "$echo_prefix_docker Installing Docker from ${mirror} with timeout ${timeout} seconds for your system"
|
||||
|
||||
if [ "$mirror" = "Official" ]; then
|
||||
mirror=""
|
||||
fi
|
||||
|
||||
curl -fsSL --max-time 5 https://get.docker.com -o /dev/null
|
||||
|
||||
if [ $? -eq 0 ]; then
|
||||
# For redhat family
|
||||
if [[ -f /etc/redhat-release ]] || command -v amazon-linux-extras >/dev/null 2>&1; then
|
||||
# For CentOS, Fedora, or RHEL(only s390x)
|
||||
if [[ $(cat /etc/redhat-release) =~ "Red Hat" ]] && [[ $(uname -m) == "s390x" ]] || [[ $(cat /etc/redhat-release) =~ "CentOS" ]] || [[ $(cat /etc/redhat-release) =~ "Fedora" ]]; then
|
||||
curl -fsSL https://get.docker.com -o get-docker.sh
|
||||
timeout $timeout sh get-docker.sh --channel stable ${mirror:+--mirror $mirror}
|
||||
else
|
||||
# For other distributions (RedHat, Rocky Linux, ...)
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
sudo dnf install dnf-utils -y > /dev/null
|
||||
sudo dnf config-manager --add-repo $repo_url
|
||||
timeout $timeout sudo dnf install $docker_packages -y
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
sudo yum install yum-utils -y > /dev/null
|
||||
sudo yum-config-manager --add-repo $repo_url
|
||||
if command -v amazon-linux-extras >/dev/null 2>&1; then
|
||||
wget -O /etc/yum.repos.d/CentOS7-Base.repo https://websoft9.github.io/stackhub/apps/roles/role_common/files/CentOS7-Base.repo
|
||||
sudo sed -i "s/\$releasever/7/g" /etc/yum.repos.d/docker-ce.repo
|
||||
timeout $timeout sudo yum install $docker_packages --disablerepo='amzn2-extras,amzn2-core' -y
|
||||
else
|
||||
timeout $timeout sudo yum install $docker_packages -y
|
||||
fi
|
||||
|
||||
else
|
||||
echo "None of the required package managers are installed."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $(cat /etc/os-release) =~ "Amazon Linux" ]]; then
|
||||
sudo dnf install docker -y
|
||||
sudo systemctl enable docker
|
||||
sudo systemctl start docker
|
||||
sudo mkdir -p /usr/local/lib/docker/cli-plugins/
|
||||
sudo curl -SL "https://github.com/docker/compose/releases/latest/download/docker-compose-linux-$(uname -m)" -o /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
sudo chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
fi
|
||||
|
||||
# For Ubuntu, Debian, or Raspbian
|
||||
if type apt >/dev/null 2>&1; then
|
||||
# Wait for apt to be unlocked
|
||||
curl -fsSL https://get.docker.com -o get-docker.sh
|
||||
timeout $timeout sh get-docker.sh --channel stable ${mirror:+--mirror $mirror}
|
||||
fi
|
||||
else
|
||||
echo "can not install by installation script, use special way to install docker"
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
apt --version >/dev/null 2>&1
|
||||
apt_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
sudo dnf install yum-utils -y
|
||||
sudo dnf config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
|
||||
sudo dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
sudo yum install yum-utils -y
|
||||
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
|
||||
sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
elif [ $apt_status -eq 0 ]; then
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ca-certificates curl gnupg lsb-release
|
||||
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
else
|
||||
echo "don't know package tool"
|
||||
fi
|
||||
|
||||
sudo systemctl enable docker
|
||||
sudo systemctl restart docker
|
||||
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
Upgrade_Docker(){
|
||||
if docker_exist; then
|
||||
echo "$echo_prefix_docker Upgrading Docker for your system..."
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
apt --version >/dev/null 2>&1
|
||||
apt_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
sudo dnf update -y $docker_packages
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
sudo yum update -y $docker_packages
|
||||
elif [ $apt_status -eq 0 ]; then
|
||||
sudo apt update -y
|
||||
sudo apt -y install --only-upgrade $docker_packages
|
||||
else
|
||||
echo "Docker installed, but cannot upgrade"
|
||||
fi
|
||||
else
|
||||
local mirrors=("Official" "Official" "AzureChinaCloud" "Aliyun")
|
||||
local urls=("https://download.docker.com/linux/centos/docker-ce.repo" "https://download.docker.com/linux/centos/docker-ce.repo" "https://mirror.azure.cn/docker-ce/linux/centos/docker-ce.repo" "https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo")
|
||||
local timeout=180
|
||||
local max_retries=4
|
||||
local retry_count=0
|
||||
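# Note: the first two entries of the mirrors/urls arrays are both the official
# source, so the official repo is tried twice before falling back to the
# AzureChinaCloud and Aliyun mirrors; retry_count indexes both arrays in lock-step.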
|
||||
while ((retry_count < max_retries)); do
|
||||
Install_Docker ${mirrors[$retry_count]} $timeout ${urls[$retry_count]}
|
||||
if ! docker_exist; then
|
||||
echo "Installation timeout or failed, retrying with ${mirrors[$retry_count]} mirror..."
|
||||
((retry_count++))
|
||||
sleep 3
|
||||
else
|
||||
echo "Docker installed successfully."
|
||||
exit 0
|
||||
fi
|
||||
download_with_tool() {
|
||||
local tool=$1
|
||||
local url=$2
|
||||
local count=0
|
||||
until [ $count -ge $retries ]; do
|
||||
count=$((count+1))
|
||||
echo "[Websoft9] - Attempting to download official Docker install script from: $url using $tool (attempt $count of $retries)"
|
||||
if [ "$tool" = "curl" ]; then
|
||||
curl -fsSL --max-time $timeout $url -o $output
|
||||
else
|
||||
wget --timeout=$timeout -O $output $url
|
||||
fi
|
||||
if verify_download; then
|
||||
echo "[Websoft9] - Download official Docker install script succeeded from: $url using $tool"
|
||||
return 0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
echo "[Websoft9] - Download official Docker install script failed from: $url using $tool after $retries attempts"
|
||||
return 1
|
||||
}
|
||||
|
||||
echo "Docker Installation failed after $max_retries retries."
|
||||
verify_download() {
|
||||
if [ -f "$output" ] && [ -s "$output" ]; then
|
||||
echo "[Websoft9] - Verification official Docker install script succeeded: $output"
|
||||
return 0
|
||||
else
|
||||
echo "[Websoft9] - Verification failed: $output is missing or empty"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
for url in "${urls[@]}"; do
|
||||
download_with_tool "curl" $url && break
|
||||
done
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
for url in "${urls[@]}"; do
|
||||
download_with_tool "wget" $url && break
|
||||
done
|
||||
fi
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "[Websoft9] - Download failed after $retries attempts, please check your network connection."
|
||||
exit 1
|
||||
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
Start_Docker(){
|
||||
# should have Docker server and Docker cli
|
||||
if docker_exist; then
|
||||
echo "$echo_prefix_docker Starting Docker"
|
||||
sudo systemctl enable docker
|
||||
sudo systemctl restart docker
|
||||
else
|
||||
echo "Docker not installed or start failed, exit..."
|
||||
exit 1
|
||||
fi
|
||||
# Install Docker using a custom, distribution-specific method
|
||||
install_docker_custom() {
|
||||
if [ -n "$1" ]; then
|
||||
lsb_dist=$(echo "$1" | tr '[:upper:]' '[:lower:]')
|
||||
else
|
||||
if [ -r /etc/os-release ]; then
|
||||
lsb_dist="$(. /etc/os-release && echo "$ID" | tr '[:upper:]' '[:lower:]')"
|
||||
else
|
||||
echo "[Websoft9] - Unable to determine distribution. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "[Websoft9] - Beginning custom Docker installation for: $lsb_dist"
|
||||
|
||||
local repos_base=("https://download.docker.com/linux" "https://mirrors.aliyun.com/docker-ce/linux" "https://mirror.azure.cn/docker-ce/linux")
|
||||
local repos
|
||||
|
||||
install_docker_from_repo() {
|
||||
local repo=$1
|
||||
if command_exists dnf5; then
|
||||
echo "[Websoft9] - Using dnf5 package manager for Docker installation from repo: $repo."
|
||||
sudo dnf -y -q install dnf-plugins-core
|
||||
sudo dnf5 config-manager addrepo --save-filename=docker-ce.repo --from-repofile=$repo
|
||||
sudo dnf makecache
|
||||
package_manager="dnf5"
|
||||
elif command_exists dnf; then
|
||||
echo "[Websoft9] - Using dnf package manager for Docker installation from repo: $repo."
|
||||
sudo dnf -y -q install dnf-plugins-core
|
||||
sudo dnf config-manager --add-repo $repo
|
||||
sudo dnf makecache
|
||||
package_manager="dnf"
|
||||
else
|
||||
echo "[Websoft9] - Using yum package manager for Docker installation from repo: $repo."
|
||||
sudo yum -y -q install yum-utils
|
||||
sudo yum-config-manager --add-repo $repo
|
||||
sudo yum makecache
|
||||
package_manager="yum"
|
||||
fi
|
||||
sudo $package_manager install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
}
|
||||
|
||||
if command_exists dnf5 || command_exists dnf || command_exists yum; then
|
||||
if [ "$lsb_dist" = "amzn" ]; then
|
||||
sudo yum makecache
|
||||
sudo yum install -y docker
|
||||
sudo mkdir -p /usr/local/lib/docker/cli-plugins/
|
||||
sudo curl -SL "https://github.com/docker/compose/releases/latest/download/docker-compose-linux-$(uname -m)" -o /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
sudo chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
else
|
||||
repos=("${repos_base[@]/%//${lsb_dist}/docker-ce.repo}")
|
||||
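# Appends "/<distro>/docker-ce.repo" to every base URL, e.g. for lsb_dist=centos:
#   https://download.docker.com/linux/centos/docker-ce.repo
#   https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#   https://mirror.azure.cn/docker-ce/linux/centos/docker-ce.repo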
sudo dnf remove -y podman || sudo yum remove -y podman
|
||||
|
||||
for repo in "${repos[@]}"; do
|
||||
install_docker_from_repo $repo && break
|
||||
done
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "[Websoft9] - Installation failed with ${lsb_dist} repo, retrying with rhel and centos repos."
|
||||
for fallback_dist in "rhel" "centos"; do
|
||||
repos=("${repos_base[@]/%//${fallback_dist}/docker-ce.repo}")
|
||||
for repo in "${repos[@]}"; do
|
||||
install_docker_from_repo $repo && break 2
|
||||
done
|
||||
done
|
||||
fi
|
||||
fi
|
||||
elif command_exists apt; then
|
||||
repos=("${repos_base[@]/%//ubuntu}")
|
||||
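# Appends "/ubuntu" to every base URL, e.g. https://download.docker.com/linux/ubuntu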
for repo in "${repos[@]}"; do
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ca-certificates curl
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
sudo curl -fsSL $repo/gpg -o /etc/apt/keyrings/docker.asc
|
||||
sudo chmod a+r /etc/apt/keyrings/docker.asc
|
||||
|
||||
echo \
|
||||
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] $repo \
|
||||
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
|
||||
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
|
||||
sudo apt-get update
|
||||
if sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
else
|
||||
echo "[Websoft9] - Unsupported system distribution: $1. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if sudo systemctl start docker && sudo systemctl enable docker; then
|
||||
if command_exists docker && docker compose version >/dev/null 2>&1; then
|
||||
echo "[Websoft9] - Docker and Docker Compose installation verified successfully."
|
||||
return 0
|
||||
else
|
||||
echo "[Websoft9] - Docker or Docker Compose installation verification failed."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "[Websoft9] - Failed to start Docker."
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
echo -e "\n\n-------- Docker --------"
|
||||
Upgrade_Docker
|
||||
# Install docker by official script
|
||||
install_docker_official() {
|
||||
# define install command parameters
|
||||
install_params=("" "--mirror Aliyun" "--mirror AzureChinaCloud")
|
||||
install_timeout=300 # set timeout for each install attempt in seconds
|
||||
|
||||
for param in "${install_params[@]}"; do
|
||||
cmd="sh get-docker.sh $param"
|
||||
echo "[Websoft9] - Attempting to install Docker with command: $cmd"
|
||||
output=$(timeout $install_timeout $cmd 2>&1)
|
||||
echo "$output"
|
||||
if echo "$output" | grep -q "ERROR: Unsupported distribution"; then
|
||||
lsb_dist=$(echo "$output" | grep "ERROR: Unsupported distribution" | awk -F"'" '{print $2}')
|
||||
echo "[Websoft9] - Detected unsupported distribution: $lsb_dist. Executing custom operation."
|
||||
install_docker_custom "$lsb_dist"
|
||||
exit 1
|
||||
elif echo "$output" | grep -q "ERROR"; then
|
||||
echo "[Websoft9] - Docker installation failed with command: $cmd"
|
||||
install_docker_custom "$lsb_dist"
|
||||
exit 1
|
||||
elif command_exists docker && docker compose version >/dev/null 2>&1; then
|
||||
echo "[Websoft9] - Docker installation succeeded with command: $cmd"
|
||||
return 0
|
||||
elif echo "$output" | grep -q "timeout"; then
|
||||
echo "[Websoft9] - Docker installation attempt timed out with command: $cmd. Trying next mirror."
|
||||
fi
|
||||
done
|
||||
|
||||
echo "[Websoft9] - Docker installation failed after use official script. Attempting custom installation."
|
||||
install_docker_custom "$lsb_dist"
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ -z "$execute_mode" ] || [ "$execute_mode" = "install" ]; then
|
||||
Start_Docker
|
||||
fi
|
||||
command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
|
||||
# download docker install script
|
||||
download_docker_script
|
||||
|
||||
# install docker
|
||||
install_docker_official
|
|
@ -4,24 +4,6 @@ PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
|||
# Export PATH
|
||||
export PATH
|
||||
|
||||
|
||||
# Command-line options
|
||||
# ==============================================================================
|
||||
#
|
||||
# --cockpit
|
||||
# Use the --cockpit option to remove cockpit:
|
||||
#
|
||||
# $ sudo sh install.sh --cockpit
|
||||
#
|
||||
# --files
|
||||
# Use the --files option to remove installed files:
|
||||
#
|
||||
# $ sudo sh install.sh --files
|
||||
#
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
install_path="/data/websoft9/source"
|
||||
systemd_path="/opt/websoft9/systemd"
|
||||
cockpit_plugin_path="/usr/share/cockpit"
|
||||
|
@ -31,18 +13,48 @@ echo -e "\n---Remove Websoft9 backend service containers---"
|
|||
sudo docker compose -p websoft9 down -v
|
||||
|
||||
echo -e "\n---Remove Websoft9 systemd service---"
|
||||
sudo systemctl disable websoft9
|
||||
sudo systemctl stop websoft9
|
||||
rm -rf /lib/systemd/system/websoft9.service
|
||||
|
||||
|
||||
if systemctl list-units --full --all | grep -Fq websoft9.service; then
|
||||
sudo systemctl disable websoft9
|
||||
sudo systemctl stop websoft9
|
||||
rm -rf /lib/systemd/system/websoft9.service
|
||||
else
|
||||
echo "websoft9.service does not exist."
|
||||
fi
|
||||
|
||||
remove_cockpit() {
|
||||
echo -e "\n---Remove Cockpit---"
|
||||
sudo systemctl stop cockpit.socket cockpit
|
||||
for package in $cockpit_packages; do
|
||||
sudo pkcon remove $package -y || true
|
||||
done
|
||||
sudo systemctl disable cockpit.socket cockpit
|
||||
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
|
||||
apt --version >/dev/null 2>&1
|
||||
apt_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
for pkg in $cockpit_packages; do
|
||||
echo "Uninstalling $pkg"
|
||||
sudo dnf remove -y "$pkg" > /dev/null || echo "$pkg failed to uninstall"
|
||||
done
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
for pkg in $cockpit_packages; do
|
||||
echo "Uninstalling $pkg"
|
||||
sudo yum remove -y "$pkg" > /dev/null || echo "$pkg failed to uninstall"
|
||||
done
|
||||
elif [ $apt_status -eq 0 ]; then
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
for pkg in $cockpit_packages; do
|
||||
echo "Uninstalling $pkg"
|
||||
sudo apt-get remove -y "$pkg" > /dev/null || echo "$pkg failed to uninstall"
|
||||
done
|
||||
else
|
||||
echo "Neither apt, dnf nor yum found. Please install one of them and try again."
|
||||
fi  # fixed: use fi instead of end here
|
||||
|
||||
sudo rm -rf /etc/cockpit/*
|
||||
}
|
||||
|
||||
|
@ -51,22 +63,7 @@ remove_files() {
|
|||
sudo rm -rf $install_path/* $systemd_path/* $cockpit_plugin_path/*
|
||||
}
|
||||
|
||||
for arg in "$@"
|
||||
do
|
||||
case $arg in
|
||||
--cockpit)
|
||||
remove_cockpit
|
||||
shift
|
||||
;;
|
||||
--files)
|
||||
remove_files
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo "Unknown argument: $arg"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
remove_cockpit
|
||||
remove_files
|
||||
|
||||
echo -e "\nCongratulations, Websoft9 uninstall is complete!"
|
||||
echo -e "\nCongratulations, Websoft9 uninstall is complete!"
|
||||
|
|
|
@ -4,20 +4,9 @@ PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
|||
cockpit_port="9000"
|
||||
container_name="websoft9-apphub"
|
||||
volume_name="websoft9_apphub_config"
|
||||
|
||||
check_ports() {
|
||||
|
||||
local ports=("$@")
|
||||
for port in "${ports[@]}"; do
|
||||
echo "Check port: $port"
|
||||
if ss -tuln | grep ":$port " >/dev/null && ! systemctl status cockpit.socket | grep "$port" >/dev/null; then
|
||||
echo "Port $port is in use, can not set this port to config.ini"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
echo "All ports are available"
|
||||
return 1
|
||||
}
|
||||
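# Note the inverted return convention: check_ports returns 0 when a port is
# already occupied and 1 when all given ports are free, e.g.:
#   if check_ports "$cockpit_port"; then echo "port busy, keep the old one"; fi
# The caller below relies on this when deciding whether to revert the port in config.ini.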
cockpit_service_path="/lib/systemd/system/cockpit.socket"
|
||||
cockpit_ssl_path="/etc/cockpit/ws-certs.d/"
|
||||
npm_ssl_path="/var/lib/docker/volumes/websoft9_nginx_data/_data/custom_ssl/"
|
||||
|
||||
# get volume from container
|
||||
function get_volume_path() {
|
||||
|
@ -42,28 +31,23 @@ function get_volume_path() {
|
|||
echo "Cannot get volume path"
|
||||
exit 1
|
||||
}
|
||||
|
||||
volume_path=$(get_volume_path "$container_name" "$volume_name")
|
||||
config_path="$volume_path/config.ini"
|
||||
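# Illustrative values (assumed, not guaranteed): volume_path usually resolves to a
# docker-managed directory such as /var/lib/docker/volumes/websoft9_apphub_config/_data,
# so config_path points at the apphub config.ini stored inside that volume.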
cockpit_service_path="/lib/systemd/system/cockpit.socket"
|
||||
FILES="$cockpit_service_path $config_path"
|
||||
|
||||
# Actions to take when a monitored file changes
|
||||
on_change() {
|
||||
sync_cockpit_port() {
|
||||
echo "sync cockpit port from config.ini"
|
||||
set +e
|
||||
cockpit_port=$(docker exec -i websoft9-apphub apphub getconfig --section cockpit --key port)
|
||||
listen_stream=$(grep -Po 'ListenStream=\K[0-9]*' /lib/systemd/system/cockpit.socket)
|
||||
if [ "$cockpit_port" != "$listen_stream" ]; then
|
||||
check_ports "$cockpit_port"
|
||||
if [ $? -eq 0 ]; then
|
||||
sudo docker exec -i websoft9-apphub apphub setconfig --section cockpit --key port --value "$listen_stream"
|
||||
else
|
||||
ex -s -c "g/ListenStream=${listen_stream}/s//ListenStream=${cockpit_port}/" -c wq "$cockpit_service_path"
|
||||
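# The ex one-liner above is equivalent to an in-place substitution such as:
#   sed -i "s/ListenStream=${listen_stream}/ListenStream=${cockpit_port}/" "$cockpit_service_path"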
systemctl daemon-reload
|
||||
systemctl restart cockpit.socket 2> /dev/null
|
||||
systemctl restart cockpit || exit 1
|
||||
set_Firewalld
|
||||
fi
|
||||
|
||||
ex -s -c "g/ListenStream=${listen_stream}/s//ListenStream=${cockpit_port}/" -c wq "$cockpit_service_path"
|
||||
systemctl daemon-reload
|
||||
systemctl restart cockpit.socket 2> /dev/null
|
||||
systemctl restart cockpit || exit 1
|
||||
set_Firewalld
|
||||
|
||||
fi
|
||||
set -e
|
||||
}
|
||||
|
@ -75,9 +59,32 @@ set_Firewalld(){
|
|||
firewall-cmd --reload 2>/dev/null
|
||||
}
|
||||
|
||||
# Monitor /lib/systemd/system/cockpit.socket and config.ini, keeping the port in config.ini the same as in cockpit.socket
|
||||
inotifywait -e modify -m $FILES | while read PATH EVENT FILE; do
|
||||
echo "Set cockpit port by config.ini..."
|
||||
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
|
||||
on_change
|
||||
done
|
||||
force_sync(){
|
||||
echo "Force sync cockpit port and certs"
|
||||
sync_cockpit_port
|
||||
cp -r "${cockpit_ssl_path}"* $npm_ssl_path
|
||||
}
|
||||
|
||||
# when websoft9 restart, force sync cockpit port and certs
|
||||
force_sync
|
||||
|
||||
(
|
||||
# Monitor cockpit.socket and config.ini, making sure the port in config.ini is synced to cockpit.socket
|
||||
inotifywait -e modify,attrib -m $FILES | while read PATH EVENT FILE; do
|
||||
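# inotifywait -m emits one line per event ("<watched path> <EVENTS> [<filename>]");
# read assigns these fields to PATH, EVENT and FILE, which overwrites the PATH
# environment variable, so PATH is re-exported below before any command is run.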
echo "Reset cockpit port when config.ini changed"
|
||||
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
|
||||
sync_cockpit_port
|
||||
done
|
||||
) &
|
||||
|
||||
(
|
||||
# monitor cockpit ssl path and sync to NPM ssl path if changed
|
||||
inotifywait -e create,modify,delete,attrib -m $cockpit_ssl_path | while read PATH EVENT FILE; do
|
||||
echo "Sync CA files from cockipt to NPM when changed"
|
||||
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
|
||||
cp -r "${cockpit_ssl_path}"* $npm_ssl_path
|
||||
done
|
||||
) &
|
||||
|
||||
# Wait for background processes to finish
|
||||
wait
|
|
@ -54,9 +54,9 @@ for i in ${!containers[@]}; do
|
|||
((counter++))
|
||||
done
|
||||
if [[ $success == true ]]; then
|
||||
echo "Successfully retrieved credentials for $container"
|
||||
echo "Successfully get credentials from $container"
|
||||
else
|
||||
echo "Failed to retrieve credentials for $container after $max_retries attempts"
|
||||
echo "Failed to get credentials from $container after $max_retries attempts"
|
||||
fi
|
||||
done
|
||||
|
||||
|
@ -68,10 +68,10 @@ for ((i=0; i<$length; i++)); do
|
|||
container=${containers[$i]}
|
||||
section=${sections[$i]}
|
||||
if [[ -n ${passwords[$container]} ]]; then
|
||||
echo "$container start to set password"
|
||||
echo "Sync credentials of $container to websoft9-apphub"
|
||||
docker exec -i websoft9-apphub apphub setconfig --section $section --key user_name --value ${usernames[$container]}
|
||||
docker exec -i websoft9-apphub apphub setconfig --section $section --key user_pwd --value ${passwords[$container]}
|
||||
else
|
||||
echo "Password for $container is not set or empty. Skipping..."
|
||||
echo "Password of $container is not set or empty. Skipping..."
|
||||
fi
|
||||
done
|
||||
|
|
version.json (10 additions, 10 deletions)
|
@ -1,22 +1,22 @@
|
|||
{
|
||||
"version": "2.1.15-rc1",
|
||||
"version": "2.1.20",
|
||||
"plugins": {
|
||||
"portainer": "0.1.3",
|
||||
"nginx": "0.1.0",
|
||||
"gitea": "0.0.8",
|
||||
"myapps": "0.2.7",
|
||||
"appstore": "0.2.6",
|
||||
"settings": "0.1.4",
|
||||
"settings": "0.1.5",
|
||||
"navigator": "0.5.10"
|
||||
},
|
||||
"OS": {
|
||||
"Fedora": [
|
||||
"38",
|
||||
"37",
|
||||
"35"
|
||||
"41",
|
||||
"40"
|
||||
],
|
||||
"RedHat": [
|
||||
"9",
|
||||
"8",
|
||||
"7"
|
||||
],
|
||||
"CentOS": [
|
||||
|
|