Compare commits

..

No commits in common. "main" and "2.1.14-rc1" have entirely different histories.

40 changed files with 555 additions and 1241 deletions

View file

@ -11,7 +11,7 @@ name: Build image to DockerHub
on:
push:
branches: [main, dev] # Include dev branch
branches: [main]
paths:
- "docker/*/Dockerfile"
@ -38,7 +38,8 @@ jobs:
needs: setup
runs-on: ubuntu-latest
env:
MEDIA_FROM: "source" # You can set it to choice where download from
# You can set it to choice where download from
MEDIA_FROM: "source"
strategy:
matrix: ${{fromJson(needs.setup.outputs.matrix)}}
steps:
@ -52,31 +53,23 @@ jobs:
APP=${{ matrix.app }}
TAG=$(grep 'LABEL version' "docker/$APP/Dockerfile" | cut -d'"' -f2 | xargs)
echo $APP version is $TAG
# Determine the channel based on the branch and TAG
if [[ $GITHUB_REF == *"refs/heads/dev"* ]]; then
if [[ "$TAG" == *"-"* ]]; then
TAGS="$TAG"
echo "CHANNEL=dev" >> $GITHUB_ENV
TAGS="$TAG" # Use the TAG directly for dev
else
if [[ "$TAG" == *"-"* ]]; then
echo "CHANNEL=rc" >> $GITHUB_ENV
TAGS="$TAG"
else
echo "CHANNEL=release" >> $GITHUB_ENV
IFS='.' read -ra PARTS <<< "$TAG"
TAGS="latest"
TAG_PART=""
for i in "${!PARTS[@]}"; do
if [ "$i" -eq 0 ]; then
TAG_PART="${PARTS[$i]}"
else
TAG_PART="${TAG_PART}.${PARTS[$i]}"
fi
TAGS="${TAGS},${TAG_PART}"
done
fi
echo "CHANNEL=release" >> $GITHUB_ENV
IFS='.' read -ra PARTS <<< "$TAG"
TAGS="latest"
TAG_PART=""
for i in "${!PARTS[@]}"; do
if [ "$i" -eq 0 ]; then
TAG_PART="${PARTS[$i]}"
else
TAG_PART="${TAG_PART}.${PARTS[$i]}"
fi
TAGS="${TAGS},${TAG_PART}"
done
fi
echo "Building and pushing Docker image for $APP with tags: $TAGS"
echo "TAGS=$TAGS" >> $GITHUB_ENV
echo "APP=$APP" >> $GITHUB_ENV
@ -166,4 +159,4 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
repository: websoft9dev/${{env.APP}}
readme-filepath: ${{env.README}}
if: needs.setup.outputs.matrix != ''
if: needs.setup.outputs.matrix != ''

View file

@ -1,28 +0,0 @@
name: Build and push to CloudFlare Worker
on:
workflow_dispatch:
jobs:
build:
name: Spellcheck
runs-on: ubuntu-latest
steps:
# The checkout step
- name: Checkout
uses: actions/checkout@v4
- name: Download _worker.js
run: |
curl -o _worker.js https://raw.githubusercontent.com/cmliu/CF-Workers-docker.io/main/_worker.js
- name: Set compatibility date
id: set-date
run: echo "COMPATIBILITY_DATE=$(date +%Y-%m-%d)" >> $GITHUB_ENV
- name: Deploy to cloudflare
uses: cloudflare/wrangler-action@v3
with:
apiToken: ${{ secrets.EDIT_CLOUDFLARE_WORKERS }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
command: deploy _worker.js --name dockerhub --compatibility-date ${{ env.COMPATIBILITY_DATE }}

View file

@ -5,7 +5,6 @@ on:
push:
branches:
- main
- dev
paths:
- "version.json"
@ -25,13 +24,9 @@ jobs:
version_core=${version%%-*}
echo "VERSION=$version" >> $GITHUB_OUTPUT
echo "VERSION_CORE=$version_core" >> $GITHUB_OUTPUT
if [[ $GITHUB_REF == *"refs/heads/dev"* ]]; then
echo "dev branch detected"
echo "CHANNEL=dev" >> $GITHUB_OUTPUT
elif [[ $version == *-* ]]; then
if [[ $version == *-* ]]; then
echo "rc release version"
echo "CHANNEL=rc" >> $GITHUB_OUTPUT
echo "CHANNEL=dev" >> $GITHUB_OUTPUT
else
echo "release version"
echo "CHANNEL=release" >> $GITHUB_OUTPUT
@ -76,7 +71,6 @@ jobs:
destination-dir: ./${{ steps.convert_version.outputs.CHANNEL }}/websoft9
- name: Create Github Release
if: github.ref == 'refs/heads/main' # 仅在 main 分支上触发
uses: softprops/action-gh-release@v1
with:
files: |
@ -90,7 +84,6 @@ jobs:
pages:
name: Build Github Pages
if: github.ref == 'refs/heads/main' # 仅在 main 分支上触发
permissions:
contents: read
pages: write

3
.gitignore vendored
View file

@ -7,5 +7,4 @@ logs
apphub/swagger-ui
apphub/apphub.egg-info
cli/__pycache__
source
node_modules
source

View file

@ -1,16 +1,7 @@
![image](https://github.com/user-attachments/assets/bb01fa37-1f53-4fc6-8992-9f784d02dd40)
[![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](http://www.gnu.org/licenses/gpl-3.0)
[![GitHub last commit](https://img.shields.io/github/last-commit/websoft9/websoft9)](https://github.com/websoft9/websoft9)
[![GitHub Release Date](https://img.shields.io/github/release-date/websoft9/websoft9)](https://github.com/websoft9/websoft9)
[![GitHub Repo stars](https://img.shields.io/github/stars/websoft9/websoft9?style=social)](https://github.com/websoft9/websoft9)
**Certified Deployment on Major Cloud Platforms with business support**
| [![](https://libs.websoft9.com/Websoft9/logo/marketplace/azure-logo.png)](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/websoft9inc.websoft9) | [![](https://libs.websoft9.com/Websoft9/logo/marketplace/aws-logo.png)](https://aws.amazon.com/marketplace/pp/prodview-5jziwpvx4puq4) | [![](https://libs.websoft9.com/Websoft9/logo/marketplace/alibabacloud-logo.png)](https://marketplace.alibabacloud.com/products/201072001/sgcmjj00034378.html) | [![](https://libs.websoft9.com/Websoft9/logo/marketplace/huaweicloud-logo.png)](https://marketplace.huaweicloud.com/intl/contents/bf4480ae-d0af-422c-b246-e2ec67743f4e) |
| ---- | ---- | ---- | ---- |
| [![](https://libs.websoft9.com/Websoft9/logo/marketplace/aliyun-logo.png)](https://market.aliyun.com/products/53690006/cmjj00048735.html?userCode=yetrmi9y) | [![](https://libs.websoft9.com/Websoft9/logo/marketplace/huaweiyun-logo.png)](https://marketplace.huaweicloud.com/contents/29458a42-64b7-4637-aa7c-8bfddea1fb72#productid=OFFI1005787756558913536) | | |
[![GitHub Repo stars](https://img.shields.io/github/stars/websoft9/websoft9?style=social)](https://github.com/websoft9/websoft9)
# What is Websoft9?
@ -62,6 +53,7 @@ Need root privileges user to install Websoft9, if you use no-root user you can `
# Install by default
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh
# Install Websoft9 with parameters
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh --port 9000 --channel release --path "/data/websoft9/source" --version "latest"
```

View file

@ -1,98 +0,0 @@
import sys
import os
import json
import subprocess
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import click
from src.services.apikey_manager import APIKeyManager
from src.services.settings_manager import SettingsManager
from src.core.exception import CustomException
from src.core.config import ConfigManager
@click.group()
def cli():
pass
@cli.command()
def genkey():
"""Generate a new API key"""
try:
key = APIKeyManager().generate_key()
click.echo(f"{key}")
except CustomException as e:
raise click.ClickException(e.details)
except Exception as e:
raise click.ClickException(str(e))
@cli.command()
def getkey():
"""Get the API key"""
try:
key = APIKeyManager().get_key()
click.echo(f"{key}")
except CustomException as e:
raise click.ClickException(e.details)
except Exception as e:
raise click.ClickException(str(e))
@cli.command()
@click.option('--section',required=True, help='The section name')
@click.option('--key', required=True, help='The key name')
@click.option('--value', required=True,help='The value of the key')
def setconfig(section, key, value):
"""Set a config value"""
try:
SettingsManager().write_section(section, key, value)
except CustomException as e:
raise click.ClickException(e.details)
except Exception as e:
raise click.ClickException(str(e))
@cli.command()
@click.option('--section',required=True, help='The section name')
@click.option('--key', help='The key name')
def getconfig(section, key):
"""Get a config value"""
try:
if key is None:
value = SettingsManager().read_section(section)
value = json.dumps(value)
click.echo(f"{value}")
else:
value = SettingsManager().read_key(section, key)
click.echo(f"{value}")
except CustomException as e:
raise click.ClickException(e.details)
except Exception as e:
raise click.ClickException(str(e))
@cli.command()
@click.option('--appname',required=True, help='The App Name')
@click.option('--appid',required=True, help='The App Id')
@click.option('--github_email', help='The Github Email')
@click.option('--github_user', help='The Github User')
def push(appname, appid, github_email, github_user):
"""Push the app to the Github"""
# 从配置文件读取gitea的用户名和密码
try:
giteat_user = ConfigManager().get_value("gitea", "user_name")
giteat_pwd = ConfigManager().get_value("gitea", "user_pwd")
except CustomException as e:
raise click.ClickException(e.details)
except Exception as e:
raise click.ClickException(str(e))
# 拼接git仓库的url
repo_url = f"http://{giteat_user}:{giteat_pwd}@websoft9-git:3000/websoft9/{appid}.git"
# 执行git clone命令
try:
subprocess.run(["git", "clone", repo_url])
except Exception as e:
raise click.ClickException(str(e))
if __name__ == "__main__":
cli()

View file

@ -1,5 +1,3 @@
# setup command: pip install -e .
from setuptools import find_packages, setup
setup(

View file

@ -1,19 +1,13 @@
import sys
import os
import uuid
import json
import shutil
import requests
import subprocess
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import click
from dotenv import dotenv_values, set_key,unset_key
from src.services.apikey_manager import APIKeyManager
from src.services.settings_manager import SettingsManager
from src.core.exception import CustomException
from src.core.config import ConfigManager
@click.group()
def cli():
@ -71,114 +65,6 @@ def getconfig(section, key):
raise click.ClickException(e.details)
except Exception as e:
raise click.ClickException(str(e))
@cli.command()
@click.option('--appid',required=True, help='The App Id')
@click.option('--github_token', required=True, help='The Github Token')
def commit(appid, github_token):
"""Commit the app to the Github"""
try:
# 从配置文件读取gitea的用户名和密码
gitea_user = ConfigManager().get_value("gitea", "user_name")
gitea_pwd = ConfigManager().get_value("gitea", "user_pwd")
# 将/tmp目录作为工作目录如果不存在则创建如果存在则清空
work_dir = "/tmp/git"
if os.path.exists(work_dir):
shutil.rmtree(work_dir)
os.makedirs(work_dir)
os.chdir(work_dir)
# 执行git clone命令将gitea仓库克隆到本地
gitea_repo_url = f"http://{gitea_user}:{gitea_pwd}@websoft9-git:3000/websoft9/{appid}.git"
subprocess.run(["git", "clone", gitea_repo_url], check=True)
# 执行git clone命令将github仓库克隆到本地(dev分支)
github_repo_url = f"https://github.com/Websoft9/docker-library.git"
subprocess.run(["git", "clone", "--branch", "dev", github_repo_url], check=True)
# 解析gitea_repo_url下载的目录下的.env文件
gitea_env_path = os.path.join(work_dir, appid, '.env')
gitea_env_vars = dotenv_values(gitea_env_path)
w9_app_name = gitea_env_vars.get('W9_APP_NAME')
if not w9_app_name:
raise click.ClickException("W9_APP_NAME not found in Gitea .env file")
# 解析github_repo_url下载的目录下的/apps/W9_APP_NAME目录下的.env文件
github_env_path = os.path.join(work_dir, 'docker-library', 'apps', w9_app_name, '.env')
github_env_vars = dotenv_values(github_env_path)
# 需要复制的变量
env_vars_to_copy = ['W9_URL', 'W9_ID']
port_set_vars = {key: value for key, value in github_env_vars.items() if key.endswith('PORT_SET')}
# 将这些值去替换gitea_repo_url目录下.env中对应项的值
for key in env_vars_to_copy:
if key in github_env_vars:
set_key(gitea_env_path, key, github_env_vars[key])
for key, value in port_set_vars.items():
set_key(gitea_env_path, key, value)
# 删除W9_APP_NAME
unset_key(gitea_env_path, 'W9_APP_NAME')
# 将整个gitea目录覆盖到docker-library/apps/w9_app_name目录
gitea_repo_dir = os.path.join(work_dir, appid)
github_app_dir = os.path.join(work_dir, 'docker-library', 'apps', w9_app_name)
if os.path.exists(github_app_dir):
shutil.rmtree(github_app_dir)
shutil.copytree(gitea_repo_dir, github_app_dir)
# 切换到docker-library目录
os.chdir(os.path.join(work_dir, 'docker-library'))
# 创建一个新的分支
new_branch_name = f"update-{w9_app_name}-{uuid.uuid4().hex[:8]}"
subprocess.run(["git", "checkout", "-b", new_branch_name], check=True)
# 将修改提交到新的分支
subprocess.run(["git", "add", "."], check=True)
subprocess.run(["git", "commit", "-m", f"Update {w9_app_name}"], check=True)
# 推送新的分支到 GitHub
# subprocess.run(["git", "push", "origin", new_branch_name], check=True)
# 推送新的分支到 GitHub
github_push_url = f"https://{github_token}:x-oauth-basic@github.com/websoft9/docker-library.git"
subprocess.run(["git", "push", github_push_url, new_branch_name], check=True)
# 创建 Pull Request 使用 GitHub API
pr_data = {
"title": f"Update {w9_app_name}",
"head": new_branch_name,
"base": "dev",
"body": "Automated update"
}
response = requests.post(
f"https://api.github.com/repos/websoft9/docker-library/pulls",
headers={
"Authorization": f"token {github_token}",
"Accept": "application/vnd.github.v3+json"
},
data=json.dumps(pr_data)
)
if response.status_code != 201:
raise click.ClickException(f"Failed to create Pull Request: {response.json()}")
click.echo(f"Pull Request created: {response.json().get('html_url')}")
except subprocess.CalledProcessError as e:
raise click.ClickException(f"Command failed: {e}")
except Exception as e:
raise click.ClickException(str(e))
finally:
# 删除工作目录
if os.path.exists(work_dir):
shutil.rmtree(work_dir)
if __name__ == "__main__":
cli()

View file

@ -1,26 +1,26 @@
[nginx_proxy_manager]
base_url = http://websoft9-proxy:81/api
user_name = admin@mydomain.com
user_pwd = LMFuCnajkQhK3zeb
user_pwd = cPCB9bSsX91ljxCQ
nike_name = admin
listen_port = 443
listen_port = 80
[gitea]
base_url = http://websoft9-git:3000/api/v1
user_name = websoft9
user_email = admin@mydomain.com
user_pwd = 93HDu6tUWeGx
user_pwd = KhuXTC7idguL
[portainer]
base_url = http://websoft9-deployment:9000/api
user_name = admin
user_pwd = 93sX)LLHKJY$
user_pwd = yzJZ938iOoDG
[api_key]
key = cc9223b3055471a6f4f9654e08371816a9637ba1c57383617b0684b92ac7b2f4
[domain]
wildcard_domain =
wildcard_domain =
[cockpit]
port = 9000

View file

@ -1,4 +1,3 @@
from datetime import datetime
import json
import threading
@ -216,10 +215,6 @@ class PortainerAPI:
"repositoryAuthentication": True,
"RepositoryUsername": usr_name,
"RepositoryPassword": usr_password,
"env":[{
"name": "DEPLOY_TIME",
"value": "-"+datetime.now().strftime("%Y%m%d%H%M%S")
}]
},
)
@ -377,10 +372,7 @@ class PortainerAPI:
path=f"stacks/{stackID}/git/redeploy",
params={"endpointId": endpointId},
json={
"env":[{
"name": "DEPLOY_TIME",
"value": "-"+datetime.now().strftime("%Y%m%d%H%M%S")
}],
"env":[],
"prune":False,
"RepositoryReferenceName":"",
"RepositoryAuthentication":True,

View file

@ -429,28 +429,24 @@ class AppManger:
# Verify the app is web app
is_web_app = envHelper.get_value("W9_URL")
# url_with_port = envHelper.get_value("W9_URL_WITH_PORT")
w9_url_with_replace = envHelper.get_value("W9_URL_REPLACE")
if is_web_app is not None:
if w9_url_with_replace is None:
url_with_port = envHelper.get_value("W9_URL_WITH_PORT")
if is_web_app is not None and url_with_port is not None:
try:
ipaddress.ip_address(domain_names[0])
envHelper.set_value("W9_URL", domain_names[0] + ":" + envHelper.get_value("W9_HTTP_PORT_SET"))
except ValueError:
envHelper.set_value("W9_URL", domain_names[0])
else:
try:
ipaddress.ip_address(domain_names[0])
#envHelper.set_value("W9_URL", domain_names[0] + ":" + envHelper.get_value("W9_HTTP_PORT_SET"))
envHelper.set_value("W9_URL", domain_names[0] + ":" + (envHelper.get_value("W9_HTTP_PORT_SET") or envHelper.get_value("W9_HTTPS_PORT_SET")))
except ValueError:
envHelper.set_value("W9_URL", domain_names[0])
# if is_web_app is not None and url_with_port is not None:
# try:
# ipaddress.ip_address(domain_names[0])
# envHelper.set_value("W9_URL", domain_names[0] + ":" + envHelper.get_value("W9_HTTP_PORT_SET"))
# except ValueError:
# envHelper.set_value("W9_URL", domain_names[0])
# elif url_with_port is None:
# envHelper.set_value("W9_URL", domain_names[0])
elif url_with_port is None:
envHelper.set_value("W9_URL", domain_names[0])
# validate is bind ip(proxy_enabled is false)
# if not proxy_enabled:
# envHelper.set_value("W9_URL", domain_names[0])
# else:
# replace_domain_name = domain_names[0]
# replace_domain_name = replace_domain_name.replace(replace_domain_name.split(".")[0], app_id, 1)
# domain_names[0] = replace_domain_name
# envHelper.set_value("W9_URL", domain_names[0])
# Commit and push to remote repo
self._init_local_repo_and_push_to_remote(app_tmp_dir_path,repo_url)
@ -601,24 +597,6 @@ class AppManger:
portainerManager.redeploy_stack(stack_id,endpointId,pull_image,user_name,user_pwd)
logger.access(f"Redeployed app: [{app_id}]")
app_info = self.get_app_by_id(app_id,endpointId)
forward_ports = [domain.get("forward_port") for domain in app_info.domain_names]
proxy_ids = [domain.get("id") for domain in app_info.domain_names]
if forward_ports:
http_port = app_info.env.get("W9_HTTP_PORT")
https_port = app_info.env.get("W9_HTTPS_PORT")
forward_port = http_port if http_port else https_port
forward_ports_str = [str(port) for port in forward_ports]
if not all(port == forward_port for port in forward_ports_str):
for proxy_id in proxy_ids:
ProxyManager().update_proxy_port_by_app(proxy_id, forward_port)
logger.access(f"Updated proxy port: {forward_port} for app: {app_id}")
def uninstall_app(self,app_id:str,purge_data:bool,endpointId:int = None):
"""
Uninstall app
@ -1102,7 +1080,7 @@ class AppManger:
# Get the w9_url and w9_url_replace
w9_url_replace = next((element.get("w9_url_replace") for element in app_info.domain_names if element.get("id") == proxy_id), None)
w9_url = next((element.get("w9_url") for element in app_info.domain_names if element.get("id") == proxy_id), None)
# validate w9_url_replace is true
if w9_url_replace:
domain_names = host.get("domain_names",None)
@ -1113,8 +1091,7 @@ class AppManger:
if w9_url in domain_names:
new_w9_url = None
if len(app_proxys) == 1 and app_proxys[0].get("id") == proxy_id:
# 如果w9_url_with_port存在并且值为: true
new_w9_url = client_host+":"+ (app_info.env.get("W9_HTTP_PORT_SET") or app_info.env.get("W9_HTTPS_PORT_SET"))
new_w9_url = client_host
elif len(app_proxys) > 1:
# Get the first proxy_host
proxy_host = next((proxy for proxy in app_proxys if proxy.get("id") != proxy_id), None)
@ -1249,4 +1226,4 @@ class AppManger:
raise e
except Exception as e:
logger.error(f"Update the git repo env file error:{e}")
raise CustomException()
raise CustomException()

View file

@ -205,44 +205,6 @@ class ProxyManager:
logger.error(f"Update proxy host:{proxy_id} error:{e}")
raise CustomException()
def update_proxy_port_by_app(self, proxy_id: int, forward_port: int):
"""
Update a proxy host's forward port
Args:
proxy_id (int): Proxy id
forward_port (int): Forward port
Returns:
dict: Proxy host
"""
# Get proxy host by id
req_json = self.get_proxy_host_by_id(proxy_id)
try:
if req_json is None:
raise CustomException(
status_code=400,
message=f"Invalid Request",
details=f"Proxy host:{proxy_id} not found"
)
# update forward_port
req_json["forward_port"] = forward_port
# delete useless keys from req_json(because the req_json is from get_proxy_host_by_id and update_proxy_host need less keys)
keys_to_delete = ["id", "created_on", "modified_on", "owner_user_id", "enabled", "certificate", "owner", "access_list", "use_default_location", "ipv6"]
for key in keys_to_delete:
req_json.pop(key, None)
response = self.nginx.update_proxy_host(proxy_id=proxy_id, json=req_json)
if response.status_code == 200:
return response.json()
else:
self._handler_nginx_error(response)
except CustomException as e:
raise e
except Exception as e:
logger.error(f"Update proxy host:{proxy_id} error:{e}")
raise CustomException()
def get_proxy_host_by_app(self,app_id:str):
"""
Get proxy host by app
@ -333,4 +295,4 @@ class ProxyManager:
except Exception as e:
logger.error(f"Get proxy host by id:{proxy_id} error:{e}")
raise CustomException()

View file

@ -1,8 +1,7 @@
### enhancement:
- Upgrade access to the Websoft9 platform from HTTP to HTTPS #566
- cockpit upgradeport change to 9090 when linux upgrage #556
- upgrade error at CentOS Stream #554
- Optimize install_docker.sh #547
- Nginxproxymanage add named volume nginx_custom #527
- Nginx how to dynamically apply proxy #516
- websoft9-apphub service logs unfriendly #518
### appstore:
- [cannot pull image for jetty](https://github.com/Websoft9/docker-library/issues/712)
- [tomee pull tomee9 image error](https://github.com/Websoft9/docker-library/issues/702)
- [some errors of flowise on AWS ubuntu](https://github.com/Websoft9/docker-library/issues/693)

View file

@ -1,5 +1,5 @@
# docs: https://cockpit-project.org/guide/latest/cockpit.conf.5.html
[WebService]
#AllowUnencrypted = true
AllowUnencrypted = true
LoginTitle= Websoft9 - Linux AppStore

View file

@ -1,4 +1,4 @@
APPHUB_VERSION=0.1.9
APPHUB_VERSION=0.1.4
DEPLOYMENT_VERSION=2.20.3
GIT_VERSION=1.21.9
PROXY_VERSION=2.11.3
PROXY_VERSION=2.11.3

View file

@ -1,11 +1,11 @@
# This file can running at actions
# MEDIA_VERSION and LIBRARY_VERSION will trigger its release
# modify time: 202501021450, you can modify here to trigger Docker Build action
# modify time: 202410151230, you can modify here to trigger Docker Build action
FROM python:3.10-slim-bullseye
LABEL maintainer="Websoft9<help@websoft9.com>"
LABEL version="0.1.9"
LABEL version="0.1.4"
WORKDIR /websoft9
@ -39,7 +39,6 @@ RUN apt update && apt install -y --no-install-recommends curl git jq cron iprout
cp -r ./w9source/apphub/src/config ./config && \
cp -r ./w9source/docker/apphub/script ./script && \
curl -o ./script/update_zip.sh $SOURCE_GITHUB_PAGES/scripts/update_zip.sh && \
curl -o /websoft9/version.json $SOURCE_GITHUB_PAGES/version.json && \
pip install --no-cache-dir --upgrade -r apphub/requirements.txt && \
pip install -e ./apphub && \
# Clean cache and install files
@ -47,9 +46,6 @@ RUN apt update && apt install -y --no-install-recommends curl git jq cron iprout
apt clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc /usr/share/doc-base
# Create a file named migration_flag
RUN touch /websoft9/migration_flag
# supervisor
COPY config/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
COPY config/logging_config.yaml /etc/supervisor/conf.d/logging_config.yaml

View file

@ -6,11 +6,7 @@ export PATH
set -e
# execute migration script when container create
if [ -f /websoft9/migration_flag ]; then
bash /websoft9/script/migration.sh
rm -f /websoft9/migration_flag
fi
bash /websoft9/script/migration.sh
try_times=5
supervisord

View file

@ -3,9 +3,10 @@
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - start to migrate config.ini"
migrate_ini() {
# Combine source_ini to target ini
# Define file paths, use template ini and syn exsit items from target ini
export target_ini="$1"
export source_ini="$2"
export template_ini="$2"
python3 - <<EOF
import configparser
@ -13,44 +14,36 @@ import os
import sys
target_ini = os.environ['target_ini']
source_ini = os.environ['source_ini']
template_ini = os.environ['template_ini']
# Create two config parsers
target_parser = configparser.ConfigParser()
source_parser = configparser.ConfigParser()
template_parser = configparser.ConfigParser()
try:
target_parser.read(target_ini)
source_parser.read(source_ini)
template_parser.read(template_ini)
except configparser.MissingSectionHeaderError:
print("Error: The provided files are not valid INI files.")
sys.exit(1)
# use target_parser to override source_parser
# use target_parser to override template_parser
for section in target_parser.sections():
if source_parser.has_section(section):
if template_parser.has_section(section):
for key, value in target_parser.items(section):
if source_parser.has_option(section, key):
source_parser.set(section, key, value)
if template_parser.has_option(section, key):
template_parser.set(section, key, value)
with open(target_ini, 'w') as f:
source_parser.write(f)
template_parser.write(f)
EOF
}
# Special migration
post_migration(){
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Set listen_port to nginx_proxy_manager"
config_file="/websoft9/config/config.ini"
listen_port=$(grep -Po '^\s*listen_port\s*=\s*\K[0-9]+' "$config_file")
apphub setconfig --section nginx_proxy_manager --key listen_port --value "$listen_port"
}
migrate_ini "/websoft9/apphub/src/config/config.ini" "/websoft9/config/config.ini"
migrate_ini "/websoft9/apphub/src/config/system.ini" "/websoft9/config/system.ini"
post_migration
if [ $? -eq 0 ]; then
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Success to update config.ini"

View file

@ -1,20 +1,9 @@
#!/bin/bash
channel=release
if [ -f /websoft9/version.json ]; then
version=$(cat /websoft9/version.json | jq -r .version)
if [[ $version == *rc* ]]; then
channel=dev
fi
fi
echo "channel is $channel"
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Compare remote version and local version." | tee -a /var/log/supervisord.log
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Download remote packages and replace local data." | tee -a /var/log/supervisord.log
bash /websoft9/script/update_zip.sh --channel $channel --package_name "media-latest.zip" --sync_to "/websoft9/media"
bash /websoft9/script/update_zip.sh --channel $channel --package_name "library-latest.zip" --sync_to "/websoft9/library"
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Download remote packages and replace local data." | tee -a /var/log/supervisord.log
bash /websoft9/script/update_zip.sh --package_name "media-latest.zip" --sync_to "/websoft9/media"
bash /websoft9/script/update_zip.sh --package_name "library-latest.zip" --sync_to "/websoft9/library"
echo "$(date '+%Y-%m-%d %H:%M:%S') - INFO - Success to update library and media."

View file

@ -1,61 +0,0 @@
addEventListener('fetch', event => {
event.respondWith(handleRequest(event.request))
})
var backends = [];
async function checkHealth() {
const healthChecks = backends.map(async url => {
const start = Date.now();
try {
const response = await fetch(url, { method: 'HEAD' });
const end = Date.now();
return {
url,
healthy: response.ok,
responseTime: response.ok ? end - start : Infinity
};
} catch (error) {
return {
url,
healthy: false,
responseTime: Infinity
};
}
});
return await Promise.all(healthChecks);
}
async function handleRequest(request) {
// 检查健康状态
const healthResults = await checkHealth();
// 过滤出健康的后端服务器
const healthyBackends = healthResults.filter(result => result.healthy);
if (healthyBackends.length === 0) {
return new Response('All backend servers are down', { status: 503 });
}
// 按响应时间排序健康的后端服务器
healthyBackends.sort((a, b) => a.responseTime - b.responseTime);
// 尝试按顺序转发请求到健康的后端服务器
for (const backend of healthyBackends) {
try {
const url = new URL(request.url);
url.hostname = new URL(backend.url).hostname;
const modifiedRequest = new Request(url, request);
const response = await fetch(modifiedRequest);
if (response.ok) {
return response;
}
} catch (error) {
console.error(`Failed to fetch from ${backend.url}: ${error}`);
}
}
// 如果所有后端服务器都失败
return new Response('Failed to fetch from all backends', { status: 502 });
}

View file

@ -3,6 +3,5 @@
"log-opts": {
"max-size": "10m",
"max-file": "5"
},
"registry-mirrors": ["https://dockerhub.websoft9.com"]
}
}

View file

@ -1,4 +1,4 @@
# modify time: 202412111429, you can modify here to trigger Docker Build action
# modify time: 202407291102, you can modify here to trigger Docker Build action
# step1: Build entrypoint execute program init_portainer by golang
FROM golang:latest AS builder
@ -16,4 +16,4 @@ LABEL maintainer="websoft9<help@websoft9.com>"
LABEL version="2.20.3"
COPY --from=builder /init_portainer /
ENTRYPOINT ["/init_portainer"]
ENTRYPOINT ["/init_portainer"]

View file

@ -22,9 +22,7 @@ const (
retryDelay = 5 * time.Second
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789@$()_"
credentialFilePath = "/data/credential"
initCheckURL = portainerURL + "/users/admin/check"
waitTimeout = 60 * time.Second
waitInterval = 2 * time.Second
initFlagFilePath = "/data/init.flag"
)
type Credentials struct {
@ -33,22 +31,26 @@ type Credentials struct {
}
func main() {
// 启动并等待 Portainer 启动
cmd, err := startAndWaitForPortainer(os.Args[1:]...)
if err != nil {
log.Fatalf("Failed to start and wait for Portainer: %v", err)
}
// 检查是否已经初始化
if isPortainerInitialized() {
log.Println("Portainer is already initialized.")
// 等待 Portainer 进程结束
if err := cmd.Wait(); err != nil {
log.Fatalf("Portainer process exited with error: %v", err)
}
// 检查初始化标志文件是否存在
if _, err := os.Stat(initFlagFilePath); err == nil {
log.Println("Initialization has already been completed by another instance.")
startPortainer()
return
}
// 启动 Portainer
// cmd := exec.Command("/portainer")
cmd := exec.Command("/portainer", os.Args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
log.Fatalf("Failed to start Portainer: %v", err)
}
// 等待 Portainer 启动
waitForPortainer()
// 初始化 Portainer
adminUsername := "admin"
adminPassword := generateRandomPassword(12)
@ -63,6 +65,10 @@ func main() {
log.Fatalf("Failed to initialize local endpoint: %v", err)
} else {
fmt.Println("Portainer initialization completed successfully.")
// 创建初始化标志文件
if err := ioutil.WriteFile(initFlagFilePath, []byte("initialized"), 0644); err != nil {
log.Fatalf("Failed to create initialization flag file: %v", err)
}
}
}
}
@ -73,31 +79,39 @@ func main() {
}
}
func startAndWaitForPortainer(args ...string) (*exec.Cmd, error) {
cmd := exec.Command("/portainer", args...)
func startPortainer() {
cmd := exec.Command("/portainer")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
return nil, fmt.Errorf("failed to start Portainer: %w", err)
log.Fatalf("Failed to start Portainer: %v", err)
}
timeout := time.After(waitTimeout)
ticker := time.NewTicker(waitInterval)
defer ticker.Stop()
// 等待 Portainer 进程结束
if err := cmd.Wait(); err != nil {
log.Fatalf("Portainer process exited with error: %v", err)
}
}
func waitForPortainer() {
timeout := time.Duration(60) * time.Second
start := time.Now()
for {
select {
case <-timeout:
return nil, fmt.Errorf("timeout waiting for Portainer")
case <-ticker.C:
resp, err := http.Get(portainerURL + "/system/status")
if err == nil && resp.StatusCode == http.StatusOK {
fmt.Println("Portainer is up……!")
return cmd, nil
}
fmt.Println("Waiting for Portainer...")
resp, err := http.Get(portainerURL + "/system/status")
if err == nil && resp.StatusCode == http.StatusOK {
fmt.Println("Portainer is up!")
break
}
if time.Since(start) > timeout {
fmt.Println("Timeout waiting for Portainer")
os.Exit(1)
}
fmt.Println("Waiting for Portainer...")
time.Sleep(2 * time.Second)
}
}
@ -210,7 +224,15 @@ func writeCredentialsToFile(password string) error {
func retryRequest(method, url, contentType string, body *bytes.Buffer) (*http.Response, error) {
client := &http.Client{}
for i := 0; i < maxRetries; i++ {
req, err := http.NewRequest(method, url, body)
var req *http.Request
var err error
if body != nil {
req, err = http.NewRequest(method, url, bytes.NewBuffer(body.Bytes()))
} else {
req, err = http.NewRequest(method, url, nil)
}
if err != nil {
return nil, fmt.Errorf("error creating request: %w", err)
}
@ -225,23 +247,4 @@ func retryRequest(method, url, contentType string, body *bytes.Buffer) (*http.Re
time.Sleep(retryDelay)
}
return nil, fmt.Errorf("max retries reached")
}
func isPortainerInitialized() bool {
resp, err := http.Get(initCheckURL)
if err != nil {
log.Fatalf("Failed to check Portainer initialization status: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent {
return true
}
if resp.StatusCode == http.StatusNotFound {
return false
}
log.Fatalf("Unexpected response status: %d", resp.StatusCode)
return false
}
}

View file

@ -1,4 +1,4 @@
# modify time: 202412211783, you can modify here to trigger Docker Build action
# modify time: 202407251422, you can modify here to trigger Docker Build action
# from Dockerfile: https://github.com/NginxProxyManager/nginx-proxy-manager/blob/develop/docker/Dockerfile
# from image: https://hub.docker.com/r/jc21/nginx-proxy-manager
@ -7,12 +7,7 @@ FROM jc21/nginx-proxy-manager:2.11.3
LABEL maintainer="Websoft9<help@websoft9.com>"
LABEL version="2.11.3"
COPY README.md /data/nginx/README.md
RUN mkdir /data/nginx/custom
RUN mkdir -p /etc/websoft9
COPY ./config/http.conf /data/nginx/custom/http.conf
COPY ./config/landing/ /etc/websoft9/landing
COPY ./config/initproxy.conf /etc/websoft9/initproxy.conf
COPY ./config/initproxy.conf /data/nginx/default_host/initproxy.conf
COPY ./init_nginx.sh /app/init_nginx.sh
RUN chmod +x /app/init_nginx.sh
@ -35,4 +30,4 @@ RUN proxy_line=("proxy_set_header Upgrade \$http_upgrade;" "proxy_set_header Con
fi; \
done
ENTRYPOINT [ "/app/init_nginx.sh" ]
ENTRYPOINT [ "/app/init_nginx.sh" ]

View file

@ -4,4 +4,3 @@ From official Nginx Proxy Manager image, and:
- Copy the initproxy.conf file to the nginx directory to initialize the custom configuration
- Initialize username and password through environment variables
- Add landing page designed by [figma](https://www.figma.com/)

View file

@ -1,2 +0,0 @@
limit_req_zone $binary_remote_addr zone=w9_limit_req_zone:10m rate=30r/s;
limit_conn_zone $binary_remote_addr zone=w9_limit_conn_zone:10m;

View file

@ -1,20 +1,14 @@
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name ~\.?[0-9a-zA-Z]$;
return 301 https://$host$request_uri;
}
# ------------------------------------------------------------
# domain.com
# ------------------------------------------------------------
server {
listen 443 ssl default_server;
listen [::]:443 ssl default_server;
listen 80;
listen [::]:80;
server_name ~\.?[0-9a-zA-Z]$;
ssl_certificate /data/custom_ssl/0-self-signed.cert;
ssl_certificate_key /data/custom_ssl/0-self-signed.key;
access_log /data/logs/proxy-host-1_access.log proxy;
error_log /data/logs/proxy-host-1_error.log warn;
@ -23,8 +17,6 @@ server {
}
# Serve the Websoft9 landing page; all other handling goes through the
# shared proxy include.
location / {
    root /data/nginx/default_www/landing;
    # Fix: nginx requires a terminating ';' on simple directives — the
    # original "index index.html" (no semicolon) is a config syntax error.
    index index.html;
    # Proxy!
    include conf.d/include/proxy.conf;
}

File diff suppressed because one or more lines are too long

View file

@ -1,76 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="横版单英" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px"
y="0px" width="502px" height="128px" viewBox="0 0 502 128" enable-background="new 0 0 502 128" xml:space="preserve">
<g>
<g>
<path fill="#333333" d="M243.805,63.855c-0.189-0.188-0.394-0.409-0.615-0.663c-0.223-0.251-0.489-0.536-0.805-0.853
c-0.632-0.63-1.482-1.197-2.558-1.703c-1.073-0.506-2.62-0.758-4.64-0.758c-4.421,0-7.782,1.547-10.086,4.641
c-2.306,3.093-3.457,6.503-3.457,10.228c0,0.063,0,0.111,0,0.142c0,0.033,0,0.081,0,0.143c0.062,3.283,1.231,6.282,3.504,8.997
c2.273,2.715,5.618,4.072,10.039,4.072c4.607,0,7.718-1.09,9.328-3.268c1.609-2.178,2.92-5.225,3.932-9.139
c0-0.063,0.013-0.109,0.046-0.143c0.031-0.031,0.048-0.078,0.048-0.143l7.48,0.568v1.8c0,0.947-0.111,1.927-0.33,2.936
c-0.223,1.011-0.554,2.021-0.994,3.03c-1.326,2.906-3.616,5.558-6.867,7.956c-3.251,2.399-7.465,3.599-12.643,3.599
c-6.125,0-11.238-1.925-15.342-5.776c-4.105-3.851-6.156-8.775-6.156-14.774c0-6.503,1.736-11.49,5.209-14.963
c3.472-3.472,7.26-5.684,11.364-6.631c0.883-0.189,1.72-0.33,2.51-0.426c0.789-0.095,1.594-0.142,2.415-0.142
c4.672,0,8.681,1.295,12.027,3.883c3.347,2.589,6.062,5.745,8.145,9.471l-28.411,15.343l-3.314-5.968L243.805,63.855z"/>
<path fill="#333333" d="M270.892,34.971v38.355c0,4.356,1.12,7.908,3.362,10.654c2.239,2.746,5.412,4.12,9.518,4.12
c3.661,0,6.55-1.057,8.666-3.173c2.113-2.115,3.486-4.436,4.118-6.961c0.189-0.693,0.333-1.356,0.427-1.989
c0.096-0.63,0.143-1.262,0.143-1.894c0-4.546-1.137-7.939-3.408-10.18c-2.274-2.241-4.771-3.553-7.483-3.931
c-0.317,0-0.615-0.016-0.899-0.049c-0.283-0.031-0.584-0.046-0.9-0.046c-1.768,0-3.3,0.235-4.593,0.71
c-1.295,0.474-2.542,0.995-3.741,1.563c-0.063,0-0.126,0.016-0.189,0.048c-0.063,0.031-0.126,0.046-0.188,0.046
c-0.379,0.189-0.743,0.365-1.09,0.521c-0.349,0.159-0.71,0.331-1.09,0.521v-7.481c1.199-0.693,2.432-1.263,3.695-1.705
c1.261-0.439,2.524-0.788,3.788-1.04c0.693-0.063,1.356-0.126,1.987-0.19c0.632-0.062,1.264-0.095,1.894-0.095
c5.241,0,9.944,1.784,14.112,5.351c4.168,3.568,6.251,8.382,6.251,14.442c0,5.746-1.832,10.971-5.493,15.675
c-3.662,4.703-8.996,7.055-16.005,7.055c-7.071,0-12.313-2.257-15.721-6.771c-3.41-4.513-5.115-9.833-5.115-15.958V34.971H270.892
z"/>
<path fill="#333333" d="M330.696,86.947c0.097,0,0.175,0,0.237,0c1.264-0.062,2.415-0.314,3.458-0.759
c1.042-0.443,1.562-1.36,1.562-2.754c0-1.644-0.868-2.909-2.604-3.796c-1.736-0.886-3.993-1.71-6.771-2.468
c-0.633-0.19-1.295-0.381-1.99-0.57c-0.694-0.188-1.419-0.379-2.177-0.57c-3.664-1.012-6.568-2.467-8.713-4.365
c-2.147-1.897-3.221-4.494-3.221-7.783c0-4.683,1.765-7.243,4.736-8.876c3.366-1.853,6.913-1.853,11.269-1.853h15.438v6.631
h-16.101c-2.021,0.063-3.757,0.316-5.208,0.756c-1.453,0.443-2.18,1.358-2.18,2.747c0,1.579,0.76,2.746,2.275,3.504
c1.513,0.759,11.205,3.568,11.458,3.694c4.358,1.642,7.401,3.426,9.14,5.35c1.735,1.928,2.605,4.469,2.605,7.625
c-0.192,3.599-1.219,6.141-3.08,7.624c-1.862,1.484-3.898,2.415-6.108,2.793c-0.756,0.128-1.516,0.206-2.271,0.237
c-0.759,0.033-1.484,0.048-2.18,0.048c-0.063,0-19.793,0-19.793,0v-7.215C310.478,86.947,330.604,86.947,330.696,86.947z"/>
<path fill="#333333" d="M399.879,49.65c0-4.168,0.867-7.276,2.603-9.329c1.736-2.05,3.614-3.456,5.637-4.214
c1.072-0.441,2.113-0.726,3.125-0.853c1.009-0.126,1.895-0.189,2.65-0.189l6.82-0.095v7.293h-5.493
c-3.221,0.063-5.337,0.898-6.346,2.509c-1.01,1.61-1.579,3.362-1.705,5.256c-0.063,0.317-0.093,0.632-0.093,0.947
c0,0.316,0,0.632,0,0.947c0,0.128,0,0.254,0,0.38s0.03,0.221,0.093,0.283c0,0.063,0,0.128,0,0.189c0,0.063,0,0.127,0,0.188
c0,0.189,0.016,0.365,0.048,0.521c0.032,0.159,0.048,0.3,0.048,0.426l12.121-0.095v7.293h-12.121v32.862h-7.387V49.65z"/>
<path fill="#333333" d="M213.125,35.248h-4.288h-4.289l-10.634,46.825c-0.187,0.784-0.342,1.553-0.462,2.31
c-0.106,0.671-0.162,1.149-0.175,1.459c-0.013-0.353-0.075-0.86-0.195-1.54c-0.134-0.757-0.308-1.499-0.521-2.229l-11.133-42.104
c-0.044-0.218-0.103-0.433-0.17-0.643l-0.136-0.513h-0.055c-0.897-2.104-2.965-3.575-5.374-3.575s-4.477,1.472-5.374,3.575h-0.061
l-0.153,0.577c-0.005,0.014-0.007,0.027-0.011,0.042l-11.36,42.884c-0.188,0.703-0.348,1.418-0.481,2.147
c-0.125,0.68-0.188,1.188-0.196,1.54c-0.009-0.304-0.065-0.777-0.177-1.439c-0.12-0.715-0.274-1.464-0.461-2.248l-10.502-47.068
h-4.329H138.3l14.403,55.582h0.02c0.708,2.295,2.815,3.962,5.319,3.962c2.503,0,4.611-1.667,5.319-3.962h0.02L174.8,48.736
c0.213-0.755,0.387-1.505,0.52-2.248c0.134-0.741,0.2-1.289,0.2-1.641h0.012c0,0.297,0.079,0.824,0.24,1.58
c0.16,0.757,0.347,1.526,0.562,2.309l11.686,42.094h0.035c0.707,2.295,2.816,3.962,5.318,3.962c2.504,0,4.612-1.667,5.32-3.962
h0.029L213.125,35.248z"/>
<path fill="#333333" d="M391.734,73.279c0,11.69-9.479,21.166-21.167,21.166c-11.691,0-21.167-9.476-21.167-21.166
c0-11.691,9.476-21.167,21.167-21.167C382.256,52.112,391.734,61.588,391.734,73.279z M370.567,59.184
c-7.786,0-14.096,6.31-14.096,14.096c0,7.785,6.31,14.095,14.096,14.095c7.785,0,14.097-6.31,14.097-14.095
C384.664,65.493,378.353,59.184,370.567,59.184z"/>
<path fill="#333333" d="M502,55.476c0-11.69-9.477-21.168-21.166-21.168c-11.69,0-21.168,9.478-21.168,21.168
c0,11.192,8.689,20.35,19.691,21.108l5.087-7.482c-1.152,0.305-2.362,0.468-3.61,0.468c-7.786,0-14.096-6.31-14.096-14.094
c0-7.785,6.31-14.096,14.096-14.096c7.784,0,14.096,6.311,14.096,14.096c0,3.054-0.976,5.877-2.625,8.187l0,0l-8.319,12.745
l-12.307,17.575h8.021l18.103-25.853c0.236-0.316,0.461-0.642,0.682-0.973l0.093-0.136h-0.006C500.736,63.7,502,59.736,502,55.476
z"/>
<path fill="#333333" d="M448.746,53.153h-15.533V43.021h-7.955v37.597v0.095c0,0.081,0.006,0.611,0.011,1.059
c-0.002,0.09-0.011,0.179-0.011,0.269c0,6.695,5.428,12.122,12.121,12.122c0,0,0,0,0.001,0c0.062,0,10.703,0,10.703,0v-7.387
h-9.472c-0.062,0-0.119-0.003-0.181-0.005c-0.115,0.009-0.232,0.018-0.351,0.018c-0.257,0-0.506-0.025-0.754-0.064
c-0.039-0.003-0.082-0.004-0.121-0.007c-0.155-0.014-0.313-0.062-0.469-0.12c-1.75-0.505-3.092-1.965-3.425-3.782v-0.002
c0,0,0-0.01-0.002-0.013c-0.049-0.281-0.081-0.567-0.081-0.862c0-0.039,0.005-0.075,0.007-0.113
c-0.002-0.047-0.005-0.094-0.007-0.134V70.664c-0.004,0.001-0.01,0.004-0.016,0.006V60.446h15.533V53.153z"/>
</g>
<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="58.5918" y1="1.8193" x2="58.5918" y2="126.1812">
<stop offset="0.1" style="stop-color:#52E5E7"/>
<stop offset="0.9" style="stop-color:#130CB7"/>
</linearGradient>
<path fill="url(#SVGID_1_)" d="M58.592,112.915l56.592-32.672v5.881c0,5.749-4.075,12.808-9.055,15.682l-38.482,22.22
c-4.98,2.874-13.129,2.874-18.109,0l-38.482-22.22C6.075,98.932,2,91.873,2,86.124v-5.881L58.592,112.915z M106.129,26.005
L67.646,3.786c-1.77-1.021-3.94-1.672-6.226-1.967v14.709l39.615,22.873L61.421,62.272v16.337l53.763-31.041v-5.881
C115.184,35.938,111.109,28.881,106.129,26.005z M55.763,78.609L2,47.568v-5.881c0-5.75,4.075-12.807,9.055-15.683L49.537,3.786
c1.769-1.021,3.94-1.672,6.226-1.967v14.717L16.157,39.41l39.605,22.857V78.609z M61.421,85.155l53.763-31.04v6.664L61.421,91.764
V85.155z M61.421,98.117l53.763-30.986v6.55l-53.763,31.04V98.117z"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 7.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 213 KiB

View file

@ -1,113 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Required meta tags-->
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<meta name="author" content="Websoft9">
<link rel="icon" href="favicon.ico" />
<!-- Title Page-->
<title>Websoft9 Applications Hosting Platform</title>
<script>
document.addEventListener('DOMContentLoaded', function() {
const domain = window.location.hostname;
const linkElement = document.getElementById('myLink');
linkElement.href = `http://${domain}:9000`;
});
</script>
<!-- english tags-->
<meta name="description" content="Websoft9 is a Self-Hosting Applications platform that can deploy multiple applications in your own cloud infrastructure.">
<meta name="keywords" content="Cloud computing, runtime environment, Docker, auto-deployment, Self-Hosting, install WordPress, Panel, PaaS, Iac, GitOps">
<!-- Chinese tags-->
<meta name="description" content="Websoft9微聚云是一个企业级的开源软件聚合与多应用托管平台旨在让任何人都能轻松地在自己的基础设施中部署和管理软件。" lang="zh">
<meta name="keywords" content="云计算运行环境自托管自动部署Docker部署 WordPress服务器面板自动化部署PaaSIaC, GitOps" lang="zh">
<link rel="stylesheet" href="assets/bootstrap.min.css">
<style>
.logo {
width: 100px;
margin-left: 20px;
}
.header {
padding: 20px 0;
background-color: #f8f9fa;
border-bottom: 1px solid #dee2e6;
}
.content {
padding: 40px 0;
}
.sub-block {
margin-bottom: 30px;
text-align: left; /* 设置文本左对齐 */
}
.btn-custom {
background-color: #086ad8;
color: white;
}
.btn-custom:hover {
background-color: #0056b3;
color: white;
}
.link-container {
display: flex;
flex-wrap: wrap; /* 允许换行 */
gap: 0; /* 设置间距为0 */
}
.link-container a {
margin: 0; /* 确保没有外边距 */
padding: 0 10px; /* 可以根据需要调整内边距 */
}
.welcome-title {
font-family: Arial, Helvetica, sans-serif; /* 使用 Arial 和 Helvetica 作为首选字体 */
font-size: 32px; /* 设置字号为 24px */
}
</style>
</head>
<body>
<div class="container-fluid">
<!-- 第一个块 -->
<div class="header w-100 d-flex justify-content-between align-items-center">
<div class="container">
<div class="d-flex justify-content-between align-items-center">
<img src="assets/logo.svg" alt="Logo" class="logo">
<div class="link-container">
<a target="_blank" href="https://support.websoft9.com/docs/next/helpdesk/#contact" class="text-dark">Support</a>
<a target="_blank" href="https://support.websoft9.com/docs" class="text-dark">Documentation</a>
<a target="_blank" href="https://www.websoft9.com" class="text-dark">Website</a>
<a target="_blank" href="https://github.com/Websoft9/websoft9" class="text-dark">Github</a>
</div>
</div>
</div>
</div>
<!-- 第二个块 -->
<div class="content">
<div class="container" style="display: flex;">
<div class="sub-block" style="flex: 1; padding: 20px;">
<h1 class="welcome-title">Welcome to Websoft9 Applications Hosting Platform</h1>
<p>GitOps-driven, multi-application hosting for cloud servers and home servers, one-click deployment of 200+ open source apps.</p>
<a id="myLink" class="btn btn-custom" >Access Websoft9 Console</a>
</div>
<div class="image-block" style="flex: 1; padding: 20px;">
<img src="assets/websoft9-appstore.png" alt="Description of image" style="width: 100%; height: auto;">
</div>
</div>
</div>
</div>
</body>
</html>

View file

@ -1,40 +1,31 @@
#!/bin/bash
# Define variables
# 设置密码目录
credential_path="/data/credential"
# Migrating initproxy.conf file
if [ ! -d /data/nginx/default_host ]; then mkdir -p /data/nginx/default_host; fi
cp -f /etc/websoft9/initproxy.conf /data/nginx/default_host/initproxy.conf
[ -f /etc/websoft9/initproxy.conf ] && rm -f /data/nginx/proxy_host/initproxy.conf
# Deploy Websoft9 landing pages
if [ ! -d /data/nginx/default_www/landing ]; then
mkdir -p /data/nginx/default_www/
cp -r /etc/websoft9/landing /data/nginx/default_www/
else
echo "/data/nginx/default_www/landing already exists."
fi
# If credential file then create it and init credential for NPM
# Reload NPM docker image Environments
# 检查是否已经存在密码文件
if [ ! -f "$credential_path" ]; then
# Set init credential
# 设置用户名和生成随机密码
INITIAL_ADMIN_EMAIL="admin@mydomain.com"
INITIAL_ADMIN_PASSWORD=$(openssl rand -base64 16 | tr -d '/+' | cut -c1-16)
# Write credential to file
mkdir -p "$(dirname "$credential_path")"
echo "{\"username\":\"$INITIAL_ADMIN_EMAIL\",\"password\":\"$INITIAL_ADMIN_PASSWORD\"}" > "$credential_path"
# 设置环境变量
export INITIAL_ADMIN_EMAIL
export INITIAL_ADMIN_PASSWORD
# 写入密码文件
mkdir -p "$(dirname "$credential_path")"
credential_json="{\"username\":\"$INITIAL_ADMIN_EMAIL\",\"password\":\"$INITIAL_ADMIN_PASSWORD\"}"
echo "$credential_json" > "$credential_path"
else
read -r INITIAL_ADMIN_EMAIL INITIAL_ADMIN_PASSWORD < <(jq -r '.username + " " + .password' "$credential_path")
# 从密码文件中读取用户名和密码
INITIAL_ADMIN_EMAIL=$(jq -r '.username' "$credential_path")
INITIAL_ADMIN_PASSWORD=$(jq -r '.password' "$credential_path")
# 设置环境变量
export INITIAL_ADMIN_EMAIL
export INITIAL_ADMIN_PASSWORD
fi
# Reload NPM docker image Environments
export INITIAL_ADMIN_EMAIL
export INITIAL_ADMIN_PASSWORD
# Start NPM
# 启动 Nginx
exec /init

View file

@ -1,34 +0,0 @@
# Oracle Linux
## How to create VM image?
You should download the [Oracle Linux image](https://yum.oracle.com/oracle-linux-templates.html) to OSS/S3 directly; don't try to build an image from an ISO via manual setup with KVM/VMware/VirtualBox
## Cloud image requirements
- Kernel:
- Unbreakable Enterprise Kernel (UEK)(√)
- Red Hat Compatible Kernel(RHCK)
- OS disk automatically resizes
- Users can use either a password or a key when creating a VM or resetting the password
- OS boot method on Cloud: UEFI-Preferred
- Logic Disk partition: LVM
- File system type: xfs(√)、[btrfs](https://blogs.oracle.com/linux/post/btrfs-on-oracle-linuxefficiently-backup-and-recover-systems)
- Softwares: cloud-init, agent of Cloud provider, virtio, NVMe
- Other config: https://github.com/Websoft9/mcloud/blob/master/ansible/roles/desktop/tasks/image.yml
- Applications: Desktop or Docker/Podman
- Other repository
```
yum install -y oraclelinux-developer-release-e* oracle-nodejs-release-e* oracle-epel-release-e*; fi
python3 and pip at OracleLinux7?
```
## Upgrade Oracle Linux
You can use [leapp](https://docs.oracle.com/en/learn/ol-linux-leapp) to upgrade across major versions, e.g. Oracle Linux 8 to Oracle Linux 9
## Test your Cloud private image
Some Cloud provider have tools for your image testing:
- [阿里云 sersi](https://help.aliyun.com/zh/ecs/user-guide/check-whether-an-image-meets-the-import-requirements)

View file

@ -18,7 +18,7 @@ export PATH
#
# $ sudo bash install.sh --port 9001
#
# --channel <release|rc|dev>
# --channel <release|dev>
# Use the --channel option to install a release(production) or dev distribution. default is release, for example:
#
# $ sudo bash install.sh --channel release
@ -43,22 +43,15 @@ export PATH
#
# $ sudo bash install.sh --devto "/data/dev/mycode"
#
# --execute_mode <auto|install|upgrade>
# Use the --execute_mode option to tell the script whether to install or upgrade Websoft9. The default value is auto,
# in which case the script will automatically detect whether it needs to install or upgrade, for example:
#
# $ sudo bash install.sh --execute_mode "upgrade"
#
# ==============================================================================
# 设置参数的默认值
version="latest"
channel="release"
execute_mode="auto"
path="/data/websoft9/source"
apps=""
mirrors="https://dockerhub.websoft9.com"
mirrors="https://docker.rainbond.cc,https://registry.inner.websoft9.cn"
# 获取参数值
while [[ $# -gt 0 ]]; do
@ -126,15 +119,6 @@ while [[ $# -gt 0 ]]; do
devto="$1"
shift
;;
--execute_mode)
shift
if [[ $1 == --* ]]; then
echo "Missing value for --execute_mode"
exit 1
fi
execute_mode="$1"
shift
;;
*)
echo "Unknown parameter: $1"
exit 1
@ -142,12 +126,6 @@ while [[ $# -gt 0 ]]; do
esac
done
# check it is root user or have sudo changed to root user,if not exit 1
if [ $(id -u) -ne 0 ]; then
echo "You must be the root user to run this script."
exit 1
fi
if [ -n "$port" ]; then
export port
else
@ -157,18 +135,15 @@ fi
starttime=$(date +%s)
# Automatically check $execute_mode and decide whether to install or upgrade when it is set to auto
if [ "$execute_mode" = "auto" ]; then
if sudo systemctl cat websoft9 >/dev/null 2>&1 && sudo systemctl cat cockpit >/dev/null 2>&1 && sudo docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q '^websoft9-apphub'; then
echo "execute_mode=upgrade"
export execute_mode="upgrade"
else
echo "execute_mode=install"
export execute_mode="install"
fi
# Check is install or upgrade
if systemctl cat websoft9 >/dev/null 2>&1 && systemctl cat cockpit >/dev/null 2>&1 && sudo docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q '^websoft9-apphub'; then
echo "execute_mode=upgrade"
export execute_mode="upgrade"
else
echo "execute_mode=install"
export execute_mode="install"
fi
# 输出参数值
echo -e "\n------ Welcome to install Websoft9, it will take 3-5 minutes ------"
echo -e "\nYour installation parameters are as follows: "
@ -179,7 +154,6 @@ echo "--path: $path"
echo "--apps: $apps"
echo "--mirrors: $mirrors"
echo "--devto: $devto"
echo "--execute_mode: $execute_mode"
echo -e "\nYour OS: "
cat /etc/os-release | head -n 3 2>/dev/null
@ -302,93 +276,21 @@ install_tools(){
fi
}
# download_artifact URL ZIP MAX_ATTEMPTS
#   Download $2 from $1 into /tmp, trying wget up to $3 times, then curl up
#   to $3 times. Returns 0 on the first successful download, 1 otherwise.
download_artifact() {
    local artifact_url="$1"
    local source_zip="$2"
    local max_attempts="$3"
    local i

    for ((i=1; i<=max_attempts; i++)); do
        if wget -P /tmp "$artifact_url/$source_zip"; then
            echo "Downloaded successfully using wget on attempt $i."
            return 0
        fi
        echo "Attempt $i failed using wget."
    done

    for ((i=1; i<=max_attempts; i++)); do
        # -f: fail on HTTP errors (e.g. 404). Without it curl exits 0 and
        # saves the server's error page as the artifact, which the original
        # code then treated as a successful download.
        if curl -f -o /tmp/"$source_zip" "$artifact_url/$source_zip"; then
            echo "Downloaded successfully using curl on attempt $i."
            return 0
        fi
        echo "Attempt $i failed using curl."
    done

    echo "Failed to download source package after $((max_attempts * 2)) attempts."
    return 1
}
download_source_and_checkimage() {
download_source() {
echo_prefix_source=$'\n[Download Source] - '
echo "$echo_prefix_source Download Websoft9 source code from $artifact_url/$source_zip"
find . -type f -name "websoft9*.zip*" -exec rm -f {} \;
rm -rf /tmp/$source_unzip
download_artifact "$artifact_url" "$source_zip" 10
wget "$artifact_url/$source_zip"
if [ $? -ne 0 ]; then
echo "Failed to download source package."
exit 1
fi
## unzip and check image
sudo unzip -o "/tmp/$source_zip" -d /tmp > /dev/null
if [ $? -ne 0 ]; then
echo "Failed to unzip source package."
exit 1
fi
# install docker
bash /tmp/$source_unzip/install/install_docker.sh
cd /tmp/$source_unzip/docker
docker compose pull
if [ $? -ne 0 ]; then
echo "Can not pull images from docker hub, set mirrors...."
if [ -f "/etc/docker/daemon.json" ]; then
if grep -q "registry-mirrors" "/etc/docker/daemon.json"; then
mv /etc/docker/daemon.json /etc/docker/daemon.json.bak
cp daemon.json /etc/docker/daemon.json
else
rm -f /etc/docker/daemon.json
cp daemon.json /etc/docker/daemon.json
fi
else
cp daemon.json /etc/docker/daemon.json
fi
sudo systemctl daemon-reload
sudo systemctl restart docker
# pull image by new mirrors
docker compose pull
if [ $? -ne 0 ]; then
echo "image pull failed again, exit install"
exit 1
else
echo "image pull success by new mirrors"
fi
else
echo "image pull success"
fi
rm -rf /tmp/$source_unzip
sudo unzip -o "/tmp/$source_zip" -d "$install_path" > /dev/null
sudo unzip -o "$source_zip" -d "$install_path" > /dev/null
if [ $? -ne 0 ]; then
echo "Failed to unzip source package."
exit 1
@ -400,10 +302,12 @@ download_source_and_checkimage() {
exit 1
fi
rm -rf "/tmp/$source_zip" "$install_path/$source_unzip"
rm -rf "$source_zip" "$install_path/$source_unzip"
}
check_ports() {
local ports=("$@")
@ -427,8 +331,61 @@ check_ports() {
echo "All ports are available"
}
# Merge Websoft9's Docker daemon settings into the host's /etc/docker/daemon.json.
# Prefers the local copy at $install_path/docker/daemon.json; otherwise falls
# back to downloading one from $source_github_pages. On any failure it only
# warns — installation continues.
# NOTE(review): $install_path and $source_github_pages are expected to be set
# by the surrounding script before this runs — confirm against the caller.
merge_json_files() {
    local target_path="/etc/docker/daemon.json"

    # Shell variables below ($target_path, $install_path, $source_github_pages)
    # are expanded by bash into the Python source because the heredoc
    # delimiter EOF is unquoted. Python stderr is suppressed.
    python3 - <<EOF 2>/dev/null
import json
import urllib.request
import os

# Merge file2 into file1 (file2's keys win on conflict) and rewrite file1.
def merge_json_files(file1, file2):
    print("Merge from local file... ")
    with open(file1, 'r') as f1, open(file2, 'r') as f2:
        data1 = json.load(f1)
        data2 = json.load(f2)
    merged_data = {**data1, **data2}
    with open(file1, 'w') as f:
        json.dump(merged_data, f, indent=4)

# Fetch JSON from url and merge it into file_path (remote keys win on conflict).
def download_and_merge(url, file_path):
    print("Download daemon.json from url and merge... ")
    with urllib.request.urlopen(url) as response:
        data = json.loads(response.read().decode())
    with open(file_path, 'r') as f:
        local_data = json.load(f)
    merged_data = {**local_data, **data}
    with open(file_path, 'w') as f:
        json.dump(merged_data, f, indent=4)

# Create target file if it does not exist
if not os.path.exists("${target_path}"):
    os.makedirs(os.path.dirname("${target_path}"), exist_ok=True)
    with open("${target_path}", 'w') as f:
        json.dump({}, f)

if os.path.exists("${install_path}/docker/daemon.json"):
    merge_json_files("${target_path}", "${install_path}/docker/daemon.json")
elif urllib.request.urlopen("${source_github_pages}/docker/daemon.json").getcode() == 200:
    download_and_merge("${source_github_pages}/docker/daemon.json", "${target_path}")
else:
    print("No target daemon.json file need to merged")
EOF
    # Best-effort: a merge failure must not abort the installer.
    if [ $? -ne 0 ]; then
        echo "merge daemon.json failed, but install continue running"
    fi
}
set_docker(){
echo "Set Docker for Websoft9 backend service..."
merge_json_files
if ! systemctl is-active --quiet firewalld; then
echo "firewalld is not running"
else
@ -487,24 +444,48 @@ install_backends() {
else
echo "No containers to delete."
fi
sudo docker compose -p websoft9 -f $composefile up -d
DOCKER_CONFIG_FILE="/etc/docker/daemon.json"
MIRROR_ADDRESS=$mirrors
timeout 10s sudo docker compose -f $composefile pull
if [ $? -eq 0 ]; then
echo "Docker Compose pull succeeded"
else
echo "Can not pull images from docker hub, set mirrors..."
if [ ! -f "$DOCKER_CONFIG_FILE" ]; then
echo "{}" | sudo tee "$DOCKER_CONFIG_FILE" > /dev/null
fi
if command -v jq >/dev/null 2>&1; then
MIRROR_ARRAY=$(echo $MIRROR_ADDRESS | sed 's/,/","/g' | sed 's/^/["/' | sed 's/$/"]/')
#jq --arg mirrors "$MIRROR_ARRAY" '.["registry-mirrors"] = ($mirrors | fromjson)' "$DOCKER_CONFIG_FILE" > "$DOCKER_CONFIG_FILE.tmp" && sudo mv "$DOCKER_CONFIG_FILE.tmp" "$DOCKER_CONFIG_FILE"
jq ".\"registry-mirrors\" = $MIRROR_ARRAY" "$DOCKER_CONFIG_FILE" > "$DOCKER_CONFIG_FILE.tmp" && sudo mv "$DOCKER_CONFIG_FILE.tmp" "$DOCKER_CONFIG_FILE"
else
echo "jq not installed!"
exit 1
fi
sudo systemctl daemon-reload
sudo systemctl restart docker
fi
sudo docker compose -p websoft9 -f $composefile up -d --build
if [ $? -ne 0 ]; then
echo "Failed to start docker services."
exit 1
fi
if jq -e '.["registry-mirrors"]' "$DOCKER_CONFIG_FILE" > /dev/null; then
jq 'del(.["registry-mirrors"])' "$DOCKER_CONFIG_FILE" > "${DOCKER_CONFIG_FILE}.tmp" && sudo mv "${DOCKER_CONFIG_FILE}.tmp" "$DOCKER_CONFIG_FILE"
sudo systemctl daemon-reload
sudo systemctl restart docker
fi
if [ "$execute_mode" = "install" ]; then
sudo docker exec -i websoft9-apphub apphub setconfig --section domain --key wildcard_domain --value ""
if [ -n "$apps" ]; then
sudo docker exec -i websoft9-apphub apphub setconfig --section initial_apps --key keys --value "$apps"
fi
fi
if [ -f "/etc/docker/daemon.json.bak" ]; then
rm -rf /etc/docker/daemon.json
mv /etc/docker/daemon.json.bak /etc/docker/daemon.json
fi
}
install_systemd() {
@ -513,7 +494,7 @@ install_systemd() {
echo "$echo_prefix_systemd Install Systemd service"
if [ ! -d "$systemd_path" ]; then
sudo mkdir -p "$systemd_path"
sudo mkdir -p "$systemd_path"
fi
sudo cp -r $install_path/systemd/script/* "$systemd_path"
@ -535,30 +516,36 @@ install_systemd() {
exit 1
fi
sudo systemctl restart websoft9
sudo systemctl start websoft9
if [ $? -ne 0 ]; then
echo "Failed to start Systemd service."
exit 1
fi
}
#--------------- main-----------------------------------------
log_path="$install_path/install.log"
check_ports $http_port $https_port $port | tee -a $log_path
install_tools | tee -a $log_path
download_source | tee -a $log_path
download_source_and_checkimage | tee -a $log_path
bash $install_path/install/install_docker.sh | tee -a $log_path
if [ $? -ne 0 ]; then
echo "install_docker failed with error $?. Exiting."
exit 1
fi
install_backends | tee -a $log_path
install_systemd | tee -a $log_path
bash $install_path/install/install_cockpit.sh | tee -a $log_path
if [ $? -ne 0 ]; then
echo "install_cockpit failed with error $?. Exiting."
exit 1
fi
install_systemd | tee -a $log_path
bash $install_path/install/install_plugins.sh | tee -a $log_path
if [ $? -ne 0 ]; then
echo "install_plugins failed with error $?. Exiting."
@ -574,4 +561,4 @@ endtime=$(date +%s)
runtime=$((endtime-starttime))
echo "Script execution time: $runtime seconds"
echo -e "\n-- Install success! ------"
echo "Access Websoft9 console by: http://Internet IP:$(grep ListenStream /lib/systemd/system/cockpit.socket | cut -d= -f2) and using Linux user for login"
echo "Access Websoft9 console by: http://Internet IP:$(grep ListenStream /lib/systemd/system/cockpit.socket | cut -d= -f2) and using Linux user for login"

View file

@ -2,206 +2,212 @@
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Download docker install script
download_docker_script() {
local urls=("https://get.docker.com" "https://getdocker.websoft9.com")
local output="get-docker.sh"
local retries=10
local timeout=5
# Install and Upgade Docker for mosts of Linux
# This script is intended from https://get.docker.com and add below:
#
# - install or update Docker
# - support Redhat, CentOS-Stream, OracleLinux, AmazonLinux
#
# 1. download the script
#
# $ curl -fsSL https://websoft9.github.io/websoft9/install/install_docker.sh -o install_docker.sh
#
# 2. verify the script's content
#
# $ cat install_docker.sh
#
# 3. run the script with --dry-run to verify the steps it executes
#
# $ sh install_docker.sh --dry-run
#
# 4. run the script either as root, or using sudo to perform the installation.
#
# $ sudo sh install_docker.sh
download_with_tool() {
local tool=$1
local url=$2
local count=0
until [ $count -ge $retries ]; do
count=$((count+1))
echo "[Websoft9] - Attempting to download official Docker install script from: $url using $tool (attempt $count of $retries)"
if [ "$tool" = "curl" ]; then
curl -fsSL --max-time $timeout $url -o $output
else
wget --timeout=$timeout -O $output $url
fi
if verify_download; then
echo "[Websoft9] - Download official Docker install script succeeded from: $url using $tool"
return 0
fi
sleep 1
done
echo "[Websoft9] - Download official Docker install script failed from: $url using $tool after $retries attempts"
return 1
}
# Verify that the downloaded script ($output) exists and is non-empty.
# NOTE(review): diff rendering has interleaved unrelated lines into this
# function; the closing "fi"/"}" of verify_download are missing below --
# verify against the repository before running this as-is.
verify_download() {
if [ -f "$output" ] && [ -s "$output" ]; then
echo "[Websoft9] - Verification official Docker install script succeeded: $output"
return 0
else
echo "[Websoft9] - Verification failed: $output is missing or empty"
return 1
# must be exported, otherwise Rocky Linux cannot use it in the yum command
export docker_packages="docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"
echo_prefix_docker=$'\n[Docker] - '
# Return 0 if the docker CLI is on PATH, 1 otherwise.
docker_exist() {
# check whether the `docker` command exists
if ! command -v docker &> /dev/null; then
echo "docker command not exist"
return 1
fi
}
# First pass: try each mirror with curl, stopping at the first success.
for url in "${urls[@]}"; do
download_with_tool "curl" $url && break
done
# check whether the Docker service is running
systemctl is-active docker.service &> /dev/null
if [ $? -ne 0 ]; then
echo "Docker service is not running, trying to start it..."
systemctl start docker.service
if [ $? -ne 0 ]; then
echo "Failed to start Docker service."
return 1
fi
fi
# Fallback pass: retry the mirrors with wget if curl failed.
if [ $? -ne 0 ]; then
for url in "${urls[@]}"; do
download_with_tool "wget" $url && break
return 0
}
# Install Docker engine + CLI + buildx/compose plugins.
#   $1 mirror   - mirror name for get-docker.sh ("Official" maps to "")
#   $2 timeout  - seconds allowed for each install command
#   $3 repo_url - docker-ce repo URL used for the RHEL family
# Strategy: if get.docker.com is reachable, use the official convenience
# script (or a docker-ce repo for RHEL-family distros it does not cover);
# otherwise fall back to manual Aliyun-mirror repo setup.
Install_Docker(){
local mirror=$1
local timeout=$2
local repo_url=$3
echo "$echo_prefix_docker Installing Docker from ${mirror} with timeout ${timeout} seconds for your system"
if [ "$mirror" = "Official" ]; then
mirror=""
fi
# quick reachability probe for get.docker.com (5-second cap)
curl -fsSL --max-time 5 https://get.docker.com -o /dev/null
if [ $? -eq 0 ]; then
# For redhat family
if [[ -f /etc/redhat-release ]] || command -v amazon-linux-extras >/dev/null 2>&1; then
# For CentOS, Fedora, or RHEL(only s390x)
if [[ $(cat /etc/redhat-release) =~ "Red Hat" ]] && [[ $(uname -m) == "s390x" ]] || [[ $(cat /etc/redhat-release) =~ "CentOS" ]] || [[ $(cat /etc/redhat-release) =~ "Fedora" ]]; then
curl -fsSL https://get.docker.com -o get-docker.sh
timeout $timeout sh get-docker.sh --channel stable --mirror $mirror
else
# For other distributions(Redhat and Rocky linux ...)
# probe which package manager is available
dnf --version >/dev/null 2>&1
dnf_status=$?
yum --version >/dev/null 2>&1
yum_status=$?
if [ $dnf_status -eq 0 ]; then
sudo dnf install dnf-utils -y > /dev/null
sudo dnf config-manager --add-repo $repo_url
timeout $timeout sudo dnf install $docker_packages -y
elif [ $yum_status -eq 0 ]; then
sudo yum install yum-utils -y > /dev/null
sudo yum-config-manager --add-repo $repo_url
if command -v amazon-linux-extras >/dev/null 2>&1; then
# Amazon Linux 2: use a CentOS 7 base repo and pin $releasever to 7
wget -O /etc/yum.repos.d/CentOS7-Base.repo https://websoft9.github.io/stackhub/apps/roles/role_common/files/CentOS7-Base.repo
sudo sed -i "s/\$releasever/7/g" /etc/yum.repos.d/docker-ce.repo
timeout $timeout sudo yum install $docker_packages --disablerepo='amzn2-extras,amzn2-core' -y
else
timeout $timeout sudo yum install $docker_packages -y
fi
else
echo "None of the required package managers are installed."
fi
fi
fi
# NOTE(review): Amazon Linux is handled again here after the redhat-family
# branch above -- confirm the intended order against the repository.
if [[ $(cat /etc/os-release) =~ "Amazon Linux" ]]; then
sudo dnf install docker -y
sudo systemctl enable docker
sudo systemctl start docker
# docker compose is not packaged here; install the CLI plugin manually
sudo mkdir -p /usr/local/lib/docker/cli-plugins/
sudo curl -SL "https://github.com/docker/compose/releases/latest/download/docker-compose-linux-$(uname -m)" -o /usr/local/lib/docker/cli-plugins/docker-compose
sudo chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
fi
# For Ubuntu, Debian, or Raspbian
if type apt >/dev/null 2>&1; then
# Wait for apt to be unlocked
curl -fsSL https://get.docker.com -o get-docker.sh
timeout $timeout sh get-docker.sh --channel stable --mirror $mirror
fi
else
# get.docker.com unreachable: configure Aliyun docker-ce mirrors manually
echo "can not install by installation script, use special way to install docker"
dnf --version >/dev/null 2>&1
dnf_status=$?
yum --version >/dev/null 2>&1
yum_status=$?
apt --version >/dev/null 2>&1
apt_status=$?
if [ $dnf_status -eq 0 ]; then
sudo dnf install yum-utils -y
sudo dnf config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
elif [ $yum_status -eq 0 ]; then
sudo yum install yum-utils -y
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
elif [ $apt_status -eq 0 ]; then
sudo apt-get update
sudo apt-get install -y ca-certificates curl gnupg lsb-release
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
echo "don't know package tool"
fi
sudo systemctl enable docker
sudo systemctl restart docker
fi
}
# Upgrade Docker in place when it is already installed; otherwise install it,
# retrying across several mirrors (Official x2, AzureChinaCloud, Aliyun).
# NOTE(review): this block is rendered from a diff and the if/fi nesting does
# not balance (an extra "fi", and a download-failure echo referencing
# $retries from another function, appear interleaved) -- verify against the
# repository before running as-is.
Upgrade_Docker(){
if docker_exist; then
echo "$echo_prefix_docker Upgrading Docker for your system..."
# probe which package manager is available
dnf --version >/dev/null 2>&1
dnf_status=$?
yum --version >/dev/null 2>&1
yum_status=$?
apt --version >/dev/null 2>&1
apt_status=$?
if [ $dnf_status -eq 0 ]; then
sudo dnf update -y $docker_packages
elif [ $yum_status -eq 0 ]; then
sudo yum update -y $docker_packages
elif [ $apt_status -eq 0 ]; then
sudo apt update -y
sudo apt -y install --only-upgrade $docker_packages
else
echo "Docker installed, but cannot upgrade"
fi
else
# Docker absent: try each mirror in turn until docker_exist succeeds
local mirrors=("Official" "Official" "AzureChinaCloud" "Aliyun")
local urls=("https://download.docker.com/linux/centos/docker-ce.repo" "https://download.docker.com/linux/centos/docker-ce.repo" "https://mirror.azure.cn/docker-ce/linux/centos/docker-ce.repo" "https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo")
local timeout=180
local max_retries=4
local retry_count=0
while ((retry_count < max_retries)); do
Install_Docker ${mirrors[$retry_count]} $timeout ${urls[$retry_count]}
if ! docker_exist; then
echo "Installation timeout or failed, retrying with ${mirrors[$retry_count]} mirror..."
((retry_count++))
sleep 3
else
# success terminates the whole script, not just this function
echo "Docker installed successfully."
exit 0
fi
done
fi
if [ $? -ne 0 ]; then
echo "[Websoft9] - Download failed after $retries attempts, please check your network connection."
echo "Docker Installation failed after $max_retries retries."
exit 1
fi
fi
}
# install docker by custom
# Custom Docker installation for distributions the official script rejects.
#   $1 - optional distro ID (lowercased); when omitted, detected from
#        /etc/os-release ($ID).
# NOTE(review): rendered from a diff -- the closing "}" of this function is
# missing below (the next definition begins immediately); verify against the
# repository.
install_docker_custom() {
if [ -n "$1" ]; then
lsb_dist=$(echo "$1" | tr '[:upper:]' '[:lower:]')
else
if [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID" | tr '[:upper:]' '[:lower:]')"
else
echo "[Websoft9] - Unable to determine distribution. Exiting."
exit 1
fi
fi
echo "[Websoft9] - Beginning custom Docker installation for: $lsb_dist"
# mirror bases tried in order: official, Aliyun, Azure China
local repos_base=("https://download.docker.com/linux" "https://mirrors.aliyun.com/docker-ce/linux" "https://mirror.azure.cn/docker-ce/linux")
local repos
# Add one docker-ce repo and install the packages with whichever of
# dnf5/dnf/yum is available.
install_docker_from_repo() {
local repo=$1
if command_exists dnf5; then
echo "[Websoft9] - Using dnf5 package manager for Docker installation from repo: $repo."
sudo dnf -y -q install dnf-plugins-core
sudo dnf5 config-manager addrepo --save-filename=docker-ce.repo --from-repofile=$repo
sudo dnf makecache
package_manager="dnf5"
elif command_exists dnf; then
echo "[Websoft9] - Using dnf package manager for Docker installation from repo: $repo."
sudo dnf -y -q install dnf-plugins-core
sudo dnf config-manager --add-repo $repo
sudo dnf makecache
package_manager="dnf"
else
echo "[Websoft9] - Using yum package manager for Docker installation from repo: $repo."
sudo yum -y -q install yum-utils
sudo yum-config-manager --add-repo $repo
sudo yum makecache
package_manager="yum"
fi
sudo $package_manager install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
}
if command_exists dnf5 || command_exists dnf || command_exists yum; then
if [ "$lsb_dist" = "amzn" ]; then
# Amazon Linux: distro docker package + manual compose CLI plugin
sudo yum makecache
sudo yum install -y docker
sudo mkdir -p /usr/local/lib/docker/cli-plugins/
sudo curl -SL "https://github.com/docker/compose/releases/latest/download/docker-compose-linux-$(uname -m)" -o /usr/local/lib/docker/cli-plugins/docker-compose
sudo chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
else
# build "<base>/<dist>/docker-ce.repo" for each mirror base
repos=("${repos_base[@]/%//${lsb_dist}/docker-ce.repo}")
# podman conflicts with docker-ce on RHEL-family systems
sudo dnf remove -y podman || sudo yum remove -y podman
for repo in "${repos[@]}"; do
install_docker_from_repo $repo && break
done
if [ $? -ne 0 ]; then
echo "[Websoft9] - Installation failed with ${lsb_dist} repo, retrying with rhel and centos repos."
for fallback_dist in "rhel" "centos"; do
repos=("${repos_base[@]/%//${fallback_dist}/docker-ce.repo}")
for repo in "${repos[@]}"; do
install_docker_from_repo $repo && break 2
done
done
fi
fi
elif command_exists apt; then
# Debian family: add the apt repo + keyring for each mirror until one works
repos=("${repos_base[@]/%//ubuntu}")
for repo in "${repos[@]}"; do
sudo apt-get update
sudo apt-get install ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL $repo/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] $repo \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
if sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; then
break
fi
done
else
echo "[Websoft9] - Unsupported system distribution: $1. Exiting."
exit 1
fi
# Final verification: service starts and both docker + compose respond
if sudo systemctl start docker && sudo systemctl enable docker; then
if command_exists docker && docker compose version >/dev/null 2>&1; then
echo "[Websoft9] - Docker and Docker Compose installation verified successfully."
return 0
else
echo "[Websoft9] - Docker or Docker Compose installation verification failed."
exit 1
fi
else
echo "[Websoft9] - Failed to start Docker."
return 1
fi
Start_Docker(){
    # Enable Docker at boot and (re)start it now. Requires the Docker engine
    # and CLI to already be present (checked via docker_exist); aborts the
    # script otherwise.
    if ! docker_exist; then
        echo "Docker not installed or start failed, exit..."
        exit 1
    fi
    echo "$echo_prefix_docker Starting Docker"
    sudo systemctl enable docker
    sudo systemctl restart docker
}
# Install docker by official script
# Runs the previously downloaded get-docker.sh, trying the official source
# and then the Aliyun / AzureChinaCloud mirrors, each capped at
# $install_timeout seconds. Falls back to install_docker_custom on
# unsupported distributions or script errors.
# NOTE(review): after calling install_docker_custom this still executes
# "exit 1" even if the custom install succeeded -- confirm whether that
# failure exit is intended.
install_docker_official() {
# define install command parameters
install_params=("" "--mirror Aliyun" "--mirror AzureChinaCloud")
install_timeout=300 # set timeout for each install attempt in seconds
for param in "${install_params[@]}"; do
cmd="sh get-docker.sh $param"
echo "[Websoft9] - Attempting to install Docker with command: $cmd"
output=$(timeout $install_timeout $cmd 2>&1)
echo "$output"
if echo "$output" | grep -q "ERROR: Unsupported distribution"; then
# extract the distro name from the script's error message
lsb_dist=$(echo "$output" | grep "ERROR: Unsupported distribution" | awk -F"'" '{print $2}')
echo "[Websoft9] - Detected unsupported distribution: $lsb_dist. Executing custom operation."
install_docker_custom "$lsb_dist"
exit 1
elif echo "$output" | grep -q "ERROR"; then
echo "[Websoft9] - Docker installation failed with command: $cmd"
install_docker_custom "$lsb_dist"
exit 1
elif command_exists docker && docker compose version >/dev/null 2>&1; then
echo "[Websoft9] - Docker installation succeeded with command: $cmd"
return 0
elif echo "$output" | grep -q "timeout"; then
echo "[Websoft9] - Docker installation attempt timed out with command: $cmd. Trying next mirror."
fi
done
echo "[Websoft9] - Docker installation failed after use official script. Attempting custom installation."
install_docker_custom "$lsb_dist"
exit 1
}
# ---- script entry sequence ----
# NOTE(review): rendered from a diff -- the Upgrade_Docker call and the
# download/install_docker_official flow look like two script versions
# interleaved; verify against the repository which path is current.
echo -e "\n\n-------- Docker --------"
Upgrade_Docker
# small helper: succeed if the given command is on PATH
command_exists() {
command -v "$@" > /dev/null 2>&1
}
# download docker install script
download_docker_script
# install docker
install_docker_official
# $execute_mode is expected from the caller's environment; default (unset)
# behaves like "install"
if [ -z "$execute_mode" ] || [ "$execute_mode" = "install" ]; then
Start_Docker
fi

View file

@ -4,6 +4,24 @@ PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# Export PATH
export PATH
# Command-line options
# ==============================================================================
#
# --cockpit
# Use the --cockpit option to remove cockpit:
#
# $ sudo sh install.sh --cockpit
#
# --files
# Use the --files option to remove installed files:
#
# $ sudo sh install.sh --files
#
#
# ==============================================================================
install_path="/data/websoft9/source"
systemd_path="/opt/websoft9/systemd"
cockpit_plugin_path="/usr/share/cockpit"
@ -13,48 +31,18 @@ echo -e "\n---Remove Websoft9 backend service containers---"
# Tear down the Websoft9 compose project and its volumes.
sudo docker compose -p websoft9 down -v
echo -e "\n---Remove Websoft9 systemd service---"
if systemctl list-units --full --all | grep -Fq websoft9.service; then
sudo systemctl disable websoft9
sudo systemctl stop websoft9
rm -rf /lib/systemd/system/websoft9.service
else
echo "websoft9.service does not exist."
fi
# NOTE(review): the three lines below repeat the removal unconditionally;
# this looks like old/new versions interleaved by the diff rendering.
sudo systemctl disable websoft9
sudo systemctl stop websoft9
rm -rf /lib/systemd/system/websoft9.service
# Stop, disable and uninstall Cockpit and its packages ($cockpit_packages,
# defined elsewhere in this file), then purge its configuration directory.
remove_cockpit() {
echo -e "\n---Remove Cockpit---"
sudo systemctl stop cockpit.socket cockpit
sudo systemctl disable cockpit.socket cockpit
# probe which package manager is available
dnf --version >/dev/null 2>&1
dnf_status=$?
yum --version >/dev/null 2>&1
yum_status=$?
apt --version >/dev/null 2>&1
apt_status=$?
if [ $dnf_status -eq 0 ]; then
for pkg in $cockpit_packages; do
echo "Uninstalling $pkg"
sudo dnf remove -y "$pkg" > /dev/null || echo "$pkg failed to uninstall"
done
elif [ $yum_status -eq 0 ]; then
for pkg in $cockpit_packages; do
echo "Uninstalling $pkg"
sudo yum remove -y "$pkg" > /dev/null || echo "$pkg failed to uninstall"
done
elif [ $apt_status -eq 0 ]; then
# non-interactive apt so removal never prompts
export DEBIAN_FRONTEND=noninteractive
for pkg in $cockpit_packages; do
echo "Uninstalling $pkg"
sudo apt-get remove -y "$pkg" > /dev/null || echo "$pkg failed to uninstall"
done
else
echo "Neither apt, dnf nor yum found. Please install one of them and try again."
fi # fixed here: close with fi, not end
# NOTE(review): the pkcon loop below duplicates the removal above; it appears
# to come from an older version interleaved by the diff rendering.
for package in $cockpit_packages; do
sudo pkcon remove $package -y || true
done
sudo rm -rf /etc/cockpit/*
}
@ -63,7 +51,22 @@ remove_files() {
sudo rm -rf $install_path/* $systemd_path/* $cockpit_plugin_path/*
}
# NOTE(review): the unconditional calls below and the argument loop that
# follows appear to be old/new versions interleaved by the diff rendering.
remove_cockpit
remove_files
# Parse command-line flags: --cockpit removes Cockpit, --files removes
# installed files; any other argument aborts.
for arg in "$@"
do
case $arg in
--cockpit)
remove_cockpit
shift
;;
--files)
remove_files
shift
;;
*)
echo "Unknown argument: $arg"
exit 1
;;
esac
done
echo -e "\nCongratulations, Websoft9 uninstall is complete!"
# NOTE(review): duplicated completion message (diff interleave).
echo -e "\nCongratulations, Websoft9 uninstall is complete!"

View file

@ -4,9 +4,20 @@ PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# ---- cockpit port/cert sync configuration ----
cockpit_port="9000"
container_name="websoft9-apphub"
volume_name="websoft9_apphub_config"
cockpit_service_path="/lib/systemd/system/cockpit.socket"
cockpit_ssl_path="/etc/cockpit/ws-certs.d/"
npm_ssl_path="/var/lib/docker/volumes/websoft9_nginx_data/_data/custom_ssl/"
# Check whether any of the given ports is already in use by something other
# than cockpit.socket.
# Returns 0 if some port is occupied (cannot be written to config.ini),
# 1 if every port is free -- note this is inverted from the usual shell
# success convention; callers test $? -eq 0 for the "busy" case.
check_ports() {
local ports=("$@")
for port in "${ports[@]}"; do
echo "Check port: $port"
if ss -tuln | grep ":$port " >/dev/null && ! systemctl status cockpit.socket | grep "$port" >/dev/null; then
echo "Port $port is in use, can not set this port to config.ini"
return 0
fi
done
echo "All ports are available"
return 1
}
# get volume from container
function get_volume_path() {
@ -31,23 +42,28 @@ function get_volume_path() {
echo "Cannot get volume path"
exit 1
}
# Resolve the apphub config.ini path inside the container's volume.
volume_path=$(get_volume_path "$container_name" "$volume_name")
config_path="$volume_path/config.ini"
# NOTE(review): cockpit_service_path is assigned again here (also set near
# the top of this script) -- looks like diff interleave.
cockpit_service_path="/lib/systemd/system/cockpit.socket"
FILES="$cockpit_service_path $config_path"
# Propagate the cockpit port from config.ini into cockpit.socket.
# NOTE(review): this region interleaves two versions (sync_cockpit_port vs
# on_change) and the braces do not balance -- verify against the repository
# before running as-is.
sync_cockpit_port() {
echo "sync cockpit port from config.ini"
# actions to perform when a monitored file changes
on_change() {
set +e
cockpit_port=$(docker exec -i websoft9-apphub apphub getconfig --section cockpit --key port)
listen_stream=$(grep -Po 'ListenStream=\K[0-9]*' /lib/systemd/system/cockpit.socket)
if [ "$cockpit_port" != "$listen_stream" ]; then
# rewrite ListenStream in cockpit.socket to the configured port
ex -s -c "g/ListenStream=${listen_stream}/s//ListenStream=${cockpit_port}/" -c wq "$cockpit_service_path"
systemctl daemon-reload
systemctl restart cockpit.socket 2> /dev/null
systemctl restart cockpit || exit 1
set_Firewalld
# If the desired port is busy (check_ports returns 0), write the current
# socket port back into config.ini; otherwise apply the new port.
check_ports "$cockpit_port"
if [ $? -eq 0 ]; then
sudo docker exec -i websoft9-apphub apphub setconfig --section cockpit --key port --value "$listen_stream"
else
ex -s -c "g/ListenStream=${listen_stream}/s//ListenStream=${cockpit_port}/" -c wq "$cockpit_service_path"
systemctl daemon-reload
systemctl restart cockpit.socket 2> /dev/null
systemctl restart cockpit || exit 1
set_Firewalld
fi
fi
set -e
}
@ -59,32 +75,9 @@ set_Firewalld(){
# Reload firewalld so rule changes take effect; discard any error output.
# Fix: the redirect previously read "2>/dev/nul", which sent stderr to a
# regular file named "nul" in the working directory instead of discarding it.
firewall-cmd --reload 2>/dev/null
}
# Re-sync the cockpit port and copy Cockpit's TLS certs into NPM's
# custom_ssl directory; run once on every websoft9 restart.
force_sync(){
echo "Force sync cockpit port and certs"
sync_cockpit_port
cp -r "${cockpit_ssl_path}"* $npm_ssl_path
}
# when websoft9 restart, force sync cockpit port and certs
force_sync
(
# monitor cockpit.socket and config.ini; keep the port in config.ini synced
# to cockpit.socket
inotifywait -e modify,attrib -m $FILES | while read PATH EVENT FILE; do
echo "Reset cockpit port when config.ini changed"
# inotifywait's read clobbers PATH; restore it before running commands
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
sync_cockpit_port
done
) &
(
# monitor cockpit ssl path and sync to NPM ssl path if changed
inotifywait -e create,modify,delete,attrib -m $cockpit_ssl_path | while read PATH EVENT FILE; do
echo "Sync CA files from cockipt to NPM when changed"
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
cp -r "${cockpit_ssl_path}"* $npm_ssl_path
done
) &
# Wait for background processes to finish
wait
# NOTE(review): the watcher loop below duplicates the first subshell above
# and looks like an older version interleaved by the diff rendering.
# monitor /lib/systemd/system/cockpit.socket and config.ini, make sure config.ini port is the same with cockpit.socket
inotifywait -e modify -m $FILES | while read PATH EVENT FILE; do
echo "Set cockpit port by config.ini..."
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
on_change
done

View file

@ -54,9 +54,9 @@ for i in ${!containers[@]}; do
((counter++))
done
if [[ $success == true ]]; then
echo "Successfully get credentials from $container"
echo "Successfully retrieved credentials for $container"
else
echo "Failed to get credentials from $container after $max_retries attempts"
echo "Failed to retrieve credentials for $container after $max_retries attempts"
fi
done
@ -68,10 +68,10 @@ for ((i=0; i<$length; i++)); do
container=${containers[$i]}
section=${sections[$i]}
if [[ -n ${passwords[$container]} ]]; then
echo "Sync credentials of $container to websoft9-apphub"
echo "$container start to set password"
docker exec -i websoft9-apphub apphub setconfig --section $section --key user_name --value ${usernames[$container]}
docker exec -i websoft9-apphub apphub setconfig --section $section --key user_pwd --value ${passwords[$container]}
else
echo "Password of $container is not set or empty. Skipping..."
echo "Password for $container is not set or empty. Skipping..."
fi
done

View file

@ -1,22 +1,22 @@
{
"version": "2.1.20",
"version": "2.1.14-rc1",
"plugins": {
"portainer": "0.1.3",
"nginx": "0.1.0",
"gitea": "0.0.8",
"myapps": "0.2.7",
"appstore": "0.2.6",
"settings": "0.1.5",
"settings": "0.1.4",
"navigator": "0.5.10"
},
"OS": {
"Fedora": [
"41",
"40"
"38",
"37",
"35"
],
"RedHat": [
"9",
"8",
"7"
],
"CentOS": [