Mirror of https://github.com/Websoft9/websoft9.git (synced 2024-11-24 00:20:24 +00:00)

commit aa2f781fae (parent f5a6573dde): test docker build
98 changed files with 6 additions and 5894 deletions
.github/workflows/media.yml (vendored): 3 changes
@@ -1,3 +1,6 @@
# This action will be trigger by docker.yml action
# docker.yml action download the artifact for build

name: Build media for apphub image

on:
.gitignore (vendored): 1 change
@@ -7,3 +7,4 @@ logs
apphub/swagger-ui
apphub/apphub.egg-info
cli/__pycache__
source
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +1,4 @@
# modify time: 202312081656, you can modify here to trigger Docker Build action
# modify time: 202312081709, you can modify here to trigger Docker Build action

FROM python:3.10-slim-bullseye
LABEL maintainer="Websoft9<help@websoft9.com>"
@@ -16,18 +16,15 @@ RUN apt update && apt install -y --no-install-recommends curl git jq cron iprout
    git clone --depth=1 $library_repo \
    mv docker-library w9library && \
    rm -rf w9library/.github && \
    # Prepare media
    if [ ! -f ./media.zip ]; then \
    wget $websoft9_artifact/plugin/media/media-latest.zip -O ./media.zip && \
    unzip media.zip \
    fi \
    mv media* w9media \
    git clone --depth=1 https://github.com/swagger-api/swagger-ui.git && \
    wget https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js && \
    cp redoc.standalone.js swagger-ui/dist && \
    git clone --depth=1 $websoft9_repo ./w9source && \
    cp -r ./w9media ./media && \
    cp -r ./w9library ./library && \
@@ -1,126 +0,0 @@
## 0.8.30-rc1 release on 2023-11-16

1. improve all plugins' GitHub Actions
2. install plugin by shell

## 0.8.29 release on 2023-11-04

1. gitea, myapps, appstore update
2. apphub domains
3. apphub docs nginx config

## 0.8.28 release on 2023-11-01

1. improve dockerfile to reduce image size
2. fixed update_zip.sh
3. hide websoft9 containers

## 0.8.27 release on 2023-10-31

1. new websoft9 init

## 0.8.26 release on 2023-09-27

1. appmanage changed to apphub

## 0.8.20 release on 2023-08-23

1. appmanage config files error: bug fix

## 0.8.19 release on 2023-08-23

1. New App Store preview push function added
2. Fix some known bugs

## 0.8.18 release on 2023-08-17

1. appmanage volumes bug fix

## 0.8.15 release on 2023-08-17

1. service menu bug

## 0.8.14 release on 2023-08-16

1. myapps plugins refresh bug

## 0.8.13 release on 2023-08-15

1. update plugins
2. fix bug of data saved in session

## 0.8.12 release on 2023-08-12

1. navigator plugin install method changed
2. update plugin

## 0.8.11 release on 2023-08-03

1. Optimize interface calls
2. library artifacts directory: websoft9/plugin/library
3. add init apps: nocobase, affine

## 0.8.10 release on 2023-08-01

1. improve update.sh
2. add docs to artifacts
3. improve server's hostname

## 0.8.8 release on 2023-07-27

fixed update search api bug

## 0.8.5 release on 2023-07-26

add docs

## 0.8.4 release on 2023-07-26

add appstore search api

## 0.8.2 release on 2023-07-24

1. install from artifacts
2. add extra version.json into artifacts

## 0.7.2 release on 2023-06-25

1. appmanage auto-update API upgraded

## 0.7.1 release on 2023-06-21

1. fixed appmanage bug when the version file is accidentally deleted
2. auto-update frequency adjusted to once a day
3. fixed update script bug when the version file does not exist

## 0.7.0 release on 2023-06-20

1. appstore: added update feature
2. myapps: feature improvements
3. added settings feature

## 0.6.0 release on 2023-06-17

1. added wordpress to the store
2. removed moodle from the store
3. updated redmine
4. upgraded discuzq, zabbix
5. added automatic app store update feature

## 0.4.0 release on 2023-06-15

1. owncloud taken offline for testing

## 0.3.0 release on 2023-06-06

1. appmanage docker image updated to 0.3.0
2. fixed bug where prestashop could not be accessed
3. fixed bug where odoo could not be installed

## 0.2.0 release on 2023-06-03

1. appmanage docker image updated to 0.2.0
2. Portainer plugin: fixed auto-login bug
3. My Apps plugin: fixed bug fetching containers on first use

## 0.1.0 release on 2023-05-26

1. stackhub pre-release, basic features implemented
@@ -1,68 +0,0 @@
# Contributing to Websoft9

From opening a bug report to creating a pull request: every contribution is appreciated and welcome.

If you're planning to implement a new feature or change the API, please [create an issue](https://github.com/websoft9/websoft9/issues/new/choose) first. This way we can ensure that your precious work is not in vain.


## Not Sure About the Architecture?

It's important to figure out the design [architecture of Websoft9](docs/architecture.md)

## Fork

Contributors are only allowed to fork the [main branch](https://github.com/Websoft9/websoft9/tree/main) and open pull requests against it. Maintainers don't accept any PR to the **production branch**

## Branch

This repository has these branches:

* **Contributor's branch**: Developers can fork the main branch as their development branch at any time
* **main branch**: The only branch that accepts PRs from a Contributor's branch
* **production branch**: For version releases; it must not be modified directly, it only merges PRs from the **main branch**


Flow: Contributor's branch → main branch → production branch (a minimal command sketch follows below)
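A minimal sketch of this flow from the contributor side; `<your-account>` and `my-feature` are placeholders:

```
# Fork https://github.com/Websoft9/websoft9 to your own account first, then:
git clone https://github.com/<your-account>/websoft9.git
cd websoft9
git checkout -b my-feature main

# ...commit your changes...
git push origin my-feature
# Open a pull request from <your-account>:my-feature to Websoft9:main;
# maintainers later merge main into the production branch for a release.
```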
## Pull request

A [pull request](https://docs.github.com/pull-requests) lets you tell others about changes you've pushed to a branch in a repository on GitHub.

#### When is a PR produced?

* A contributor submits changes to the main branch
* The main branch is submitted to the production branch

#### How to deal with a PR?

1. [Pull request reviews](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/about-pull-request-reviews)
2. Merge the PR and run CI/CD for it

## DevOps principle

DevOps here follows the same **[5m1e](https://www.dgmfmoldclamps.com/what-is-5m1e-in-injection-molding-industry/)** thinking used by manufacturing companies

We follow the development principles of minimization and rapid release

### Version

Use *[[major].[minor].[patch]](https://semver.org/lang/zh-CN/)* as the version serial number and [version.json](../version.json) for version dependencies

### Artifact

Websoft9 uses the following [Artifacts](https://jfrog.com/devops-tools/article/what-is-a-software-artifact/) for different purposes (see the example below):

* **Dockerhub for images**: Access [Websoft9 docker images](https://hub.docker.com/u/websoft9dev) on Dockerhub
* **Azure Storage for files**: Access the [packages list](https://artifact.azureedge.net/release?restype=container&comp=list) at [Azure Storage](https://learn.microsoft.com/en-us/azure/storage/storage-dotnet-how-to-use-blobs#list-the-blobs-in-a-container)
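For example, both artifact channels can be consumed directly. The `websoft9dev/apphub` image name and the `0.0.6` tag below are taken from this repository's docker/.env, so treat them as an illustration rather than the current release:

```
# Pull a released Websoft9 image from Dockerhub
docker pull websoft9dev/apphub:0.0.6

# List the packaged files in the Azure Storage container
curl "https://artifact.azureedge.net/release?restype=container&comp=list"
```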
### Tags

- Type tags: Bug, Enhancement, Documentation
- Stage tags: PRD, Dev, QA (includes deployment), Documentation

### WorkFlow

Websoft9 uses the [Production branch with GitLab flow](https://cm-gitlab.stanford.edu/help/workflow/gitlab_flow.md#production-branch-with-gitlab-flow) for development collaboration

> [GitLab flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html) is an improved workflow model for Git
@@ -1,167 +0,0 @@
This program is released under LGPL-3.0 and with the additional Terms:
Without authorization, it is not allowed to publish free or paid image based on this program in any Cloud platform's Marketplace.

GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

0. Additional Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

1. Exception to Section 3 of the GNU GPL.

You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

2. Conveying Modified Versions.

If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or

b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.

3. Object Code Incorporating Material from Library Header Files.

The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.

b) Accompany the object code with a copy of the GNU GPL and this license
document.

4. Combined Works.

You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.

b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.

c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.

d) Do one of the following:

0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.

1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.

e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)

5. Combined Libraries.

You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.

b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.

6. Revised Versions of the GNU Lesser General Public License.

The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
@@ -1,72 +0,0 @@
[![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](http://www.gnu.org/licenses/gpl-3.0)
[![GitHub last commit](https://img.shields.io/github/last-commit/websoft9/websoft9)](https://github.com/websoft9/websoft9)
[![GitHub Release Date](https://img.shields.io/github/release-date/websoft9/websoft9)](https://github.com/websoft9/websoft9)
[![GitHub Repo stars](https://img.shields.io/github/stars/websoft9/websoft9?style=social)](https://github.com/websoft9/websoft9)

# What is Websoft9?

Websoft9 is a web-based PaaS platform for running 200+ popular [open source applications](https://github.com/Websoft9/docker-library/tree/main/apps) on your own server.

Websoft9 helps you run multiple applications on a single server, which means we believe microservices on a single machine are reasonable; on the contrary, this becomes more and more valuable as computing power increases.

Although Cloud Native emphasizes high availability and clustering, most of the time applications do not need to implement complex clusters or K8s.

Websoft9's [architecture](https://github.com/Websoft9/websoft9/blob/main/docs/architecture.md) is simple: it does not create any new technology stack, and we fully utilize popular technology components to achieve our product goals, allowing users and developers to participate in our project without needing to learn new technologies.

## Demos

You can see the screenshots below:

| ![image](https://github.com/Websoft9/websoft9/assets/16741975/8321780c-4824-4e40-997d-676a31534063) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/e842575b-60bc-4b0d-a57b-28c26b16196a) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/c598412a-9529-4286-ba03-6234d6da99b9) |
| --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
| ![image](https://github.com/Websoft9/websoft9/assets/16741975/7bed3744-1e9f-429e-8678-3714c8c262e2) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/a0923c69-2792-4cde-bfaf-bc018b61aee9) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/901efd1c-31a0-4b31-b79c-fc2d441bb679) |

## Features

- Applications listing
- Install 200+ template applications without any configuration
- Web-based file browser to manage files and folders
- Manage user accounts
- Use a terminal on a remote server in your local web browser
- Nginx GUI for proxying and free SSL with Let's Encrypt
- Deploy, configure, troubleshoot and secure containers in minutes on Kubernetes, Docker, and Swarm in any data center, cloud, network edge or IIoT device
- Manage your Linux by GUI: inspect and change network settings, configure a firewall, manage storage, browse and search system logs, inspect a system's hardware, inspect and interact with systemd-based services
- Supported languages: English, Chinese (中文)

# Installation

You need root privileges to install, upgrade or uninstall Websoft9; if you use a non-root user, you can `sudo su` first

## Install & Upgrade

```
# Install by default
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh

# Install Websoft9 with parameters
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh --port 9000 --channel release --path "/data/websoft9/source" --version "latest"
```
After installation, access it at **http://Internet IP:9000** and log in with a **Linux user**

## Uninstall

```
# Uninstall by default
curl https://websoft9.github.io/websoft9/install/uninstall.sh | bash

# Uninstall all
wget -O - https://websoft9.github.io/websoft9/install/uninstall.sh | bash /dev/stdin --cockpit --files
```

# Contributing

Follow the [contributing guidelines](CONTRIBUTING.md) if you want to propose a change in the Websoft9 core. For more information about participating in the community and contributing to the Websoft9 project, see [this page](https://support.websoft9.com/docs/community/contributing).

- Documentation for [Websoft9 core maintainers](docs/MAINTAINERS.md)
- Documentation for maintainers of application templates based on Docker is in the [docker-library](https://github.com/Websoft9/docker-library).
- [Articles promoting Websoft9](https://github.com/Websoft9/websoft9/issues/327)

# License

Websoft9 is licensed under the [LGPL-3.0](/License.md), with additional Terms: it is not allowed to publish free or paid images based on this repository in any Cloud platform's Marketplace without authorization
@@ -1,15 +0,0 @@
# Security Policy

## Versions

As an open source product, we will only patch the latest major version for security vulnerabilities. Previous versions of Websoft9 will not be retroactively patched.

## Disclosing

You can get in touch with us regarding a vulnerability via [issue](https://github.com/Websoft9/websoft9/issues) or email at help@websoft9.com.

You can also disclose via huntr.dev. If you believe you have found a vulnerability, please disclose it on huntr and let us know.

https://huntr.dev/bounties/disclose

This will enable us to review the vulnerability and potentially reward you for your work.
@@ -1,2 +0,0 @@
1. improve all plugins githubaction
2. install plugin by shell
@@ -1,24 +0,0 @@
# Cockpit

Cockpit is used as the backend service gateway. We have not modified the Cockpit core, just improved the installation and modified its config for Websoft9

## Install

```
# default install
wget https://websoft9.github.io/websoft9/install/install_cockpit.sh && bash install_cockpit.sh

# define Cockpit port and install
wget https://websoft9.github.io/websoft9/install/install_cockpit.sh && bash install_cockpit.sh --port 9099
```

## Development

Developers should improve these files:

- Install and upgrade Cockpit: */install/install_cockpit.sh*

- Override the default menus: */cockpit/menu_override*
  > shell.override.json is used for the top menu of Cockpit. Override function until Cockpit 297

- Cockpit configuration file: */cockpit/cockpit.conf*
@@ -1,5 +0,0 @@
# docs: https://cockpit-project.org/guide/latest/cockpit.conf.5.html

[WebService]
AllowUnencrypted = true
LoginTitle = Websoft9 - Linux AppStore
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,3 +0,0 @@
{
  "menu": null
}
@@ -1,57 +0,0 @@
{
  "menu": null,
  "tools": {
    "index": {
      "label": "Networking",
      "order": 40,
      "docs": [
        {"label": "Managing networking bonds", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-network-bonds-using-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing networking teams", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-network-teams-using-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing networking bridges", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-network-bridges-in-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing VLANs", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-vlans-in-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing firewall", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing_firewall_using_the_web_console"}
      ],
      "keywords": [
        {"matches": ["network", "interface", "bridge", "vlan", "bond", "team", "port", "mac", "ipv4", "ipv6"]},
        {"matches": ["firewall", "firewalld", "zone", "tcp", "udp"], "goto": "/network/firewall"}
      ]
    }
  }
}
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,3 +0,0 @@
{
  "menu": null
}
@@ -1,29 +0,0 @@
{
  "locales": {
    "cs-cz": null,
    "de-de": null,
    "es-es": null,
    "fi-fi": null,
    "fr-fr": null,
    "he-il": null,
    "it-it": null,
    "ja-jp": null,
    "ka-ge": null,
    "ko-kr": null,
    "nb-no": null,
    "nl-nl": null,
    "pl-pl": null,
    "pt-br": null,
    "ru-ru": null,
    "sk-sk": null,
    "sv-se": null,
    "tr-tr": null,
    "uk-ua": null
  },
  "docs": [
    {"label": "Documentation", "url": "https://support.websoft9.com/en/docs/"}
  ]
}
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,69 +0,0 @@
{
  "menu": null,
  "tools": {
    "index": {
      "label": "Storage",
      "order": 30,
      "docs": [
        {"label": "Managing partitions", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-partitions-using-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing NFS mounts", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-nfs-mounts-in-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing RAIDs", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-redundant-arrays-of-independent-disks-in-the-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing LVMs", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/using-the-web-console-for-configuring-lvm-logical-volumes_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing physical drives", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/using-the-web-console-for-changing-physical-drives-in-volume-groups_system-management-using-the-rhel-8-web-console"},
        {"label": "Managing VDOs", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/using-the-web-console-for-managing-virtual-data-optimizer-volumes_system-management-using-the-rhel-8-web-console"},
        {"label": "Using LUKS encryption", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/locking-data-with-luks-password-in-the-rhel-web-console_system-management-using-the-rhel-8-web-console"},
        {"label": "Using Tang server", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-automated-unlocking-using-a-tang-key-in-the-web-console_system-management-using-the-rhel-8-web-console"}
      ],
      "keywords": [
        {"matches": ["filesystem", "partition", "nfs", "raid", "volume", "disk", "vdo", "iscsi", "drive", "mount", "unmount", "udisks", "mkfs", "format", "fstab", "lvm2", "luks", "encryption", "nbde", "tang"]}
      ]
    }
  }
}
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,147 +0,0 @@
{
  "tools": {
    "terminal": null,
    "services": {
      "label": "Services",
      "order": 10,
      "docs": [
        {"label": "Managing services", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-services-in-the-web-console_system-management-using-the-rhel-8-web-console"}
      ],
      "keywords": [
        {"matches": ["service", "systemd", "target", "socket", "timer", "path", "unit", "systemctl"]},
        {"matches": ["boot", "mask", "unmask", "restart", "enable", "disable"], "weight": 1}
      ]
    },
    "logs": {
      "label": "Logs",
      "order": 20,
      "docs": [
        {"label": "Reviewing logs", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/reviewing-logs_system-management-using-the-rhel-8-web-console"}
      ],
      "keywords": [
        {"matches": ["journal", "warning", "error", "debug"]},
        {"matches": ["abrt", "crash", "coredump"], "goto": "?tag=abrt-notification"}
      ]
    }
  },
  "menu": {
    "logs": null,
    "services": null,
    "index": {
      "label": "Overview",
      "order": -2,
      "docs": [
        {"label": "Configuring system settings", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/getting-started-with-the-rhel-8-web-console_system-management-using-the-rhel-8-web-console"}
      ],
      "keywords": [
        {"matches": ["time", "date", "restart", "shut", "domain", "machine", "operating system", "os", "asset tag", "ssh", "power", "version", "host"]},
        {"matches": ["hardware", "mitigation", "pci", "memory", "cpu", "bios", "ram", "dimm", "serial"], "goto": "/system/hwinfo"},
        {"matches": ["graphs", "metrics", "history", "pcp", "cpu", "memory", "disks", "network", "cgroups", "performance"], "goto": "/metrics"}
      ]
    },
    "terminal": {
      "label": "Terminal",
      "keywords": [
        {"matches": ["console", "command", "bash", "shell"]}
      ]
    }
  },
  "preload": ["index"],
  "content-security-policy": "img-src 'self' data:"
}
@@ -1,3 +0,0 @@
{
  "tools": null
}
@@ -1,31 +0,0 @@
{
  "menu": null,
  "tools": {
    "index": {
      "label": "Accounts",
      "order": 70,
      "docs": [
        {"label": "Managing user accounts", "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-user-accounts-in-the-web-console_system-management-using-the-rhel-8-web-console"}
      ],
      "keywords": [
        {"matches": ["user", "password", "useradd", "passwd", "username", "login", "access", "roles", "ssh", "keys"]}
      ]
    }
  }
}
@@ -1,4 +0,0 @@
git clone --depth=1 https://github.com/Websoft9/websoft9.git
rm -rf /etc/cockpit/*.override.json
cp -r websoft9/cockpit/menu_override/* /etc/cockpit
rm -rf websoft9
@@ -1,4 +0,0 @@
APPHUB_VERSION=0.0.6
DEPLOYMENT_VERSION=2.19.0
GIT_VERSION=1.20.4
PROXY_VERSION=2.10.4
@@ -1,28 +0,0 @@
# Docker

## Test it

All backend services of Websoft9 are packaged into Docker images; with just these steps you can run them:

```
curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh && sudo systemctl enable docker && sudo systemctl start docker
sudo docker network create websoft9
wget https://websoft9.github.io/websoft9/docker/.env
wget https://websoft9.github.io/websoft9/docker/docker-compose.yml
sudo docker compose -p websoft9 up -d
```
> If you want to switch to development, you should execute the following commands:
```
sudo docker compose -p websoft9 down -v
wget https://websoft9.github.io/websoft9/docker/docker-compose-dev.yml
# /data/source is the development sources path on the host
docker compose -f docker-compose-dev.yml -p websoft9 up -d --build
```
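Whichever compose file you use, a quick way to confirm the stack came up; these are standard docker compose commands, and the project and container names come from the files above:

```
sudo docker compose -p websoft9 ps
sudo docker logs websoft9-apphub --tail 50
```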

## Develop it

The folders **apphub, deployment, git, proxy** store development files and are used to:

- Optimize the Dockerfiles
- Release versions
- Build docker images by GitHub Actions
@@ -1,59 +0,0 @@
# modify time: 202311131740, you can modify here to trigger Docker Build action

FROM python:3.10-slim-bullseye
LABEL maintainer="Websoft9<help@websoft9.com>"
LABEL version="0.0.6"

WORKDIR /websoft9

ENV LIBRARY_VERSION=0.5.8
ENV MEDIA_VERSION=0.0.3
ENV websoft9_repo="https://github.com/Websoft9/websoft9"
ENV docker_library_repo="https://github.com/Websoft9/docker-library"
ENV media_repo="https://github.com/Websoft9/media"
ENV source_github_pages="https://websoft9.github.io/websoft9"

RUN apt update && apt install -y --no-install-recommends curl git jq cron iproute2 supervisor rsync wget unzip zip && \
    # Prepare source files
    wget $docker_library_repo/archive/refs/tags/$LIBRARY_VERSION.zip -O ./library.zip && \
    unzip library.zip && \
    mv docker-library-* w9library && \
    rm -rf w9library/.github && \
    wget $media_repo/archive/refs/tags/$MEDIA_VERSION.zip -O ./media.zip && \
    unzip media.zip && \
    mv media-* w9media && \
    rm -rf w9media/.github && \
    git clone --depth=1 https://github.com/swagger-api/swagger-ui.git && \
    wget https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js && \
    cp redoc.standalone.js swagger-ui/dist && \
    git clone --depth=1 $websoft9_repo ./w9source && \
    cp -r ./w9media ./media && \
    cp -r ./w9library ./library && \
    cp -r ./w9source/apphub ./apphub && \
    cp -r ./swagger-ui/dist ./apphub/swagger-ui && \
    cp -r ./w9source/apphub/src/config ./config && \
    cp -r ./w9source/docker/apphub/script ./script && \
    curl -o ./script/update_zip.sh $source_github_pages/scripts/update_zip.sh && \
    pip install --no-cache-dir --upgrade -r apphub/requirements.txt && \
    pip install -e ./apphub && \
    # Clean cache and install files
    rm -rf apphub/docs apphub/tests library.zip media.zip redoc.standalone.js swagger-ui w9library w9media w9source && \
    apt clean && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc /usr/share/doc-base

# supervisor
COPY config/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
RUN chmod +r /etc/supervisor/conf.d/supervisord.conf

# cron
COPY config/cron /etc/cron.d/cron
RUN echo "" >> /etc/cron.d/cron && crontab /etc/cron.d/cron

# chmod for all .sh script
RUN find /websoft9/script -name "*.sh" -exec chmod +x {} \;

VOLUME /websoft9/apphub/logs
VOLUME /websoft9/apphub/src/config

EXPOSE 8080
ENTRYPOINT ["/websoft9/script/entrypoint.sh"]
@@ -1,4 +0,0 @@
ARG APPHUB_VERSION
FROM websoft9dev/apphub:${APPHUB_VERSION} as buildstage
RUN mkdir -p /websoft9/apphub-dev
RUN sed -i '/supervisorctl start apphub/c\supervisorctl start apphubdev' /websoft9/script/entrypoint.sh
@@ -1,11 +0,0 @@
# README

- Download the docker-library release into the image
- install git
- entrypoint: configure git credentials for the remote Gitea
- health.sh: gitea/portainer/nginx credentials; if there is an exception, output it to the logs
- use virtualenv for pip install requirements.txt
- create volumes in the Dockerfile
- EXPOSE port
- process logs should be output to docker logs by supervisord
- [uvicorn](https://www.uvicorn.org/) loads the FastAPI app (see the launch sketch below)
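A minimal sketch of that last point, assuming the FastAPI app object is exposed as `src.main:app` (the same target used in config/supervisord.conf):

```
# Run the apphub FastAPI application with uvicorn, as supervisord does
uvicorn src.main:app --host 0.0.0.0 --port 8080
```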
@@ -1 +0,0 @@
{"username":"appuser","password":"apppassword"}
@@ -1 +0,0 @@
0 2 * * * /websoft9/script/update.sh
@@ -1,45 +0,0 @@
[supervisord]
nodaemon=false
logfile=/var/log/supervisord.log
logfile_maxbytes=50MB
logfile_backups=10
loglevel=info
user=root

[program:apphub]
command=uvicorn src.main:app --host 0.0.0.0 --port 8080
autostart=false
user=root
directory=/websoft9/apphub
stdout_logfile=/var/log/supervisord.log
stdout_logfile_maxbytes=0
stderr_logfile=/var/log/supervisord.log
stderr_logfile_maxbytes=0

[program:apphubdev]
command=/websoft9/script/developer.sh
autostart=false
user=root
stdout_logfile=/var/log/supervisord.log
stdout_logfile_maxbytes=0
stderr_logfile=/var/log/supervisord.log
stderr_logfile_maxbytes=0

[program:cron]
command=cron -f
autostart=true
user=root
stdout_logfile=/var/log/supervisord.log
stdout_logfile_maxbytes=0
stderr_logfile=/var/log/supervisord.log
stderr_logfile_maxbytes=0

[program:media]
command=uvicorn src.media:app --host 0.0.0.0 --port 8081
autostart=true
user=root
directory=/websoft9/apphub
stdout_logfile=/var/log/supervisord.log
stdout_logfile_maxbytes=0
stderr_logfile=/var/log/supervisord.log
stderr_logfile_maxbytes=0
@@ -1,17 +0,0 @@
#!/bin/bash

source_path="/websoft9/apphub-dev"

echo "Start to cp source code"
if [ ! "$(ls -A $source_path)" ]; then
    cp -r /websoft9/apphub/* $source_path
fi
cp -r /websoft9/apphub/swagger-ui $source_path

echo "Install apphub cli"
pip uninstall apphub -y
pip install -e $source_path

echo "Running the apphub"
cd $source_path
exec uvicorn src.main:app --reload --host 0.0.0.0 --port 8080
@@ -1,56 +0,0 @@
#!/bin/bash
# Define PATH
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# Export PATH
export PATH

set -e

bash /websoft9/script/migration.sh

try_times=5
supervisord
supervisorctl start apphub

# set git user and email
for ((i=0; i<$try_times; i++)); do
    set +e
    username=$(apphub getconfig --section gitea --key user_name 2>/dev/null)
    email=$(apphub getconfig --section gitea --key user_email 2>/dev/null)
    set -e
    if [ -n "$username" ] && [ -n "$email" ]; then
        break
    fi
    echo "Wait for service running, retrying..."
    sleep 3
done

if [[ -n "$username" ]]; then
    echo "git config --global user.name $username"
    git config --global user.name "$username"
else
    echo "username is null, git config username failed"
    exit 1
fi

regex="^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$"
if [[ $email =~ $regex ]]; then
    echo "git config --global user.email $email"
    git config --global user.email "$email"
else
    echo "Not have correct email, git config email failed"
    exit 1
fi

create_apikey() {
    if [ ! -f /websoft9/apphub/src/config/initialized ] || [ -z "$(apphub getkey)" ]; then
        echo "Create new apikey"
        apphub genkey
        touch /websoft9/apphub/src/config/initialized 2>/dev/null
    fi
}

create_apikey

tail -n 1000 -f /var/log/supervisord.log
@@ -1,51 +0,0 @@
#!/bin/bash

echo "start to migrate config.ini"

migrate_ini() {
# Define file paths: use the template ini and sync existing items from the target ini
export target_ini="$1"
export template_ini="$2"

python3 - <<EOF
import configparser
import os
import sys

target_ini = os.environ['target_ini']
template_ini = os.environ['template_ini']

# Create two config parsers
target_parser = configparser.ConfigParser()
template_parser = configparser.ConfigParser()

try:
    target_parser.read(target_ini)
    template_parser.read(template_ini)
except configparser.MissingSectionHeaderError:
    print("Error: The provided files are not valid INI files.")
    sys.exit(1)

# use target_parser to override template_parser
for section in target_parser.sections():
    if template_parser.has_section(section):
        for key, value in target_parser.items(section):
            if template_parser.has_option(section, key):
                template_parser.set(section, key, value)

with open(target_ini, 'w') as f:
    template_parser.write(f)
EOF
}

migrate_ini "/websoft9/apphub/src/config/config.ini" "/websoft9/config/config.ini"

if [ $? -eq 0 ]; then
    echo "Success to update config.ini"
else
    echo "Fail to update config.ini, skip it"
fi
@@ -1,9 +0,0 @@
#!/bin/bash

echo "$(date) - Compare remote version and local version." | tee -a /var/log/supervisord.log

echo "$(date) - Download remote packages and replace local data." | tee -a /var/log/supervisord.log
bash /websoft9/script/update_zip.sh --package_name "media-latest.zip" --sync_to "/websoft9/media"
bash /websoft9/script/update_zip.sh --package_name "library-latest.zip" --sync_to "/websoft9/library"

echo "$(date) - Success to update library and media."
@@ -1,7 +0,0 @@
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "5"
  }
}
@@ -1,23 +0,0 @@
# modify time: 202311031633, you can modify here to trigger Docker Build action
# step1: Build entrypoint execute program init_portainer by golang

FROM golang:latest AS builder
WORKDIR /
COPY init_portainer.go /
# CGO_ENABLED=0 can not depend on any dynamic library
RUN CGO_ENABLED=0 go build -o init_portainer /init_portainer.go
RUN chmod +x /init_portainer

COPY endpoint.go /
RUN CGO_ENABLED=0 go build -o endpoint /endpoint.go
RUN chmod +x /endpoint

# step2: Copy build go program to portainer
# Dockerfile refer to: https://github.com/portainer/portainer/blob/develop/build/linux/Dockerfile
FROM portainer/portainer-ce:2.19.0
LABEL maintainer="websoft9<help@websoft9.com>"
LABEL version="2.19.0"
COPY --from=builder /init_portainer /
COPY --from=builder /endpoint /

ENTRYPOINT ["/init_portainer"]
@@ -1,3 +0,0 @@
# Readme

- create local endpoint and lock
@@ -1,167 +0,0 @@
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"
    "os"
    "time"
)

const (
    AdminUser     = "admin"
    EndpointURL   = "http://localhost:9000/api/endpoints"
    AuthURL       = "http://localhost:9000/api/auth"
    CredentialLoc = "/data/credential"
)

type Endpoint struct {
    Name string `json:"name"`
    URL  string `json:"url"`
}

type EndpointCreation struct {
    Name                 string `json:"name"`
    EndpointCreationType int    `json:"EndpointCreationType"`
}

type AuthResponse struct {
    Jwt string `json:"jwt"`
}

type Credentials struct {
    Username string `json:"username"`
    Password string `json:"password"`
}

func main() {
    fmt.Println("Start to create endpoint...")
    client := &http.Client{}

    password, err := getPassword()
    if err != nil {
        fmt.Println("Failed to get password:", err)
        return
    }

    token, err := authenticate(client, AdminUser, password)
    if err != nil {
        fmt.Println("Failed to authenticate:", err)
        return
    }

    endpoints, err := queryEndpoints(client, token)
    if err != nil {
        fmt.Println("Failed to query endpoints:", err)
        return
    }

    for _, endpoint := range endpoints {
        if endpoint.Name == "local" && endpoint.URL == "unix:///var/run/docker.sock" {
            fmt.Println("Endpoint exists, exiting...")
            return
        }
    }

    fmt.Println("Endpoint does not exist, creating...")
    createEndpoint(client, token)

    fmt.Println("Endpoint created successfully")
}

func getPassword() (string, error) {
    for {
        if _, err := os.Stat(CredentialLoc); os.IsNotExist(err) {
            fmt.Printf("%s does not exist, waiting for 3 seconds...\n", CredentialLoc)
            time.Sleep(3 * time.Second)
        } else {
            fmt.Printf("%s exists, proceeding...\n", CredentialLoc)
            data, err := ioutil.ReadFile(CredentialLoc)
            if err != nil {
                return "", err
            }
            return string(data), nil
        }
    }
}

func authenticate(client *http.Client, username, password string) (string, error) {
    credentials := Credentials{Username: username, Password: password}
    credentialsJson, err := json.Marshal(credentials)
    if err != nil {
        return "", err
    }

    req, err := http.NewRequest("POST", AuthURL, bytes.NewBuffer(credentialsJson))
    req.Header.Set("Content-Type", "application/json")
    resp, err := client.Do(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return "", err
    }

    var authResponse AuthResponse
    err = json.Unmarshal(body, &authResponse)
    if err != nil {
        return "", err
    }

    return authResponse.Jwt, nil
}

func queryEndpoints(client *http.Client, token string) ([]Endpoint, error) {
    req, err := http.NewRequest("GET", EndpointURL, nil)
    req.Header.Set("Authorization", "Bearer "+token)
    resp, err := client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }

    var endpoints []Endpoint
    err = json.Unmarshal(body, &endpoints)
    if err != nil {
        return nil, err
    }

    return endpoints, nil
}

func createEndpoint(client *http.Client, token string) error {
    data := url.Values{
        "Name":                 {"local"},
        "EndpointCreationType": {"1"},
    }

    req, err := http.NewRequest("POST", EndpointURL, strings.NewReader(data.Encode()))
    if err != nil {
        return err
    }

    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
    req.Header.Set("Authorization", "Bearer "+token)

    resp, err := client.Do(req)

    if resp.StatusCode != http.StatusCreated {
        body, _ := ioutil.ReadAll(resp.Body)
        return fmt.Errorf("Failed to create endpoint: %s, Response body: %s", resp.Status, string(body))
    }

    return nil
}
@@ -1,70 +0,0 @@
package main

import (
    "fmt"
    "io/ioutil"
    "math/rand"
    "os"
    "os/exec"
    "time"
)

func main() {
    filePath := "/data/credential"
    initPath := "/data/init"

    _, err := os.Stat(filePath)
    if os.IsNotExist(err) {
        _, err := os.Stat(initPath)

        if os.IsNotExist(err) {
            fmt.Println("credential is not exist, create it.")
            password := generatePassword(16)
            err := writeToFile(filePath, password)
            if err != nil {
                fmt.Println("write file error:", err)
                return
            }
        } else {
            fmt.Println("credential is exist, skip it.")
        }

        // call portainer
        cmd := exec.Command("./portainer", "--admin-password-file", filePath, "--hide-label", "owner=websoft9")
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr

        err = cmd.Run()
        if err != nil {
            fmt.Println("error running compiled_program:", err)
            return
        } else {
            os.Create(initPath)
        }
    } else {
        fmt.Println("credential is exist, skip it.")
        cmd := exec.Command("./portainer", "--hide-label", "owner=websoft9")
        cmd.Run()
    }
}

func generatePassword(length int) string {
    rand.Seed(time.Now().UnixNano())

    charset := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789@$()_"

    password := make([]byte, length)
    for i := range password {
        password[i] = charset[rand.Intn(len(charset))]
    }

    return string(password)
}

func writeToFile(filePath, content string) error {
    return ioutil.WriteFile(filePath, []byte(content), 0755)
}
@@ -1,98 +0,0 @@
# this file for developer
# install --devto "/data/mysource"

version: "3.8"

services:
  apphub:
    image: websoft9dev/apphub:${APPHUB_VERSION}
    container_name: websoft9-apphub
    build:
      context: .
      dockerfile: ./apphub/Dockerfiledev
      args:
        - APPHUB_VERSION=${APPHUB_VERSION}
    ports:
      - 9001-9999:8080
    restart: always
    volumes:
      - /data/websoft9/apphub:/websoft9/apphub-dev
      - apphub_media:/websoft9/media
    depends_on:
      - deployment
      - git
      - proxy
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port:8080"

  deployment:
    image: websoft9dev/deployment:$DEPLOYMENT_VERSION
    container_name: websoft9-deployment
    restart: always
    ports:
      - 9001-9999:9000
    volumes:
      - portainer:/data
      - /data/compose:/data/compose
      - /var/run/docker.sock:/var/run/docker.sock
      #- /run/podman/podman.sock:/var/run/docker.sock
    healthcheck:
      test: ["CMD", "/endpoint"]
      interval: 10s
      timeout: 30s
      retries: 4
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port:9000"

  git:
    image: websoft9dev/git:$GIT_VERSION
    container_name: websoft9-git
    restart: always
    volumes:
      - gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - 9001-9999:3000
    environment:
      - INSTALL_LOCK=true
      - DISABLE_SSH=true
      - RUN_MODE=prod
      - HTTP_PORT=3000
      - DISABLE_REGISTRATION=false
      - REQUIRE_SIGNIN_VIEW=false
      - ROOT_URL=http://localhost/w9git/
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port:3000"

  proxy:
    image: websoft9dev/proxy:$PROXY_VERSION
    container_name: websoft9-proxy
    restart: always
    ports:
      - "80:80"
      - "443:443"
      - "9001-9999:81"
    volumes:
      - nginx_data:/data
      - nginx_letsencrypt:/etc/letsencrypt
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port: 80"
      - "com.docker.compose.w9_https.port: 443"
      - "com.docker.compose.w9_console.port: 81"

networks:
  default:
    name: websoft9
    external: true

volumes:
  apphub_media:
  portainer:
  gitea:
  nginx_data:
  nginx_letsencrypt:
@@ -1,84 +0,0 @@
version: "3.8"

services:
  apphub:
    image: websoft9dev/apphub:$APPHUB_VERSION
    container_name: websoft9-apphub
    restart: always
    volumes:
      - apphub_logs:/websoft9/apphub/logs
      - apphub_config:/websoft9/apphub/src/config
    depends_on:
      - deployment
      - git
      - proxy
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port:8080"

  deployment:
    image: websoft9dev/deployment:$DEPLOYMENT_VERSION
    container_name: websoft9-deployment
    restart: always
    volumes:
      - portainer:/data
      - /data/compose:/data/compose
      - /var/run/docker.sock:/var/run/docker.sock
      #- /run/podman/podman.sock:/var/run/docker.sock
    healthcheck:
      test: ["CMD", "/endpoint"]
      interval: 10s
      timeout: 30s
      retries: 4
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port:9000"

  git:
    image: websoft9dev/git:$GIT_VERSION
    container_name: websoft9-git
    restart: always
    volumes:
      - gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    environment:
      - INSTALL_LOCK=true
      - DISABLE_SSH=true
      - RUN_MODE=prod
      - HTTP_PORT=3000
      - DISABLE_REGISTRATION=false
      - REQUIRE_SIGNIN_VIEW=false
      - ROOT_URL=http://localhost/w9git/
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port:3000"

  proxy:
    image: websoft9dev/proxy:$PROXY_VERSION
    container_name: websoft9-proxy
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - nginx_data:/data
      - nginx_letsencrypt:/etc/letsencrypt
    labels:
      - "owner=websoft9"
      - "com.docker.compose.w9_http.port: 80"
      - "com.docker.compose.w9_https.port: 443"
      - "com.docker.compose.w9_console.port: 81"

networks:
  default:
    name: websoft9
    external: true

volumes:
  apphub_logs:
  apphub_config:
  portainer:
  gitea:
  nginx_data:
  nginx_letsencrypt:
@ -1,8 +0,0 @@
|
|||
# modify time: 202310250925, you can modify here to trigger Docker Build action
|
||||
# Dockerfile refer to: https://github.com/go-gitea/gitea/blob/main/Dockerfile
|
||||
FROM gitea/gitea:1.20.4
|
||||
|
||||
LABEL maintainer="Websoft9<help@websoft9.com>"
|
||||
LABEL version="1.20.4"
|
||||
COPY ./src/s6/user /etc/s6/user
|
||||
RUN chmod -R 755 /etc/s6/user
|
|
@ -1,19 +0,0 @@
|
|||
# Readme
|
||||
|
||||
## Development
|
||||
|
||||
From official Gitea image, and:
|
||||
|
||||
- Complete the install wizard automatically via the INSTALL_LOCK environment variable
|
||||
- Use default URL localhost for Host/Root_URL settings
|
||||
|
||||
## User
|
||||
|
||||
### Repository
|
||||
|
||||
How to clone a repository?
|
||||
|
||||
- Git clone a repository by its external network address (the HTTP URL shown on the Gitea repository page)
|
||||
- Git clone a repository by its internal network address (e.g. http://websoft9-git:3000/organization/repository.git), as shown in the example below
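
A minimal sketch of both clone styles; the organization/repository path is a placeholder, and the external form assumes the repository is reachable through the /w9git/ proxy path configured elsewhere in this repository:

```
# clone via the external address (placeholder domain, proxied under /w9git/)
git clone http://git.example.com/w9git/organization/repository.git

# clone via the internal address, from another container on the websoft9 network
git clone http://websoft9-git:3000/organization/repository.git
```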
|
||||
|
||||
|
|
@ -1,98 +0,0 @@
|
|||
APP_NAME = gitea
|
||||
RUN_MODE = prod
|
||||
RUN_USER = git
|
||||
WORK_PATH = /data/gitea
|
||||
|
||||
[repository]
|
||||
ROOT = /data/git/repositories
|
||||
|
||||
[repository.local]
|
||||
LOCAL_COPY_PATH = /data/gitea/tmp/local-repo
|
||||
|
||||
[repository.upload]
|
||||
TEMP_PATH = /data/gitea/uploads
|
||||
|
||||
[server]
|
||||
APP_DATA_PATH = /data/gitea
|
||||
DOMAIN = 119.8.96.66
|
||||
SSH_DOMAIN = 119.8.96.66
|
||||
HTTP_PORT = 3000
|
||||
ROOT_URL = http://119.8.96.66:3000/
|
||||
DISABLE_SSH = true
|
||||
SSH_PORT = 22
|
||||
SSH_LISTEN_PORT = 22
|
||||
LFS_START_SERVER = true
|
||||
LFS_JWT_SECRET = prcv5KuvKilAB_369Vr4saJf4QBdlMwD-vOXD2l7IHo
|
||||
OFFLINE_MODE = false
|
||||
|
||||
[database]
|
||||
PATH = /data/gitea/gitea.db
|
||||
DB_TYPE = sqlite3
|
||||
HOST = localhost:3306
|
||||
NAME = gitea
|
||||
USER = root
|
||||
PASSWD =
|
||||
LOG_SQL = false
|
||||
SCHEMA =
|
||||
SSL_MODE = disable
|
||||
|
||||
[indexer]
|
||||
ISSUE_INDEXER_PATH = /data/gitea/indexers/issues.bleve
|
||||
|
||||
[session]
|
||||
PROVIDER_CONFIG = /data/gitea/sessions
|
||||
PROVIDER = file
|
||||
|
||||
[picture]
|
||||
AVATAR_UPLOAD_PATH = /data/gitea/avatars
|
||||
REPOSITORY_AVATAR_UPLOAD_PATH = /data/gitea/repo-avatars
|
||||
|
||||
[attachment]
|
||||
PATH = /data/gitea/attachments
|
||||
|
||||
[log]
|
||||
MODE = console
|
||||
LEVEL = info
|
||||
ROOT_PATH = /data/gitea/log
|
||||
|
||||
[security]
|
||||
INSTALL_LOCK = true
|
||||
SECRET_KEY =
|
||||
REVERSE_PROXY_LIMIT = 1
|
||||
REVERSE_PROXY_TRUSTED_PROXIES = *
|
||||
INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE2OTQ1NzI0NDN9.kzFr-t0io9M_XQWojbvG20KPiXIZPS2GMadRBgR9xMM
|
||||
PASSWORD_HASH_ALGO = pbkdf2
|
||||
|
||||
[service]
|
||||
DISABLE_REGISTRATION = false
|
||||
REQUIRE_SIGNIN_VIEW = false
|
||||
REGISTER_EMAIL_CONFIRM = false
|
||||
ENABLE_NOTIFY_MAIL = false
|
||||
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
|
||||
ENABLE_CAPTCHA = false
|
||||
DEFAULT_KEEP_EMAIL_PRIVATE = false
|
||||
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
|
||||
DEFAULT_ENABLE_TIMETRACKING = true
|
||||
NO_REPLY_ADDRESS = noreply.119.8.96.66
|
||||
|
||||
[lfs]
|
||||
PATH = /data/git/lfs
|
||||
|
||||
[mailer]
|
||||
ENABLED = false
|
||||
|
||||
[openid]
|
||||
ENABLE_OPENID_SIGNIN = true
|
||||
ENABLE_OPENID_SIGNUP = true
|
||||
|
||||
[cron.update_checker]
|
||||
ENABLED = false
|
||||
|
||||
[repository.pull-request]
|
||||
DEFAULT_MERGE_STYLE = merge
|
||||
|
||||
[repository.signing]
|
||||
DEFAULT_TRUST_MODEL = committer
|
||||
|
||||
[oauth2]
|
||||
JWT_SECRET = vIGxNkS5o0NYgDZwxMIcS_zXbbN3GBLCTO5MseHgO8Q
|
|
@ -1,5 +0,0 @@
|
|||
## Readme
|
||||
|
||||
- Create the admin credential via the Gitea admin CLI (see the example below)
|
||||
- to do: Disable user register
|
||||
- to do: Disable Gravatar
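
A minimal sketch of the admin CLI call that the init script below relies on; invoking it through docker exec is an assumption for manual use, and the username/email are the defaults used by that script:

```
# create the admin credential manually (the init script runs the same command as the git user)
docker exec -u git websoft9-git \
  gitea admin user create --admin --username websoft9 --random-password --email help@websoft9.com
```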
|
|
@ -1,6 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
[[ -f ./setup ]] && source ./setup
|
||||
pushd /root >/dev/null
|
||||
exec s6-svc -D /etc/s6/user
|
||||
popd
|
|
@ -1,53 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "start create user shell" >> /tmp/user
|
||||
# Confirm gitea is running
|
||||
count=1
|
||||
response=""
|
||||
cred_path="/data/gitea/credential"
|
||||
admin_username="websoft9"
|
||||
admin_email="help@websoft9.com"
|
||||
|
||||
while [ "$response" != "200" ]; do
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" localhost:3000)
|
||||
if [ "$response" = "200" ]; then
|
||||
echo "gitea is runing"
|
||||
break
|
||||
fi
|
||||
count=$((count+1))
|
||||
if [ $count -gt 10 ]; then
|
||||
echo "gitea is not runing"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -e "$cred_path" ]; then
|
||||
echo "File $cred_path exists. Exiting script."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Create admin credential by admin cli"
|
||||
su -c "
|
||||
if gitea admin user list | grep -q '$admin_username'; then
|
||||
echo 'User already exists.'
|
||||
exit 0
|
||||
else
|
||||
gitea admin user create --admin --username '$admin_username' --random-password --email '$admin_email' > /tmp/credential
|
||||
touch /data/gitea/create_user 2>/dev/null
|
||||
fi
|
||||
" git
|
||||
|
||||
if [ -f /data/gitea/create_user ]; then
|
||||
echo "Read credential from tmp"
|
||||
username=$(grep -o "New user '[^']*" /tmp/credential | sed "s/New user '//")
|
||||
if [ -z "$username" ] || [ "$username" != "websoft9" ]; then
|
||||
echo "username is not websoft9, exit"
|
||||
fi
|
||||
password=$(grep -o "generated random password is '[^']*" /tmp/credential | sed "s/generated random password is '//")
|
||||
rm -rf /tmp/credential
|
||||
|
||||
echo "Save to credential"
|
||||
json="{\"username\":\"$admin_username\",\"password\":\"$password\",\"email\":\"$admin_email\"}"
|
||||
echo "$json" > "$cred_path"
|
||||
fi
|
|
@ -1,23 +0,0 @@
|
|||
# modify time: 202311071641, you can modify here to trigger Docker Build action
|
||||
# from Dockerfile: https://github.com/NginxProxyManager/nginx-proxy-manager/blob/develop/docker/Dockerfile
|
||||
# from image: https://hub.docker.com/r/jc21/nginx-proxy-manager
|
||||
|
||||
FROM jc21/nginx-proxy-manager:2.10.4
|
||||
|
||||
LABEL maintainer="Websoft9<help@websoft9.com>"
|
||||
LABEL version="2.10.4"
|
||||
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y curl jq && rm -rf /var/lib/apt/lists/*
|
||||
COPY ./config/initproxy.conf /etc/
|
||||
COPY ./s6/w9init/setuser.sh /app/setuser.sh
|
||||
COPY ./s6/w9init/migration.sh /app/migration.sh
|
||||
RUN chmod +x /app/setuser.sh /app/migration.sh
|
||||
|
||||
RUN export add_ip_data="const ipDataFile={[CLOUDFRONT_URL]:'ip-ranges.json',[CLOUDFARE_V4_URL]:'ips-v4',[CLOUDFARE_V6_URL]:'ips-v6'}[url];logger.info(ipDataFile);if(ipDataFile){return fs.readFile(__dirname+'/../lib/ipData/'+ipDataFile,'utf8',(error,data)=>{if(error){logger.error('fetch '+ipDataFile+' error');reject(error);return}logger.info('fetch '+ipDataFile+' success');resolve(data)})}" && \
|
||||
sed -i "s#url);#&${add_ip_data}#g" /app/internal/ip_ranges.js && \
|
||||
mkdir -p /app/lib/ipData && cd /app/lib/ipData && \
|
||||
curl -O https://ip-ranges.amazonaws.com/ip-ranges.json && \
|
||||
curl -O https://www.cloudflare.com/ips-v4 && \
|
||||
curl -O https://www.cloudflare.com/ips-v6
|
||||
|
||||
CMD ["/bin/sh", "-c", "/app/migration.sh && /app/setuser.sh && tail -f /dev/null"]
|
|
@ -1,7 +0,0 @@
|
|||
# Readme
|
||||
|
||||
From official Nginx Proxy Manager image, and:
|
||||
|
||||
- add initproxy.conf to the image
|
||||
- init install wizard and modify user and password
|
||||
- lock the line of BoltDB at Portainer where environment=1
|
|
@ -1,125 +0,0 @@
|
|||
# ------------------------------------------------------------
|
||||
# domain.com
|
||||
# ------------------------------------------------------------
|
||||
|
||||
server {
|
||||
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
|
||||
server_name ~\.?[0-9a-zA-Z]$;
|
||||
|
||||
access_log /data/logs/proxy-host-1_access.log proxy;
|
||||
error_log /data/logs/proxy-host-1_error.log warn;
|
||||
|
||||
if ($http_referer ~* /w9deployment/) {
|
||||
rewrite ^/locales/(.*) /w9deployment/locales/$1 break;
|
||||
}
|
||||
|
||||
location / {
|
||||
# Proxy!
|
||||
include conf.d/include/proxy.conf;
|
||||
}
|
||||
|
||||
# proxy for portainer
|
||||
location /w9deployment/ {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-Scheme $scheme;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Accept-Encoding \"\";
|
||||
proxy_pass http://websoft9-deployment:9000/;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $http_connection;
|
||||
# proxy_http_version 1.1;
|
||||
add_header 'Access-Control-Allow-Origin' $http_origin;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
|
||||
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
|
||||
if ($request_method = OPTIONS) {
|
||||
return 204;
|
||||
}
|
||||
set $quot_tmp "\"";
|
||||
set $portainer_jwt "${quot_tmp}${cookie_portainerJWT}${quot_tmp}";
|
||||
sub_filter '</head>' "<script>($portainer_jwt)?window.localStorage.setItem('portainer.JWT', '$portainer_jwt'):null;</script></head>";
|
||||
sub_filter_once on;
|
||||
sub_filter_types *;
|
||||
}
|
||||
|
||||
# proxy for Nginx proxy Manager
|
||||
location /w9proxy/ {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-Scheme $scheme;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_pass http://websoft9-proxy:81;
|
||||
rewrite ^/w9proxy/?(.*)$ /$1 break;
|
||||
proxy_http_version 1.1;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
proxy_set_header Connection $http_connection;
|
||||
proxy_cache_bypass $http_secret_header;
|
||||
proxy_set_header Accept-Encoding \"\";
|
||||
add_header Pragma "no-cache";
|
||||
add_header Cache-Control "no-cache";
|
||||
if ($request_method = OPTIONS) {
|
||||
return 204;
|
||||
}
|
||||
sub_filter '</head>' "<script>var tokens='$cookie_nginx_tokens';(tokens)?window.localStorage.setItem('nginx-proxy-manager-tokens', '[{\"t\":\"$cookie_nginx_tokens\",\"n\":\"$cookie_nginx_nikeName\"}]'):null;</script></head>";
|
||||
# source changes
|
||||
sub_filter 'href="/' 'href="/w9proxy/';
|
||||
sub_filter 'src="/' 'src="/w9proxy/';
|
||||
sub_filter '/api' '/w9proxy/api';
|
||||
sub_filter '/assets' '/w9proxy/assets';
|
||||
sub_filter '/js/' '/w9proxy/js/';
|
||||
# script changes
|
||||
sub_filter 'r.p="/' 'r.p="/w9proxy/';
|
||||
sub_filter '"/login' '"/w9proxy/login';
|
||||
sub_filter 'case"/logout"' 'case"/w9proxy/logout"';
|
||||
sub_filter 'window.location="/"' 'window.location="/w9proxy/"';
|
||||
sub_filter 'history.start({pushState:!0})' 'history.start({pushState:!0,root: "/w9proxy/"})';
|
||||
sub_filter 'i.history.navigate(e.' 'i.history.navigate(e.replace("/w9proxy","").';
|
||||
sub_filter_types *;
|
||||
sub_filter_once off;
|
||||
}
|
||||
|
||||
# proxy for Gitea
|
||||
location /w9git/ {
|
||||
proxy_pass http://websoft9-git:3000/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_hide_header X-Frame-Options;
|
||||
add_header 'Access-Control-Allow-Origin' $http_origin;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
|
||||
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
|
||||
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||
if ($request_method = OPTIONS) {
|
||||
return 204;
|
||||
}
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
proxy_pass http://websoft9-apphub:8080/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
add_header 'Access-Control-Allow-Origin' $http_origin always;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
|
||||
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,x-api-key' always;
|
||||
if ($request_method = OPTIONS) {
|
||||
return 204;
|
||||
}
|
||||
}
|
||||
|
||||
location /media/ {
|
||||
proxy_pass http://websoft9-apphub:8081/images/;
|
||||
}
|
||||
|
||||
# Custom
|
||||
include /data/nginx/custom/server_proxy[.]conf;
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
# S6
|
||||
|
||||
S6 is the multi-process supervision tool used in Nginx Proxy Manager.
|
||||
|
||||
- nginx_proxy() in migration.sh: migrates initproxy.conf to Nginx; the condition compares the container creation time with the named volume creation time
|
|
@ -1,23 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set +e
|
||||
|
||||
nginx_proxy(){
|
||||
|
||||
current_time=$(date +%s)
|
||||
shadow_modified_time=$(stat -c %Y /etc/shadow)
|
||||
time_difference=$((current_time - shadow_modified_time))
|
||||
|
||||
if [ ! -f /data/nginx/proxy_host/initproxy.conf ] || [ $time_difference -le 60 ]
|
||||
then
|
||||
cp /etc/initproxy.conf /data/nginx/proxy_host/
|
||||
echo "Update initproxy.conf to Nginx"
|
||||
else
|
||||
echo "Don't need to update initproxy.conf to Nginx"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
nginx_proxy
|
||||
|
||||
set -e
|
|
@ -1,61 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set +e
|
||||
username="help@websoft9.com"
|
||||
password=$(openssl rand -base64 16 | tr -d '/+' | cut -c1-16)
|
||||
token=""
|
||||
cred_path="/data/credential"
|
||||
max_attempts=10
|
||||
|
||||
echo "Start to change nginxproxymanage users"
|
||||
if [ -e "$cred_path" ]; then
|
||||
echo "File $cred_path exists. Exiting script."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "create diretory"
|
||||
mkdir -p "$(dirname "$cred_path")"
|
||||
|
||||
sleep 10
|
||||
while [ -z "$token" ]; do
|
||||
sleep 5
|
||||
login_data=$(curl -X POST -H "Content-Type: application/json" -d '{"identity":"admin@example.com","scope":"user", "secret":"changeme"}' http://localhost:81/api/tokens)
|
||||
token=$(echo $login_data | jq -r '.token')
|
||||
done
|
||||
|
||||
echo "Change username(email)"
|
||||
for attempt in $(seq 1 $max_attempts); do
|
||||
response=$(curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $token" -d '{"email": "'$username'", "nickname": "admin", "is_disabled": false, "roles": ["admin"]}' http://localhost:81/api/users/1)
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Set username successful"
|
||||
break
|
||||
else
|
||||
echo "Set username failed, retrying..."
|
||||
sleep 5
|
||||
if [ $attempt -eq $max_attempts ]; then
|
||||
echo "Failed to set username after $max_attempts attempts. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Update password"
|
||||
for attempt in $(seq 1 $max_attempts); do
|
||||
response=$(curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $token" -d '{"type":"password","current":"changeme","secret":"'$password'"}' http://localhost:81/api/users/1/auth)
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Set password successful"
|
||||
echo "Save to credential"
|
||||
json="{\"username\":\"$username\",\"password\":\"$password\"}"
|
||||
echo "$json" > "$cred_path"
|
||||
break
|
||||
else
|
||||
echo "Set password failed, retrying..."
|
||||
sleep 5
|
||||
if [ $attempt -eq $max_attempts ]; then
|
||||
echo "Failed to set password after $max_attempts attempts. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
set -e
|
|
@ -1,42 +0,0 @@
|
|||
# Documentation for core maintainers
|
||||
|
||||
This documentation is adapted from [jenkins MAINTAINERS](https://github.com/jenkinsci/jenkins/blob/master/docs/MAINTAINERS.adoc), which provides a paradigm of rigorous open-source project maintenance.
|
||||
|
||||
## Scope
|
||||
|
||||
This document applies to the following components:
|
||||
|
||||
- Websoft9 core
|
||||
- Websoft9 core plugins
|
||||
- docker-library
|
||||
|
||||
## Roles
|
||||
|
||||
| Role/job | submit pr | review pr | assign pr | merge pr | close pr | create issue | manage issue | release |
|
||||
| ------------ | --------- | --------- | --------- | -------- | -------- | ------------ | ------------ | ------- |
|
||||
| Contributor | √ | | | | | √ | | |
|
||||
| Issue Team | √ | | | | | √ | √ | |
|
||||
| PR Reviewer | √ | √ | | | | √ | | |
|
||||
| Release Team | √ | | | | | √ | | √ |
|
||||
| Maintainer | √ | √ | √ | √ | √ | √ | | |
|
||||
| PR Assignee | | | | √ | | √ | | |
|
||||
|
||||
|
||||
* **Contributor**: submit pull requests to the Websoft9 core and review changes submitted by others. There are no special preconditions to do so. Anyone is welcome to contribute.
|
||||
* **Issue Triage Team Member**: review the incoming issues: bug reports, requests for enhancement, etc. Special permissions are not required to take this role or to contribute.
|
||||
* **Core Pull Request Reviewer**: A team for contributors who are willing to regularly review pull requests and eventually become core maintainers.
|
||||
* **Core Maintainer**: Get permissions in the repository, and hence they are able to merge pull requests. Their responsibility is to perform pull request reviews on a regular basis and to bring pull requests to closure, either by merging ready pull requests towards weekly releases ( branch) or by closing pull requests that are not ready for merge because of submitter inaction after an extended period of time.
|
||||
* **Pull Request Assignee**: Core maintainers make a commitment to bringing a pull request to closure by becoming an Assignee. They are also responsible to monitor the weekly release status and to perform triage of critical issues.
|
||||
* **Release Team Member**: Responsible for Websoft9 weekly and LTS releases
|
||||
|
||||
## Pull request review process
|
||||
|
||||
## Pull request Merge process
|
||||
|
||||
## Issue triage
|
||||
|
||||
## Release process
|
||||
|
||||
## Tools
|
||||
|
||||
## Communication
|
|
@ -1,24 +0,0 @@
|
|||
## Architecture
|
||||
|
||||
Websoft9 has a very simple [architecture](https://www.canva.cn/design/DAFpI9loqzQ/hI_2vrtfoK7zJwauhJzipQ/view?utm_content=DAFpI9loqzQ&utm_campaign=designshare&utm_medium=link&utm_source=publishsharelink) that uses [Redhat Cockpit](https://cockpit-project.org/) as the web framework and [Docker](https://www.docker.com/) for running [applications](https://github.com/Websoft9/docker-library).
|
||||
|
||||
The benefit of this architecture is that you don't have to learn new technology stacks or worry about this project becoming unmaintained.
|
||||
|
||||
![Alt text](image/archi.png)
|
||||
|
||||
|
||||
What we do is integrate the APIs or interfaces of the stacks below into the Cockpit console via [Cockpit packages (also known as plugins)](https://cockpit-project.org/guide/latest/packages.html); a quick way to see these services running is sketched after the list:
|
||||
|
||||
- [Nginx Proxy Manager](https://nginxproxymanager.com/): A web-based Nginx management
|
||||
- [Portainer](https://www.portainer.io/): Powerful container management for DevSecOps
|
||||
- [Duplicati](https://www.duplicati.com/): Backup software to store encrypted backups online
|
||||
- [Redis](https://redis.io/): The open source, in-memory data store
|
||||
- [Appmanage](https://github.com/Websoft9/websoft9/tree/main/appmanage): API for creating and managing Docker Compose based applications, powered by Websoft9
|
||||
- [websoft9-plugins](https://github.com/websoft9?q=plugin&type=all&language=&sort=): Cockpit packages powered by Websoft9
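
A minimal sketch for seeing these integrated services on a running host, assuming the `owner=websoft9` label and container names used by the docker-compose file in this repository:

```
# list the Websoft9 backend services (apphub, deployment, git, proxy)
docker ps --filter "label=owner=websoft9" --format "table {{.Names}}\t{{.Status}}"
```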
|
||||
|
||||
As Websoft9 is a complete product, we also offer:
|
||||
|
||||
* API
|
||||
* CLI
|
||||
|
||||
What makes Websoft9 even more attractive to users is its [200+ application templates](https://github.com/Websoft9/docker-library).
|
|
@ -1,24 +0,0 @@
|
|||
# Developer Guide
|
||||
|
||||
|
||||
|
||||
|
||||
## Release
|
||||
|
||||
|
||||
|
||||
#### Artifact repository automation

- Plugin artifact management: after a developer finishes development and testing, bumping the plugin version triggers an Action that builds the GitHub Packages artifact
- docker-library artifact management: after a developer finishes testing, bumping the library version triggers an Action that builds the GitHub Packages artifact
- websoft9 artifact management: after a developer modifies the appmanage source code or a microservice docker-compose file and finishes testing, bumping the microservice version triggers an Action that builds the Docker Hub image artifact and the backend microservice GitHub Packages artifact

> Portainer, redis and nginxproxymanager use external Docker Hub images

### Automated testing

When any artifact is updated, the project manager updates the version of the corresponding component in version_test.json, and the build Action triggers the automated system test.
If the automated test fails, the developers are notified, the artifact is deleted, and a new artifact is generated after the fix.
If the automated test succeeds, version_test.json is synced to version.json and the new artifact is officially released. (An example of triggering an image build is sketched below.)
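
A minimal sketch of triggering a Docker image build by bumping the `# modify time:` marker that the Dockerfiles in this repository carry on their first line; the file path shown is an assumed example:

```
# bump the marker line so the Docker Build action is triggered on push
# (docker/apphub/Dockerfile is an assumed path, adjust to the Dockerfile you changed)
sed -i "1s/.*/# modify time: $(date +%Y%m%d%H%M), you can modify here to trigger Docker Build action/" docker/apphub/Dockerfile
git add docker/apphub/Dockerfile
git commit -m "chore: trigger docker build"
git push
```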
|
||||
|
||||
|
Binary file not shown.
Before Width: | Height: | Size: 73 KiB |
Binary file not shown.
Before Width: | Height: | Size: 4.1 KiB |
|
@ -1,151 +0,0 @@
|
|||
# 需求
|
||||
|
||||
从两个主线理解 stackhub 的需求:
|
||||
|
||||
- 应用生命周期管理:寻找、安装、发布、停止、卸载、升级等软件全生命周期。
|
||||
- 基础设施运维管理:安全、存储、文件、容器、监控等系统管理
|
||||
|
||||
## 应用生命周期
|
||||
|
||||
### 业务需求
|
||||
|
||||
#### 寻找
|
||||
|
||||
用户可以通过两个入口寻找应用:
|
||||
|
||||
- 应用商店:采用一级分类的方式展现应用,并支持**筛选+搜索**的方式以便于用户检索
|
||||
- Docker 镜像仓库:检索 Docker 镜像仓库,找到对应的应用
|
||||
|
||||
#### 安装
|
||||
|
||||
- 用户自主安装应用,后端按顺序依次启动目标应用
|
||||
- 启动应用之前先进行资源约束判断,不符合条件的目标应用不予安装
|
||||
- 与安装有关的状态:安装中、运行中、安装失败、反复重启、已停止
|
||||
|
||||
#### 发布
|
||||
|
||||
- 以域名或端口的方式,将运行中的应用发布出去,供外部用户访问。
|
||||
- 自助设置 HTTPS,上传或更新证书
|
||||
|
||||
#### 停止
|
||||
|
||||
将应用服务停止
|
||||
|
||||
#### 卸载
|
||||
|
||||
卸载应用并删除数据
|
||||
|
||||
#### 升级
|
||||
|
||||
升级应用,如果升级失败会自动回滚到升级之前的状态
|
||||
|
||||
#### 恢复
|
||||
|
||||
在已有的完整备份的基础,恢复应用。
|
||||
|
||||
可能存在两种情况:
|
||||
|
||||
- 覆盖现有应用
|
||||
- 恢复成一个新的应用
|
||||
|
||||
#### 克隆
|
||||
|
||||
克隆一个已经存在的应用,命名为新应用
|
||||
|
||||
### 技术需求
|
||||
|
||||
#### 模板编排
|
||||
|
||||
应用的底层编排 100% 以 Docker Compose 语法作为编排语言
|
||||
|
||||
#### 多语言
|
||||
|
||||
- 前端支持 i18n
|
||||
- 后端接口支持英文
|
||||
|
||||
#### 用户管理
|
||||
|
||||
- 支持多个用户,用户角色分为普通用户和管理员用户
|
||||
- 普通用户可以创建和管理自己的应用,不可以删除他人的应用
|
||||
|
||||
#### UI 自适应
|
||||
|
||||
UI 自适应各种屏幕尺寸
|
||||
|
||||
#### 2FA
|
||||
|
||||
引入一种双重登录策略
|
||||
|
||||
#### 商店基础设置
|
||||
|
||||
- 商店 Logo 可自定义
|
||||
- 语言、时区可选
|
||||
- 绑定域名
|
||||
- SMTP 信息填写
|
||||
|
||||
#### 通知
|
||||
|
||||
- SMTP 邮件通知
|
||||
|
||||
#### 商店更新
|
||||
|
||||
商店支持在线更新提示和在线更新
|
||||
|
||||
#### API
|
||||
|
||||
支持生成 API Tokens
|
||||
|
||||
#### CLI
|
||||
|
||||
基于 API 的 CLI
|
||||
|
||||
#### 仓库管理
|
||||
|
||||
默认以 DockerHub 作为镜像仓库,支持自建仓库并同步 DockerHub 镜像
|
||||
|
||||
#### 安装程序
|
||||
|
||||
一键自动化安装程序,类似:
|
||||
|
||||
```
|
||||
curl https://websoft9.github.io/stackhub/install/install.sh | bash
|
||||
```
|
||||
|
||||
主要步骤包括:
|
||||
|
||||
1. Check 硬件、操作系统、cpu 架构
|
||||
2. 安装依赖包
|
||||
3. 安装 docker
|
||||
4. 下载各源码包
|
||||
5. 启动个源码对应服务
|
||||
|
||||
## 基础设施运维
|
||||
|
||||
### SSH 终端
|
||||
|
||||
Web-Based SSH 终端
|
||||
|
||||
### 文件管理器
|
||||
|
||||
Web-Based 文件管理器
|
||||
|
||||
### 存储管理
|
||||
|
||||
- 支持接入第三方对象存储
|
||||
|
||||
### 备份
|
||||
|
||||
备份完整的应用数据:
|
||||
|
||||
- 自定义备份时间区间
|
||||
- 自动备份可取消
|
||||
- 备份可以管理:删除、下载等
|
||||
|
||||
### 容器管理
|
||||
|
||||
可视化的容器管理,包括:拉镜像、创建/删除/停止容器、SSH 进入容器、向容器上传文件等
|
||||
|
||||
### 系统监控
|
||||
|
||||
- 监控容器的 CPU,内存和存储消耗情况
|
||||
- 监控系统的 CPU,内存和存储消耗情况
|
|
@ -1,339 +0,0 @@
|
|||
# Process
|
||||
|
||||
要理解整个架构设计,先打开[组件图](https://www.canva.cn/design/DAFt2DhfqYM/3uwKe09X5xaD4QPc47rNMQ/view?utm_content=DAFt2DhfqYM&utm_campaign=designshare&utm_medium=link&utm_source=publishsharelink),然后结合一下内容进行阅读:
|
||||
|
||||
所有的微操作一定归属于如下三个类别:
|
||||
|
||||
- CI:持续集成,即源码准确性
|
||||
- CD:持续部署,即让软件 running,目前采用拉式方式与CI协作
|
||||
- CP:持续发布,即通过域名让用户可以访问
|
||||
|
||||
另外还有与系统维护相关的:
|
||||
|
||||
- Settings
|
||||
- CLI
|
||||
|
||||
## API
|
||||
|
||||
API 接口功能设计:
|
||||
|
||||
### app/install
|
||||
|
||||
功能:安装应用并自动绑定域名
|
||||
|
||||
输入参数:
|
||||
|
||||
```
|
||||
body:
|
||||
{
|
||||
- app_name # 产品名
|
||||
- app_id # 自定义应用名称
|
||||
- domains[] #域名-可选
|
||||
- default_domain #默认域名-可选:设置.env中APP_URL
|
||||
- edition{dist:community, version:5.0} #应用版本,来自variable.json,但目前variable.json中只有 version 的数据
|
||||
- endpointId: 安装目的地(portainer中有定义),默认为 local
|
||||
}
|
||||
```
|
||||
|
||||
过程:
|
||||
|
||||
1. 参数验证:
|
||||
app_id 验证:
|
||||
业务要求:gitea 中是否存在同名的 repository,Portainer中是否存在同名stack
|
||||
技术要求-【非空,容器要求:2位-20位,字母数字以及-组成 gitea:todo portainer:todo】
|
||||
app_name 验证: 在gitea容器的library目录下验证
|
||||
domains[]验证:是否绑定过,数量不能超过2:泛域名+其他域名
|
||||
default_domain验证:来自domains[]中,自定义域名优先
|
||||
edition: community这个不做验证,保留扩展,只做version处理
|
||||
endpointId:通过Portainer容器取名称【local】的endpointId,不存在报错
|
||||
2. CI:Gitea 创建 repository:通过Gitea创建仓库,并修改.env文件
|
||||
3. CD: Portainer :
|
||||
创建websoft9网络,判断 websoft9 network (先判断是否存在)
|
||||
Portainer 基于 Gitea Repository 在对应的 endpointId 中创建项目(staus: [active,inactive])
|
||||
4. CP:Nginx 为应用创建 Proxy 访问:如果Proxy创建失败,应用安装成功,但提示Proxy创建失败,不做应用安装回滚
|
||||
|
||||
2-3 步骤是有状态操作(产生对后续操作有影响的记录),故需考虑事务完整性。
|
||||
|
||||
### apps
|
||||
|
||||
查询所有apps的信息,返回完整数据。等同于 CD: deploy/apps
|
||||
|
||||
另外,app 的状态以及各个状态对应的操作:
|
||||
|
||||
- 状态:
|
||||
- Active,等同于 Portainer Active。此状态下,显示容器的状态 running(1),stopped(2)
|
||||
- Unactive,等同于 Portainer Unactive
|
||||
- 操作:
|
||||
- for running: stop | start | restart | redeploy | delete && delete(down -v)
|
||||
- for Unactive: redeploy | delete(down -v)
|
||||
|
||||
### apps/{id}/*
|
||||
|
||||
对单个 apps 的增删改查:
|
||||
|
||||
- 查询:deploy/apps/{id} with **get** method
|
||||
- 启动:deploy/apps/{id}/start
|
||||
- 停止:deploy/apps/{id}/stop
|
||||
- 重启:deploy/apps/{id}/restart
|
||||
- 迁移:deploy/apps/{id}/migrate
|
||||
- 重建:deploy/apps/{id}/git/redeploy + deploy/apps/{id}/restart
|
||||
- 卸载:
|
||||
- APP_Manage 调用 integration/,删除 Repository
|
||||
- APP_Manage 调用 publish/nginx/proxy,删除 Proxy
|
||||
- APP_Manage 调用 deploy/apps/{id} with **delete** method,删除应用
|
||||
|
||||
> 卸载必须是一个事务操作,确保完成所有步骤,以上先后顺序非常关键
|
||||
|
||||
### app/domains
|
||||
|
||||
App 域名的:增、删、改、查。
|
||||
|
||||
输入参数:
|
||||
|
||||
pulisherId: 默认为本地 nginx,将来可扩展支持云平台的应用访问网关。
|
||||
|
||||
```
|
||||
body:
|
||||
{
|
||||
- app_id
|
||||
- domains[] 可选
|
||||
- default_domain 可选
|
||||
}
|
||||
```
|
||||
|
||||
流程:
|
||||
|
||||
- CP: publish/nginx/proxy
|
||||
- CI: Gitea 修改 repository 的 .env 文件中的 APP_URL 为默认域名
|
||||
- CD:deploy/apps/{id}/git/redeploy + deploy/apps/{id}/restart
|
||||
|
||||
## Settings
|
||||
|
||||
|
||||
|
||||
配置文件可以通过接口和CLI进行更改
|
||||
|
||||
### 系统配置
|
||||
|
||||
系统配置,需重启服务后生效。
|
||||
|
||||
system.ini
|
||||
|
||||
|
||||
### 应用配置
|
||||
|
||||
app.ini
|
||||
|
||||
应用配置一般会提供API,供前端调用。应用配置更改后,不需要重启。
|
||||
|
||||
功能:
|
||||
|
||||
- settings 增删改查
|
||||
|
||||
```
|
||||
[system]
|
||||
# websoft9 install path, it can not modify now
|
||||
install_path=/data/websoft9
|
||||
|
||||
# apps install path, it can not modify now
|
||||
apps_path=/data/compose
|
||||
|
||||
# enable appstore preview, it use for CLI upgrade COMMAND
|
||||
appstore_preview=false
|
||||
|
||||
[address]
|
||||
# Wildcard Domain Name for application
|
||||
wildcard_domain=test.websoft9.com
|
||||
|
||||
|
||||
[smtp]
|
||||
smtp_port=743
|
||||
smtp_server=smtp.websoft9.com
|
||||
smtp_tls/ssl=true
|
||||
smtp_user=admin
|
||||
smtp_password=password
|
||||
|
||||
[receive]
|
||||
# receive the notify of system
|
||||
email=help@websoft9.com
|
||||
wechat=
|
||||
```
|
||||
|
||||
|
||||
## CLI
|
||||
|
||||
CLI 是安装到服务器的服务端命令行工具。它的功能有几种来源:
|
||||
|
||||
1. 继承:由 API 直接转换
|
||||
2. 相关:多个 API 以及 组合
|
||||
3. 无关:与 API 无关,
|
||||
|
||||
具体指令以及参数设计如下:
|
||||
|
||||
```
|
||||
Usage: w9 [OPTIONS] COMMAND sub-COMMAND
|
||||
|
||||
Common Commands
|
||||
version 查询 websoft9 版本
|
||||
repair 修复 websoft9
|
||||
clean 清空 websoft9 使用过程中不需要的残留资源
|
||||
upgrade 检查并更新 [core|plugin], --check 只检查
|
||||
uninstall 删除 Websoft9 所有服务以及组件,除了 Docker 以及 Docker 应用之外
|
||||
environments list all Environments
|
||||
apikey 生产以及管理 AppManage keys
|
||||
ip --replace newIP,直接更改 gitea 和 Nginx IP相关的配置
|
||||
|
||||
App Commands:
|
||||
install 安装应用
|
||||
ls List 应用列表 [app_id, app_name, status, time, endpointId]
|
||||
inspect 显示 APP 详细信息
|
||||
start 启动一个停止的应用
|
||||
stop 停止一个运行中的应用
|
||||
restart 重启应用
|
||||
redeploy 重建应用(包含更新镜像后重建)
|
||||
delete 删除应用
|
||||
|
||||
Global Options:
|
||||
-c, --context string
|
||||
-D, --debug Enable debug mode
|
||||
-e, --environment which environment you used
|
||||
|
||||
Run 'w9 COMMAND --help' for more information on a command.
|
||||
```
|
||||
|
||||
## Core
|
||||
|
||||
### CI
|
||||
|
||||
CI 遵循几个法则:
|
||||
|
||||
* 为 CD 准备一个完全可用的编排物料
|
||||
* Git 驱动,保证编排物料与应用运行环境分离,编排物料可修改可复原
|
||||
* 编排物料中的资源(包/镜像)具备良好的网络可达性
|
||||
|
||||
CI 过程中除了直接使用 [Gitea API](https://docs.gitea.cn/api/1.19/) 之外,还需增加如下业务:
|
||||
|
||||
|
||||
#### integation/repository/create
|
||||
|
||||
功能:
|
||||
|
||||
基于本地目录 library/apps/app_name,创建一个符合 Websoft9 规范格式的 repository(名称为:app_id):
|
||||
> app_name 是软件名称,例如:wordpress。app_id 是用户安装的应用名称,例如:mywordpress
|
||||
|
||||
步骤:
|
||||
1. 在 Gitea 中创建一个名称为 app_id 的 repository
|
||||
2. 修改 Gitea repository 仓库的设置属性,只保留【代码】栏
|
||||
|
||||
#### integation/repository/modify
|
||||
|
||||
更改临时目录 .env 文件中的重要参数:
|
||||
|
||||
- APP_URL 用域名/公网IP替换
|
||||
- POWER_PASSWORD 使用 16位 【大小写数字特殊字符】 替代
|
||||
- APP_VERSION 根据安装输入参数替换
|
||||
- APP_NAME 更换为 app_id
|
||||
|
||||
然后 git push
|
||||
|
||||
#### integation/repository/delete
|
||||
|
||||
### CD
|
||||
|
||||
CD 遵循几个法则:
|
||||
|
||||
* 可以管理应用的完全生命周期
|
||||
* 应用可以指定部署到local之外的服务器或集群环境(portainer 中对应的术语为 endpoint 或 environment)
|
||||
* 部署编排物料(CI 的产出物)可以是 docker-compose,也可以是 helm
|
||||
* 也可以支持源码编译成镜像后自动部署(参考:waypoint)
|
||||
|
||||
#### deploy/apps/create/standalone/repository
|
||||
|
||||
基于 repository 创建应用,100% 使用 Portainer API /stacks/create/standalone/repository
|
||||
|
||||
#### deploy/apps/{id}/git
|
||||
|
||||
设置 portainer 与 repository 之间的连接关系,100% 使用 Portainer API /stacks/{id}/git
|
||||
|
||||
#### deploy/apps
|
||||
|
||||
List all apps,继承 Portainer API /stacks:
|
||||
|
||||
额外需要增加如下几类数据:
|
||||
|
||||
1. 将 app 主容器的 "Env" 合并到 Portainer API 返回的 env[] 中。
|
||||
> portaier 中的 repository 安装方式中,.env 并不会被 portainer 保存到接口中
|
||||
|
||||
2. portainer 中的应用目录的 variables.json 或 repository variables.json
|
||||
3. Gitea API 列出当前 APP 的 repository 之 URL,提供访问链接?
|
||||
4. 所用应用的数据目录:/var/lib/docker/volumes/...
|
||||
5. Portainer 通过主容器的 Label 标签和 Ports,获取 app_*_port等
|
||||
|
||||
#### deploy/apps/{id}
|
||||
|
||||
与 Portainer API /stacks{id} 雷同,支持 get(查询), delete(删除)
|
||||
|
||||
#### deploy/apps/{id}/git/redeploy
|
||||
|
||||
100% 使用 Portainer API /stacks/{id}/git/redeploy
|
||||
|
||||
#### deploy/apps/{id}/start
|
||||
|
||||
100% 使用 Portainer API /stacks/{id}/start
|
||||
|
||||
#### deploy/apps/{id}/stop
|
||||
|
||||
100% 使用 Portainer API /stacks/{id}/stop
|
||||
|
||||
#### deploy/apps/{id}/restart
|
||||
|
||||
Portainer 未提供对应的 API,可以创建此操作,也可以直接在前端通过 stop & start 组合实现。
|
||||
|
||||
#### deploy/apps/{id}/migrate
|
||||
|
||||
将 Docker 应用迁移到另外一台服务器上。此需求暂不实现
|
||||
|
||||
100% 使用 Portainer API /stacks/{id}/migrate
|
||||
|
||||
### CP
|
||||
|
||||
#### publish/nginx/proxy
|
||||
|
||||
function proxy(host,domains[], Optional:port, Optional:exra_proxy.conf)
|
||||
|
||||
**init()**
|
||||
|
||||
也可以使用 getPort(), getExra_proxy()
|
||||
|
||||
1. 获取 Port: 从 portainer.containers 接口中 Label 属性集中获取 http 或 https
|
||||
|
||||
> com.docker.compose.http.port": "9001" | com.docker.compose.https.port": "9002"
|
||||
|
||||
2. 获取 exra_proxy.conf: 从 Gitea 接口中获取 repository 的 src/nginx_proxy.conf
|
||||
|
||||
**update()**
|
||||
|
||||
修改 proxy 中的 domain
|
||||
|
||||
**add()**
|
||||
|
||||
查询 Nginx 中是否有此应用的 Proxy?
|
||||
|
||||
- Y:将新的域名插入到 Proxy 中(忽略 nginx_proxy.conf?)
|
||||
- N:新增 Proxy
|
||||
|
||||
**delete()**
|
||||
|
||||
删除所有相关的 proxys
|
||||
|
||||
**list()**
|
||||
|
||||
查询所有相关的 proxys
|
||||
|
||||
**enable()**
|
||||
|
||||
enable所有相关的 proxys
|
||||
|
||||
**disable()**
|
||||
|
||||
disable 所有相关的 proxys
|
|
@ -1,57 +0,0 @@
|
|||
# 概述
|
||||
|
||||
## 需求草稿
|
||||
|
||||
| | Cloudron | [casaos](https://www.casaos.io/) | umbrel | runtipi |
|
||||
| -------------- | -------- | -------------------------------------------------------- | ------------ | ------- |
|
||||
| 应用编排 | | 单一镜像 | | 多镜像,compose 编排 |
|
||||
| 市场应用来源 | | 官方+社区 | 官方+社区 | |
|
||||
| 一键安装程度 | | 不需任何配置 | 不需任何配置 | |
|
||||
| 应用访问方式 | | 端口 | 端口 | |
|
||||
| 自定义安装应用 | | Y | N | N |
|
||||
| Web 管理容器 | | Y | N | |
|
||||
| 默认镜像仓库 | | DockerHub | | |
|
||||
| 自适应 | | Y | Y | |
|
||||
| 多语言 | | Y | N | |
|
||||
| 用户管理 | | 单一用户 | 单一用户 | |
|
||||
| 自带应用 | | 文件,服务器终端,容器终端,监控,日志 | 监控,日志 | |
|
||||
| 应用管理 | | 完整容器参数设置,克隆,绑定域名?备份?证书? | 无 | |
|
||||
| 应用更新 | | N | | |
|
||||
| 后端语言 | | Go | | |
|
||||
| API | | HTTP API | | |
|
||||
| 前端 | | vue.js | | |
|
||||
| CLI | | Y | | |
|
||||
| HTTP 服务器 | | 无,端口访问应用 | | traefik |
|
||||
| 公共数据库 | | 无 | | |
|
||||
| 开发文档 | | [wiki](https://wiki.casaos.io/en/contribute/development) | | |
|
||||
| 2FA | | N | Y | |
|
||||
| 安装方式 | | 服务器安装 | 容器安装 | |
|
||||
| 商店更新 | | N | Y | Y |
|
||||
| 商店绑定域名 | Y | N | N | |
|
||||
| DNS服务 | Y | N | | |
|
||||
|
||||
* 应用自动分配4级域名后,如何再 CNAME 二级域名?
|
||||
|
||||
### casaos 架构分析
|
||||
|
||||
#### 安装脚本
|
||||
|
||||
1. Check硬件、操作系统、cpu架构
|
||||
2. 安装依赖包
|
||||
3. 安装docker
|
||||
4. 下载各源码包
|
||||
5. 启动个源码对应服务
|
||||
|
||||
#### 源码解析
|
||||
|
||||
| 运行时项目 | 对应项目源码 | 说明 |
|
||||
| -------------- | -------- | -------------------------------------------------------- |
|
||||
| casaos | CasaOS | 每隔5秒通过websocekt推送内存/CPU/网络等系统信息;提供ssh登录操作的http接口;提供"sys", "port", "file", "folder", "batch", "image", "samba", "notify"这些http接口的访问|
|
||||
| casaos-message-bus | CasaOS-MessageBus | 类似一个MQ提供消息的发布/订阅 |
|
||||
| casaos-local-storage | CasaOS-LocalStorage | 每隔5S统计磁盘/USB信息,提供监控信息;提供http接口访问disk/usb/storage信息 |
|
||||
| casaos-user-service | CasaOS-UserService | 通过http server提供用户管理的接口 |
|
||||
| casaos-app-management | CasaOS-AppManagement | 使用CasaOS-AppStore中App的元数据;提供所有appList的分类/列表/详细信息;通过docker来管理app,提供安装/启动/关闭/重启/日志查看等相关接口;docker-compose管理(V2);|
|
||||
| casaos-gateway | CasaOS-Gateway | 提供Gateway自身管理接口,比如切换Gateway的port的接口,查看所有路由的接口;提供CasaOS-UI的静态资源访问服务;根据请求的PATH将请求代理转发至其它模块 |
|
||||
| casaos-cli | CasaOS-CLI | 通过命令行的方式来调用CasaOS-Gateway的接口,该模块未完全实现,实现了部分命令 |
|
||||
| linux-all-casaos | CasaOS-UI | VUE2,CasaOS的Web源码,编译后的html/js/image/css等由CasaOS-Gateway提供访问入口,所有API接口指向CasaOS-Gateway |
|
||||
| - | CasaOS-Common | Common structs and functions for CasaOS |
|
|
@ -1,37 +0,0 @@
|
|||
# Software Factory

The [Software Factory] solution for universities, developed in-house by Websoft9: students and teachers can freely use the image library for teaching.

## Business requirements

In teaching, university teachers and students need to use a large number of open-source software packages as the vehicle of instruction, and to gain hands-on experience by using open-source software, opening the door to the commercial software field.
At present, teachers and students are limited by their horizons and technical skills and cannot easily set up and use various open-source software, which greatly restricts the development of teaching.

Our current solution only needs to be packaged as a boxed product to meet users' needs.

## Business model

After boxing our existing solution, we profit through:

- Selling the software solution and technical support
- Cloud resource revenue sharing
- Pay-per-hour images
- Paid knowledge base
- Paid course cooperation

## Functional requirements

The boxed solution includes:

### Business functions

- A software library that can be used with one click (300+ scenario solutions)
- An online tool library (web-based tools, so students do not need to install a large number of client tools to complete class work)
- A MOOC system for managing the teaching process

### System functions

- Account management
- Log management
- Security management
- Resource consumption management
|
|
@ -1,8 +0,0 @@
|
|||
# Developer Guide
|
||||
|
||||
## Mulitiple language
|
||||
|
||||
Below are the points you should know if you want to translate (a quick way to locate these files is shown after the list):
|
||||
|
||||
- Every plugin's po.zh_CN.js can be reused by other Cockpit plugins
|
||||
- po.zh_CN.js.gz at base1 is the system language file
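
A minimal sketch for locating these files on an installed system, assuming plugins live under the default /usr/share/cockpit path used by the install scripts:

```
# translation file shipped by each Websoft9 plugin
find /usr/share/cockpit -name "po.zh_CN.js"

# Cockpit's own system language file under base1
ls /usr/share/cockpit/base1/po.zh_CN.js.gz
```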
|
|
@ -1,29 +0,0 @@
|
|||
# recruit
|
||||
|
||||
In order to optimize the app management architecture and code specifications, and perform daily maintenance on new features and bugs, Websoft9 recruits a senior Python development expert.
|
||||
|
||||
## Requirements
|
||||
|
||||
1. Proficient in Python and have architectural experience in Python web projects
|
||||
|
||||
2. Have experience in developing distributed systems (caching, message middleware)
|
||||
|
||||
3. Familiar with Docker and other container technologies
|
||||
|
||||
4. Love coding and willing to continuously optimize code at work
|
||||
|
||||
5. Strong document reading and understanding skills as well as document writing experience
|
||||
|
||||
## Job Description
|
||||
|
||||
1. Complete additional features and modify bugs for existing projects
|
||||
|
||||
2. Provide reasons and solutions for optimizing the project architecture and API methods
|
||||
|
||||
## Work arrangement
|
||||
|
||||
Remote, must complete 40 hours of work per month
|
||||
|
||||
## Remuneration and payment
|
||||
|
||||
Pay 4000 yuan before the 10th of each month
|
|
@ -1,33 +0,0 @@
|
|||
# Team and Division
|
||||
|
||||
## Active Team{#active-team}
|
||||
|
||||
The Websoft9 active team works on the core functionality, as well as the documentation website.
|
||||
|
||||
* Xu Wei: One of the founders of Websoft9, responsible for code review
|
||||
* Darren Chen: One of the founders of Websoft9, responsible for user experience design and architecture
|
||||
* Morning Tan: Testing
|
||||
* Lao Zhou: Document writing and issue collection feedback
|
||||
|
||||
## Architect Consultant
|
||||
|
||||
Create more, better and more realistic software solutions for the world
|
||||
|
||||
* Liu Guanghui: Engaged in enterprise architecture for 20 years, experienced complete typical enterprise applications such as ERP, MES, WMS, e-commerce, OA, etc.
|
||||
|
||||
## Candidate
|
||||
|
||||
- [mayo7e](https://github.com/mayo7e): [#296](https://github.com/Websoft9/websoft9/issues/296) mayowa.wh@gmail.com
|
||||
|
||||
## Honorary Alumni
|
||||
|
||||
Websoft9 would never be what it is today without the huge contributions from these folks who have moved on to bigger and greater things.
|
||||
|
||||
* Zengxc: RHCE
|
||||
* [Brendan](https://github.com/dudeisbrendan03): Infra Engineer in Greater Manchester. Studying MSc @ Lancaster
|
||||
* [Biao Yang](https://github.com/hotHeart48156): Warm-hearted and smart developer
|
||||
* [Junhao](https://github.com/hnczhjh): RedHat Engineer CA, studying at a Changsha college
|
||||
* [QiuJiaHon](https://github.com/orgs/Websoft9/people/QiuJiaHon): Studying at Hunan Normal University
|
||||
* [Ryan Gates](https://github.com/gatesry)
|
||||
* [Kai Jiao](https://github.com/jiaosir-cn)
|
||||
* [Geraintl yu](https://github.com/geraintlyu)
|
|
@ -1,28 +0,0 @@
|
|||
# User Guide
|
||||
|
||||
## FAQ
|
||||
|
||||
#### User cannot sudo?
|
||||
|
||||
```
|
||||
# add user to sudo/admin group (select one command)
|
||||
usermod -aG wheel username
|
||||
usermod -aG sudo username
|
||||
|
||||
# sudo without a password prompt: add a sudoers.d drop-in (one common way)
echo "username ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/username
|
||||
```
|
||||
|
||||
#### Cannot log in with the correct credential?
|
||||
|
||||
Many reasons may cause login to fail even with the correct credential:
|
||||
|
||||
- A stale cookie in your browser after an IP change; clear the cookie
|
||||
- *.override.json is not correct
|
||||
- TLS certificate
|
||||
- The user is not allowed to log in; modify the ssh_config file
|
||||
|
||||
For more details, run `sudo grep cockpit /var/log/messages`
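
For reference, a hedged sketch of pulling the same information from the journal on systems that do not write /var/log/messages (an assumption, not part of the original guide):

```
sudo grep cockpit /var/log/messages
# journald-only systems
sudo journalctl -u cockpit -u cockpit.socket --since "1 hour ago"
```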
|
||||
|
||||
#### How to modify Websoft9 port?
|
||||
|
||||
Access the web console > Settings, or use the CLI to modify the port.
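
A minimal sketch of changing the port from the shell, based on how the install scripts in this repository handle it (the --port option of install_cockpit.sh and the ListenStream value in cockpit.socket); treat it as a sketch rather than the official procedure:

```
# option 1: re-run the Cockpit install script with a new port
wget -O install_cockpit.sh https://websoft9.github.io/websoft9/install/install_cockpit.sh && bash install_cockpit.sh --port 9001

# option 2: edit the socket unit directly and restart Cockpit
sudo sed -i 's/^ListenStream=.*/ListenStream=9001/' /lib/systemd/system/cockpit.socket
sudo systemctl daemon-reload && sudo systemctl restart cockpit.socket
```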
|
|
@ -1,58 +0,0 @@
|
|||
# Install
|
||||
|
||||
- The [install.sh](./install.sh) is the entry file for install or upgrade
|
||||
- You can also run [install_cockpit.sh](./install_cockpit.sh), [install_docker.sh](./install_docker.sh) and [install_plugins.sh](./install_plugins.sh) separately
|
||||
- The [uninstall.sh](./uninstall.sh) is the entry file for uninstall
|
||||
|
||||
|
||||
## Use it
|
||||
|
||||
```
|
||||
# install or upgrade Websoft9
|
||||
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh
|
||||
|
||||
# install or upgrade Websoft9 with parameters
|
||||
wget -O install.sh https://websoft9.github.io/websoft9/install/install.sh && bash install.sh --port 9000 --channel release --path "/data/websoft9/source" --version "latest"
|
||||
|
||||
# install or upgrade Cockpit with parameters
|
||||
wget -O - https://websoft9.github.io/websoft9/install/install_cockpit.sh | bash --port 9000
|
||||
|
||||
# install or upgrade Docker
|
||||
wget -O - https://websoft9.github.io/websoft9/install/install_docker.sh | bash
|
||||
|
||||
# uninstall by default
|
||||
curl https://websoft9.github.io/websoft9/install/uninstall.sh | bash
|
||||
|
||||
# uninstall all
|
||||
wget -O - https://websoft9.github.io/websoft9/install/uninstall.sh | bash /dev/stdin --cockpit --files
|
||||
```
|
||||
|
||||
## Develop it
|
||||
|
||||
This install script has the following related resources (a quick post-install check is sketched after the list):
|
||||
|
||||
- Tools: Install or upgrade some useful software packages at Linux
|
||||
- Source Code: Download source code from artifactory
|
||||
- Docker: Install and upgrade Docker, compose up **backend service** with docker-compose.yml
|
||||
- Cockpit: Install and upgrade Cockpit and its packages, manage its port, fix its menu
|
||||
- Plugins: Install and upgrade Websoft9 plugins which is the **frontend**
|
||||
- Systemd: Install and upgrade websoft9.service
|
||||
- Set Firewalld: allow ports 80, 443 and the Cockpit port, and register the Cockpit and Docker services with firewalld
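
A minimal post-install check, assuming the defaults used by install.sh (compose project websoft9, systemd unit websoft9.service, label owner=websoft9):

```
# backend containers started by docker compose
sudo docker compose ls
sudo docker ps --filter "label=owner=websoft9"

# systemd unit and Cockpit socket
systemctl status websoft9.service cockpit.socket --no-pager
```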
|
||||
|
||||
The install script should adhere to the following principles:
|
||||
|
||||
1. Not allowed to modify the source code of the application.
|
||||
2. Every task must have an exception exit mechanism.
|
||||
3. Both installation and updates should be considered simultaneously.
|
||||
4. Upgrade script should not overwrite existing configurations.
|
||||
5. Duplication of code in any form is not allowed; use functions instead.
|
||||
6. Paths, ports, etc. must be defined using variables.
|
||||
|
||||
|
||||
Some default parameters you should know (you can verify them as shown after the list):
|
||||
|
||||
- Websoft9 root path: */data/websoft9/source*
|
||||
- Websoft9 Systemd script path: */opt/websoft9/systemd*
|
||||
- Plugins path: */usr/share/cockpit*
|
||||
- Cockpit config path: */etc/cockpit*
|
||||
- Cockpit default port: 9000
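
A quick way to verify these defaults on a host (paths as listed above):

```
ls -d /data/websoft9/source /opt/websoft9/systemd /usr/share/cockpit /etc/cockpit
grep ListenStream /lib/systemd/system/cockpit.socket   # Cockpit port, 9000 by default
```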
|
|
@ -1,448 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Define PATH
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
# Export PATH
|
||||
export PATH
|
||||
|
||||
|
||||
# Command-line options
|
||||
# ==============================================================================
|
||||
#
|
||||
# --version
|
||||
# Use the --version option to install a specific version. Default is latest, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --version "0.8.25"
|
||||
#
|
||||
# --port <9000>
|
||||
# Use the --port option to set the Websoft9 console port. Default is 9000, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --port 9001
|
||||
#
|
||||
# --channel <release|dev>
|
||||
# Use the --channel option to install a release (production) or dev distribution. Default is release, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --channel release
|
||||
#
|
||||
# --path
|
||||
# Use the --path option to set the installation path, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --path "/data/websoft9/source"
|
||||
#
|
||||
# --devto
|
||||
# Use the --devto option to enable developer mode; devto is the developer code path, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --devto "/data/dev/mycode"
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# Set default parameter values
|
||||
version="latest"
|
||||
channel="release"
|
||||
path="/data/websoft9/source"
|
||||
|
||||
# Parse the command-line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--version)
|
||||
version="$2"
|
||||
shift 2
|
||||
;;
|
||||
--port)
|
||||
port="$2"
|
||||
shift 2
|
||||
;;
|
||||
--channel)
|
||||
channel="$2"
|
||||
shift 2
|
||||
;;
|
||||
--path)
|
||||
path="$2"
|
||||
shift 2
|
||||
;;
|
||||
--devto)
|
||||
devto="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -n "$port" ]; then
|
||||
export port
|
||||
else
|
||||
export port=9000
|
||||
fi
|
||||
|
||||
|
||||
starttime=$(date +%s)
|
||||
|
||||
# Print the parameter values
|
||||
echo -e "\n------ Welcome to install Websoft9, it will take 3-5 minutes ------"
|
||||
echo -e "\nYour installation parameters are as follows: "
|
||||
echo "--version: $version"
|
||||
echo "--port: $port"
|
||||
echo "--channel: $channel"
|
||||
echo "--path: $path"
|
||||
echo "--devto: $devto"
|
||||
|
||||
echo -e "\nYour OS: "
|
||||
cat /etc/os-release | head -n 3 2>/dev/null
|
||||
|
||||
# Define global vars
|
||||
# export var can send it to subprocess
|
||||
|
||||
export http_port=80
|
||||
export https_port=443
|
||||
export install_path=$path
|
||||
export channel
|
||||
export version
|
||||
export systemd_path="/opt/websoft9/systemd"
|
||||
export source_zip="websoft9-$version.zip"
|
||||
export source_unzip="websoft9"
|
||||
export source_github_pages="https://websoft9.github.io/websoft9"
|
||||
# inotify-tools is at epel-release
|
||||
export repo_tools_yum="epel-release"
|
||||
export tools_yum="git curl wget yum-utils jq bc unzip inotify-tools"
|
||||
export tools_apt="git curl wget jq bc unzip inotify-tools"
|
||||
export docker_network="websoft9"
|
||||
export artifact_url="https://w9artifact.blob.core.windows.net/$channel/websoft9"
|
||||
# export OS release environments
|
||||
if [ -f /etc/os-release ]; then
|
||||
. /etc/os-release
|
||||
else
|
||||
echo "Can't judge your Linux distribution"
|
||||
exit 1
|
||||
fi
|
||||
echo Install from url: $artifact_url
|
||||
|
||||
# Define common functions
|
||||
|
||||
Wait_apt() {
|
||||
# Function to check if apt is locked
|
||||
local lock_files=("/var/lib/dpkg/lock" "/var/lib/apt/lists/lock")
|
||||
|
||||
for lock_file in "${lock_files[@]}"; do
|
||||
while fuser "${lock_file}" >/dev/null 2>&1 ; do
|
||||
echo "${lock_file} is locked by another process. Waiting..."
|
||||
sleep 5
|
||||
done
|
||||
done
|
||||
|
||||
echo "APT locks are not held by any processes. You can proceed."
|
||||
}
|
||||
|
||||
export -f Wait_apt
|
||||
|
||||
|
||||
install_tools(){
|
||||
echo_prefix_tools=$'\n[Tools] - '
|
||||
echo "$echo_prefix_tools Starting install necessary tool..."
|
||||
|
||||
if [ "$ID" = "rhel" ] || [ "$ID" = "ol" ]; then
|
||||
RHEL_VERSION=${VERSION_ID%%.*}
|
||||
sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-${RHEL_VERSION}.noarch.rpm >/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
elif [ "$ID" = "centos" ] || [ "$ID" = "rocky" ]; then
|
||||
sudo yum install -y "$repo_tools_yum" >/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
elif [ "$ID" = "amzn" ]; then
|
||||
sudo amazon-linux-extras install epel -y >/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
apt --version >/dev/null 2>&1
|
||||
apt_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
for package in $tools_yum; do
|
||||
echo "Start to install $package"
|
||||
sudo dnf install -y $package > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
for package in $tools_yum; do
|
||||
echo "Start to install $package"
|
||||
sudo yum install -y $package > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
elif [ $apt_status -eq 0 ]; then
|
||||
while fuser /var/lib/dpkg/lock >/dev/null 2>&1 ; do
|
||||
echo "Waiting for other software managers to finish..."
|
||||
sleep 5
|
||||
done
|
||||
sudo apt-get update -y 1>/dev/null 2>&1
|
||||
for package in $tools_apt; do
|
||||
echo "Start to install $package"
|
||||
sudo apt-get install $package -y > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
else
|
||||
echo "You system can not install Websoft9 because not have available Linux Package Manager"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
|
||||
download_source() {
|
||||
echo_prefix_source=$'\n[Download Source] - '
|
||||
echo "$echo_prefix_source Download Websoft9 source code from $artifact_url/$source_zip"
|
||||
|
||||
find . -type f -name "websoft9*.zip*" -exec rm -f {} \;
|
||||
if [ -d "$install_path" ]; then
|
||||
echo "Directory $install_path already exists and installation will cover it."
|
||||
else
|
||||
sudo mkdir -p "$install_path"
|
||||
fi
|
||||
|
||||
wget "$artifact_url/$source_zip"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to download source package."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo unzip -o "$source_zip" -d "$install_path" > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to unzip source package."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cp -r $install_path/$source_unzip/* "$install_path"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Move directory failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf "$source_zip" "$install_path/$source_unzip"
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
check_ports() {
|
||||
local ports=("$@")
|
||||
|
||||
echo "Stop Websoft9 Proxy and Cockpit service for reserve ports..."
|
||||
sudo docker stop websoft9-proxy 2>/dev/null || echo "docker stop websoft9-proxy not need "
|
||||
|
||||
for port in "${ports[@]}"; do
|
||||
if ss -tuln | grep ":$port " >/dev/null && ! systemctl status cockpit.socket | grep "$port" >/dev/null; then
|
||||
echo "Port $port is in use or not in cockpit.socket, install failed"
|
||||
exit
|
||||
fi
|
||||
done
|
||||
|
||||
echo "All ports are available"
|
||||
}
|
||||
|
||||
merge_json_files() {
|
||||
local target_path="/etc/docker/daemon.json"
|
||||
|
||||
python3 - <<EOF 2>/dev/null
|
||||
import json
|
||||
import urllib.request
|
||||
import os
|
||||
|
||||
def merge_json_files(file1, file2):
|
||||
print("Merge from local file... ")
|
||||
with open(file1, 'r') as f1, open(file2, 'r') as f2:
|
||||
data1 = json.load(f1)
|
||||
data2 = json.load(f2)
|
||||
|
||||
merged_data = {**data1, **data2}
|
||||
|
||||
with open(file1, 'w') as f:
|
||||
json.dump(merged_data, f, indent=4)
|
||||
|
||||
def download_and_merge(url, file_path):
|
||||
print("Download daemon.json from url and merge... ")
|
||||
with urllib.request.urlopen(url) as response:
|
||||
data = json.loads(response.read().decode())
|
||||
|
||||
with open(file_path, 'r') as f:
|
||||
local_data = json.load(f)
|
||||
|
||||
merged_data = {**local_data, **data}
|
||||
|
||||
with open(file_path, 'w') as f:
|
||||
json.dump(merged_data, f, indent=4)
|
||||
|
||||
# Create target file if it does not exist
|
||||
if not os.path.exists("${target_path}"):
|
||||
os.makedirs(os.path.dirname("${target_path}"), exist_ok=True)
|
||||
with open("${target_path}", 'w') as f:
|
||||
json.dump({}, f)
|
||||
|
||||
if os.path.exists("${install_path}/docker/daemon.json"):
|
||||
merge_json_files("${target_path}", "${install_path}/docker/daemon.json")
|
||||
elif urllib.request.urlopen("${source_github_pages}/docker/daemon.json").getcode() == 200:
|
||||
download_and_merge("${source_github_pages}/docker/daemon.json", "${target_path}")
|
||||
else:
|
||||
print("No target daemon.json file need to merged")
|
||||
EOF
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "merge daemon.json failed, but install continue running"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
set_docker(){
|
||||
echo "Set Docker for Websoft9 backend service..."
|
||||
merge_json_files
|
||||
if ! systemctl is-active --quiet firewalld; then
|
||||
echo "firewalld is not running"
|
||||
else
|
||||
echo "Set firewall for Docker..."
|
||||
sudo firewall-cmd --permanent --new-zone=docker 2> /dev/null
|
||||
sudo firewall-cmd --permanent --zone=docker --add-interface=docker0 2> /dev/null
|
||||
sudo firewall-cmd --permanent --zone=docker --set-target=ACCEPT
|
||||
sudo firewall-cmd --reload
|
||||
sudo systemctl stop firewalld
|
||||
sudo systemctl disable firewalld
|
||||
fi
|
||||
sudo systemctl restart docker
|
||||
}
|
||||
|
||||
install_backends() {
|
||||
echo_prefix_backends=$'\n[Backend] - '
|
||||
echo "$echo_prefix_backends Install backend docker services"
|
||||
set_docker
|
||||
|
||||
cd "$install_path/docker"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to change directory."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo docker network inspect $docker_network >/dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Docker network '$docker_network' already exists."
|
||||
else
|
||||
sudo docker network create $docker_network
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to create docker network."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# set to developer mode
|
||||
if [ -n "$devto" ]; then
|
||||
sed -i "s|\(- \).*:/websoft9/apphub-dev|\1$devto:/websoft9/apphub-dev|g" docker-compose-dev.yml
|
||||
composefile=docker-compose-dev.yml
|
||||
else
|
||||
composefile=docker-compose.yml
|
||||
fi
|
||||
|
||||
container_names=$(docker ps -a --format "{{.Names}}" --filter "name=websoft9")
|
||||
sudo docker compose -p websoft9 -f $composefile down
|
||||
|
||||
# delete some dead containers that docker compose cannot delete
|
||||
if [ ! -z "$container_names" ]; then
|
||||
echo "Deleting containers:"
|
||||
echo $container_names
|
||||
docker rm -f $container_names 2>/dev/null
|
||||
else
|
||||
echo "No containers to delete."
|
||||
fi
|
||||
|
||||
sudo docker compose -f $composefile pull
|
||||
sudo docker compose -p websoft9 -f $composefile up -d --build
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to start docker services."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
install_systemd() {
|
||||
echo -e "\n\n-------- Systemd --------"
|
||||
echo_prefix_systemd=$'\n[Systemd] - '
|
||||
echo "$echo_prefix_systemd Install Systemd service"
|
||||
|
||||
if [ ! -d "$systemd_path" ]; then
|
||||
sudo mkdir -p "$systemd_path"
|
||||
fi
|
||||
|
||||
sudo cp -r $install_path/systemd/script/* "$systemd_path"
|
||||
sudo cp -f "$install_path/systemd/websoft9.service" /lib/systemd/system/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to copy Systemd service file."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to reload Systemd daemon."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo systemctl enable websoft9.service
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to enable Systemd service."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo systemctl start websoft9
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to start Systemd service."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
|
||||
#--------------- main-----------------------------------------
|
||||
check_ports $http_port $https_port $port
|
||||
install_tools
|
||||
download_source
|
||||
|
||||
bash $install_path/install/install_docker.sh
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "install_docker failed with error $?. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
install_backends
|
||||
|
||||
install_systemd
|
||||
|
||||
bash $install_path/install/install_cockpit.sh
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "install_cockpit failed with error $?. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
bash $install_path/install/install_plugins.sh
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "install_plugins failed with error $?. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Restart Docker for Firewalld..."
|
||||
sudo systemctl restart docker
|
||||
|
||||
endtime=$(date +%s)
|
||||
runtime=$((endtime-starttime))
|
||||
echo "Script execution time: $runtime seconds"
|
||||
echo -e "\n-- Install success! ------"
|
||||
echo "Access Websoft9 console by: http://Internet IP:$(grep ListenStream /lib/systemd/system/cockpit.socket | cut -d= -f2) and using Linux user for login"
|
|
@ -1,391 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Define PATH
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
# Export PATH
|
||||
export PATH
|
||||
|
||||
## This script is used for install or upgrade Cockpit on Linux
|
||||
## Cockpit build at redhat family: https://copr.fedorainfracloud.org/coprs/g/cockpit/cockpit-preview/monitor/
|
||||
## Cockpit repository list: https://pkgs.org/download/cockpit
|
||||
## PackageKit: https://www.freedesktop.org/software/PackageKit/
|
||||
## Do not use pkcon to install/update cockpit; the reason is: https://cockpit-project.org/faq.html#error-message-about-being-offline
|
||||
## pkcon can read repositories at you system directly, it don't provide exra repository
|
||||
## [apt show cockpit] or [apt install cockpit] show all additional packages
|
||||
## Ubuntu have backports at file /etc/apt/sources.list by default
|
||||
## Cockpit application: https://cockpit-project.org/applications
|
||||
|
||||
# Command-line options
|
||||
# ==========================================================
|
||||
#
|
||||
# --port <9000>
|
||||
# Use the --port option to set the Websoft9 console port. The default is 9000, for example:
|
||||
#
|
||||
# $ sudo sh install_cockpit.sh --port 9001
|
||||
|
||||
############################################################
|
||||
# Below vars export from install.sh
|
||||
# $port
|
||||
# $install_path
|
||||
############################################################
|
||||
|
||||
echo -e "\n\n-------- Cockpit --------"
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--port)
|
||||
port="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Port priority: --port > ListenStream= > 9000
|
||||
|
||||
cockpit_exist() {
|
||||
systemctl list-unit-files | grep -q "cockpit.service"
|
||||
return $?
|
||||
}
|
||||
|
||||
if cockpit_exist; then
|
||||
cockpit_now_port=$(grep -oP "(?<=^ListenStream=).*" "/lib/systemd/system/cockpit.socket")
|
||||
if [ -z "${cockpit_now_port// }" ]; then
|
||||
echo "cockpit port is null,set it to 9000"
|
||||
cockpit_now_port=9000
|
||||
else
|
||||
echo "$cockpit_now_port at cockpit.socket"
|
||||
fi
|
||||
|
||||
else
|
||||
cockpit_now_port=9000
|
||||
fi
|
||||
|
||||
if [ -n "$port" ]; then
|
||||
cockpit_port=$port
|
||||
else
|
||||
cockpit_port=$cockpit_now_port
|
||||
fi
|
||||
|
||||
|
||||
if [ -n "$install_path" ]; then
|
||||
echo "Have found install files"
|
||||
else
|
||||
install_path="/data/websoft9/source"
|
||||
fi
|
||||
|
||||
echo -e "\nYour installation parameters are as follows: "
|
||||
echo "cockpit_port:$cockpit_port"
|
||||
echo "install_path:$install_path"
|
||||
|
||||
related_containers=("websoft9-apphub")
|
||||
echo_prefix_cockpit=$'\n[Cockpit] - '
|
||||
# The cockpit package depends on [cockpit-bridge, cockpit-ws, cockpit-system], but upgrading cockpit does not upgrade these dependencies
|
||||
cockpit_packages="cockpit cockpit-ws cockpit-bridge cockpit-system cockpit-pcp cockpit-networkmanager cockpit-session-recording cockpit-sosreport"
|
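Because of the dependency note above, every Cockpit component is listed explicitly instead of relying on the meta package alone. A minimal sketch for confirming that all of them landed on the same version after an upgrade (purely illustrative, not part of this script):

for pkg in $cockpit_packages; do
    # Query whichever package manager is present; packages that are not installed are silently skipped
    if command -v dpkg >/dev/null 2>&1; then
        dpkg-query -W -f='${Package} ${Version}\n' "$pkg" 2>/dev/null
    else
        rpm -q --qf '%{NAME} %{VERSION}\n' "$pkg" 2>/dev/null
    fi
done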
||||
menu_overrides_github_page_url="https://websoft9.github.io/websoft9/cockpit/menu_override"
|
||||
cockpit_config_github_page_url="https://websoft9.github.io/websoft9/cockpit/cockpit.conf"
|
||||
cockpit_menu_overrides=()
|
||||
# export OS release environments
|
||||
if [ -f /etc/os-release ]; then
|
||||
. /etc/os-release
|
||||
else
|
||||
echo "Can't judge your Linux distribution"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# This solution is from: https://help.ubuntu.com/community/PinningHowto
|
||||
pin_config="
|
||||
Package: cockpit*
|
||||
Pin: release a=$VERSION_CODENAME-backports
|
||||
Pin-Priority: 1000
|
||||
"
|
||||
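The pin above raises the priority of cockpit* packages from the backports suite to 1000 so they win over the distribution's stock versions on Ubuntu/Debian. A quick way to verify the pin after Set_Repository has written /etc/apt/preferences.d/cockpit_backports (an illustrative check, not part of the script):

apt-cache policy cockpit | head -n 10   # the backports candidate should be listed with priority 1000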
|
||||
check_ports() {
|
||||
local ports=("$@")
|
||||
|
||||
for port in "${ports[@]}"; do
|
||||
if netstat -tuln | grep ":$port " >/dev/null; then
|
||||
echo "Port $port is in use, install failed"
|
||||
exit
|
||||
fi
|
||||
done
|
||||
|
||||
echo "All ports are available"
|
||||
}
|
||||
|
||||
Print_Version(){
|
||||
sudo /usr/libexec/cockpit-ws --version 2>/dev/null || sudo /usr/lib/cockpit-ws --version 2>/dev/null || /usr/lib/cockpit/cockpit-ws --version 2>/dev/null
|
||||
}
|
||||
|
||||
Install_PackageKit(){
|
||||
echo "$echo_prefix_cockpit Install PackageKit(pkcon) and Cockpit repository"
|
||||
|
||||
if command -v pkcon &> /dev/null; then
|
||||
echo "pkcon is at your system ..."
|
||||
|
||||
elif command -v yum &> /dev/null; then
|
||||
if [ "$(cat /etc/redhat-release)" = "Redhat7" ]; then
|
||||
sudo subscription-manager repos --enable rhel-7-server-extras-rpms
|
||||
fi
|
||||
sudo yum install PackageKit -y
|
||||
|
||||
elif command -v dnf &> /dev/null; then
|
||||
sudo dnf install PackageKit -y
|
||||
|
||||
elif command -v apt &> /dev/null; then
|
||||
sudo apt update
|
||||
sudo apt install packagekit -y
|
||||
|
||||
else
|
||||
echo "PackageKit not found, Cockpit cannot be installed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
Set_Repository() {
|
||||
echo "$echo_prefix_cockpit Set Cockpit deb repository"
|
||||
if command -v apt &> /dev/null; then
|
||||
if [ "$NAME" = "Debian" ]; then
|
||||
echo "deb http://deb.debian.org/debian $VERSION_CODENAME-backports main" > /etc/apt/sources.list.d/backports.list
|
||||
fi
|
||||
echo "Set the cockpit repository priority on Ubuntu/Debian..."
|
||||
sudo bash -c "echo '$pin_config' > /etc/apt/preferences.d/cockpit_backports"
|
||||
fi
|
||||
echo "Complete set Cockpit repository"
|
||||
}
|
||||
|
||||
|
||||
Restart_Cockpit(){
|
||||
echo "$echo_prefix_cockpit Restart Cockpit"
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart cockpit.socket 2> /dev/null
|
||||
sudo systemctl restart cockpit || exit 1
|
||||
}
|
||||
|
||||
Add_Firewalld(){
|
||||
echo "Add cockpit service to Firewalld..."
|
||||
# cockpit.xml is not always at the same path across Linux distributions
|
||||
sudo sed -i "s/port=\"[0-9]*\"/port=\"$cockpit_port\"/g" /etc/firewalld/services/cockpit.xml
|
||||
sudo sed -i "s/port=\"[0-9]*\"/port=\"$cockpit_port\"/g" /usr/lib/firewalld/services/cockpit.xml
|
||||
sudo firewall-cmd --zone=public --add-service=cockpit --permanent
|
||||
sudo firewall-cmd --zone=public --add-port=443/tcp --permanent
|
||||
sudo firewall-cmd --zone=public --add-port=80/tcp --permanent
|
||||
sudo firewall-cmd --reload
|
||||
}
|
||||
|
||||
Set_Firewalld(){
|
||||
echo "$echo_prefix_cockpit Set firewalld for cockpit access"
|
||||
if command -v firewall-cmd &> /dev/null; then
|
||||
echo "Set firewall for Cockpit..."
|
||||
if ! systemctl is-active --quiet firewalld; then
|
||||
sudo systemctl start firewalld
|
||||
Add_Firewalld
|
||||
sudo systemctl stop firewalld
|
||||
else
|
||||
Add_Firewalld
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
Set_Selinux(){
|
||||
echo "$echo_prefix_cockpit Set Selinux for cockpit access"
|
||||
if [ -f /etc/selinux/config ]; then
|
||||
echo "Set Selinux for Cockpit..."
|
||||
sudo setenforce 0 1>/dev/null 2>&1
|
||||
sudo sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config 1>/dev/null 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
Set_Cockpit(){
|
||||
echo "$echo_prefix_cockpit Set Cockpit for Websoft9"
|
||||
|
||||
echo "Cockpit allowed root user ..."
|
||||
echo "" > /etc/cockpit/disallowed-users
|
||||
|
||||
# fix bug: https://github.com/Websoft9/websoft9/issues/332
|
||||
sed 's/selector(:is():where())/selector(:is(*):where(*))/' -i /usr/share/cockpit/static/login.js
|
||||
|
||||
echo "Set Cockpit config file..."
|
||||
if [ -f "$install_path/cockpit/cockpit.conf" ]; then
|
||||
cp -f "$install_path/cockpit/cockpit.conf" /etc/cockpit/cockpit.conf
|
||||
else
|
||||
echo "Download config from URL $cockpit_config_github_page_url"
|
||||
curl -sSL $cockpit_config_github_page_url | sudo tee /etc/cockpit/cockpit.conf > /dev/null
|
||||
fi
|
||||
|
||||
|
||||
echo "Change cockpit default port to $cockpit_port ..."
|
||||
sudo sed -i "s/ListenStream=[0-9]*/ListenStream=${cockpit_port}/" /lib/systemd/system/cockpit.socket
|
||||
|
||||
|
||||
if docker ps --format '{{.Names}}' | grep -wq "${related_containers[0]}"; then
|
||||
echo "Try to change cockpit port at ${related_containers[0]} container..."
|
||||
sudo docker exec -i ${related_containers[0]} apphub setconfig --section cockpit --key port --value $cockpit_port || true
|
||||
else
|
||||
echo "Not found ${related_containers[0]} container"
|
||||
fi
|
||||
|
||||
|
||||
# fwupd-refresh.service may push errors to the Cockpit menu, so disable it
|
||||
if sudo systemctl is-active --quiet fwupd-refresh.service; then
|
||||
echo "fwupd-refresh.service is already running. Stopping and disabling it..."
|
||||
sudo systemctl stop fwupd-refresh.service
|
||||
sudo systemctl disable fwupd-refresh.service
|
||||
echo "fwupd-refresh.service stopped and disabled."
|
||||
else
|
||||
echo "fwupd-refresh.service is not running."
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
get_github_files() {
|
||||
python3 - <<EOF
|
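# Note: this inline helper assumes the 'requests' package is importable by python3;
# the install scripts shown in this commit do not install it themselves.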
||||
import requests
|
||||
import json
|
||||
|
||||
url = "https://api.github.com/repos/Websoft9/websoft9/contents/cockpit/menu_override?ref=main"
|
||||
headers = {
|
||||
"Accept": "application/vnd.github.v3+json"
|
||||
}
|
||||
|
||||
response = requests.get(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
files = json.loads(response.text)
|
||||
for file in files:
|
||||
print(file['name'])
|
||||
else:
|
||||
print(f"Error: {response.status_code}")
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
Download_Menu_Override(){
|
||||
|
||||
cockpit_menu_overrides=($(get_github_files))
|
||||
|
||||
for file in "${cockpit_menu_overrides[@]}"
|
||||
do
|
||||
|
||||
echo "$menu_overrides_github_page_url/$file"
|
||||
|
||||
curl -sSL "$menu_overrides_github_page_url/$file" | sudo tee /etc/cockpit/"$file" > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to download files"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
Edit_Menu(){
|
||||
echo "$echo_prefix_cockpit Start to edit Cockpit origin Menu"
|
||||
if [ -f "$install_path/cockpit/cockpit.conf" ]; then
|
||||
cp -f "$install_path/cockpit/cockpit.conf" /etc/cockpit/cockpit.conf
|
||||
else
|
||||
echo "Download config file from URL..."
|
||||
curl -sSL $cockpit_config_github_page_url | sudo tee /etc/cockpit/cockpit.conf > /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to download cockpit.conf"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if test -d "$install_path/cockpit/menu_override"; then
|
||||
cp -r $install_path/cockpit/menu_override/* /etc/cockpit
|
||||
else
|
||||
echo "Download override files from URL..."
|
||||
Download_Menu_Override
|
||||
fi
|
||||
}
|
||||
|
||||
Install_Cockpit(){
|
||||
if cockpit_exist; then
|
||||
echo "$echo_prefix_cockpit Prepare to upgrade Cockpit"
|
||||
echo "You installed version: "
|
||||
Print_Version
|
||||
else
|
||||
echo "$echo_prefix_cockpit Prepare to install Cockpit"
|
||||
check_ports $port
|
||||
fi
|
||||
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
apt --version >/dev/null 2>&1
|
||||
apt_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
for pkg in $cockpit_packages
|
||||
do
|
||||
echo "Install or upgrade $pkg"
|
||||
sudo dnf upgrade -y "$pkg" > /dev/null || echo "$pkg failed to upgrade"
|
||||
sudo dnf install -y "$pkg" > /dev/null || echo "$pkg failed to install"
|
||||
done
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
for pkg in $cockpit_packages
|
||||
do
|
||||
echo "Install or update $pkg"
|
||||
sudo yum update -y "$pkg" > /dev/null || echo "$pkg failed to update"
|
||||
sudo yum install -y "$pkg" > /dev/null || echo "$pkg failed to install"
|
||||
done
|
||||
elif [ $apt_status -eq 0 ]; then
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
sudo dpkg --configure -a
|
||||
apt-get update -y >/dev/null
|
||||
apt-get --fix-broken install -y
|
||||
for pkg in $cockpit_packages
|
||||
do
|
||||
echo "Installing $pkg"
|
||||
sudo apt-get install -u -y "$pkg" > /dev/null || echo "$pkg failed to install"
|
||||
done
|
||||
else
|
||||
echo "Neither apt,dnf nor yum found. Please install one of them and try again."
|
||||
fi
|
||||
|
||||
Set_Firewalld
|
||||
Set_Selinux
|
||||
Set_Cockpit
|
||||
Edit_Menu
|
||||
Restart_Cockpit
|
||||
}
|
||||
|
||||
Test_Cockpit(){
|
||||
echo "$echo_prefix_cockpit Test Cockpit console accessibility"
|
||||
test_cmd="curl localhost:$cockpit_port"
|
||||
start_time=$(date +%s)
|
||||
timeout=30
|
||||
while true; do
|
||||
if $test_cmd >/dev/null 2>&1; then
|
||||
echo "Cockpit running OK..."
|
||||
break
|
||||
else
|
||||
current_time=$(date +%s)
|
||||
elapsed_time=$(($current_time - $start_time))
|
||||
if [ $elapsed_time -ge $timeout ]; then
|
||||
echo "Cockpit is not running... Timeout after waiting $timeout seconds."
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
|
||||
Print_Version
|
||||
}
|
||||
|
||||
|
||||
#### -------------- main() start here ------------------- ####
|
||||
|
||||
Set_Repository
|
||||
Install_Cockpit
|
||||
Test_Cockpit
|
||||
|
||||
# release package memory
|
||||
if systemctl cat packagekit > /dev/null 2>&1; then
|
||||
sudo systemctl restart packagekit
|
||||
else
|
||||
echo "no packagekit"
|
||||
fi
|
|
@ -1,165 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
export PATH
|
||||
|
||||
# Install and upgrade Docker for most Linux distributions
|
||||
# This script is adapted from https://get.docker.com and adds the following:
|
||||
#
|
||||
# - install or update Docker
|
||||
# - support Redhat, CentOS-Stream, OracleLinux, AmazonLinux
|
||||
#
|
||||
# 1. download the script
|
||||
#
|
||||
# $ curl -fsSL https://websoft9.github.io/websoft9/install/install_docker.sh -o install_docker.sh
|
||||
#
|
||||
# 2. verify the script's content
|
||||
#
|
||||
# $ cat install_docker.sh
|
||||
#
|
||||
# 3. run the script with --dry-run to verify the steps it executes
|
||||
#
|
||||
# $ sh install_docker.sh --dry-run
|
||||
#
|
||||
# 4. run the script either as root, or using sudo to perform the installation.
|
||||
#
|
||||
# $ sudo sh install_docker.sh
|
||||
|
||||
|
||||
# It must be exported, otherwise Rocky Linux cannot use it in the yum command
|
||||
export docker_packages="docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"
|
||||
echo_prefix_docker=$'\n[Docker] - '
|
||||
|
||||
docker_exist() {
|
||||
# Check whether the docker command exists
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "docker command not exist"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check whether the Docker service is running
|
||||
systemctl is-active docker.service &> /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Docker service is not running, trying to start it..."
|
||||
systemctl start docker.service
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to start Docker service."
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
Install_Docker(){
|
||||
local mirror=$1
|
||||
local timeout=$2
|
||||
local repo_url=$3
|
||||
|
||||
echo "$echo_prefix_docker Installing Docker from ${mirror} with timeout ${timeout} seconds for your system"
|
||||
|
||||
if [ "$mirror" = "Official" ]; then
|
||||
mirror=""
|
||||
fi
|
||||
|
||||
# For redhat family
|
||||
if [[ -f /etc/redhat-release ]] || command -v amazon-linux-extras >/dev/null 2>&1; then
|
||||
# For CentOS, Fedora, or RHEL (only s390x)
|
||||
if [[ $(cat /etc/redhat-release) =~ "Red Hat" ]] && [[ $(uname -m) == "s390x" ]] || [[ $(cat /etc/redhat-release) =~ "CentOS" ]] || [[ $(cat /etc/redhat-release) =~ "Fedora" ]]; then
|
||||
curl -fsSL https://get.docker.com -o get-docker.sh
|
||||
timeout $timeout sh get-docker.sh --channel stable --mirror $mirror
|
||||
else
|
||||
# For other distributions (RedHat, Rocky Linux, ...)
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
sudo dnf install dnf-utils -y > /dev/null
|
||||
sudo dnf config-manager --add-repo $repo_url
|
||||
timeout $timeout sudo dnf install $docker_packages -y
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
sudo yum install yum-utils -y > /dev/null
|
||||
sudo yum-config-manager --add-repo $repo_url
|
||||
if command -v amazon-linux-extras >/dev/null 2>&1; then
|
||||
wget -O /etc/yum.repos.d/CentOS7-Base.repo https://websoft9.github.io/stackhub/apps/roles/role_common/files/CentOS7-Base.repo
|
||||
sudo sed -i "s/\$releasever/7/g" /etc/yum.repos.d/docker-ce.repo
|
||||
timeout $timeout sudo yum install $docker_packages --disablerepo='amzn2-extras,amzn2-core' -y
|
||||
else
|
||||
timeout $timeout sudo yum install $docker_packages -y
|
||||
fi
|
||||
|
||||
else
|
||||
echo "None of the required package managers are installed."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# For Ubuntu, Debian, or Raspbian
|
||||
if type apt >/dev/null 2>&1; then
|
||||
# Wait for apt to be unlocked
|
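# The comment above mentions waiting for apt, but no wait is performed here; a minimal
# sketch of such a wait (an assumption, mirroring the fuser-based loop used elsewhere
# in these scripts) would be:
while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do
    echo "Waiting for other software managers to finish..."
    sleep 5
done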
||||
curl -fsSL https://get.docker.com -o get-docker.sh
|
||||
timeout $timeout sh get-docker.sh --channel stable --mirror $mirror
|
||||
fi
|
||||
}
|
||||
|
||||
Upgrade_Docker(){
|
||||
if docker_exist; then
|
||||
echo "$echo_prefix_docker Upgrading Docker for your system..."
|
||||
dnf --version >/dev/null 2>&1
|
||||
dnf_status=$?
|
||||
yum --version >/dev/null 2>&1
|
||||
yum_status=$?
|
||||
apt --version >/dev/null 2>&1
|
||||
apt_status=$?
|
||||
|
||||
if [ $dnf_status -eq 0 ]; then
|
||||
sudo dnf update -y $docker_packages
|
||||
elif [ $yum_status -eq 0 ]; then
|
||||
sudo yum update -y $docker_packages
|
||||
elif [ $apt_status -eq 0 ]; then
|
||||
sudo apt update -y
|
||||
sudo apt -y install --only-upgrade $docker_packages
|
||||
else
|
||||
echo "Docker installed, but cannot upgrade"
|
||||
fi
|
||||
else
|
||||
local mirrors=("Official" "Official" "AzureChinaCloud" "Aliyun")
|
||||
local urls=("https://download.docker.com/linux/centos/docker-ce.repo" "https://download.docker.com/linux/centos/docker-ce.repo" "https://mirror.azure.cn/docker-ce/linux/centos/docker-ce.repo" "https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo")
|
||||
local timeout=180
|
||||
local max_retries=4
|
||||
local retry_count=0
|
||||
|
||||
while ((retry_count < max_retries)); do
|
||||
Install_Docker ${mirrors[$retry_count]} $timeout ${urls[$retry_count]}
|
||||
if ! docker_exist; then
|
||||
echo "Installation timeout or failed, retrying with ${mirrors[$retry_count]} mirror..."
|
||||
((retry_count++))
|
||||
sleep 3
|
||||
else
|
||||
echo "Docker installed successfully."
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Docker Installation failed after $max_retries retries."
|
||||
exit 1
|
||||
|
||||
fi
|
||||
}
|
||||
|
||||
Start_Docker(){
|
||||
# should have Docker server and Docker cli
|
||||
if docker_exist; then
|
||||
echo "$echo_prefix_docker Starting Docker"
|
||||
sudo systemctl enable docker
|
||||
sudo systemctl restart docker
|
||||
else
|
||||
echo "Docker not installed or start failed, exit..."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
echo -e "\n\n-------- Docker --------"
|
||||
Upgrade_Docker
|
||||
Start_Docker
|
|
@ -1,82 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
export PATH
|
||||
|
||||
|
||||
# Command-line options
|
||||
# ==========================================================
|
||||
#
|
||||
# --channel <release|dev>
|
||||
# Use the --channel option to install a release (production) or dev distribution. The default is release, for example:
|
||||
#
|
||||
# $ sudo sh install.sh --channel release
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
# get input and define vars
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--channel)
|
||||
channel="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# channel,source_github_pages,install_path from install.sh priority
|
||||
if [ -z "$channel" ]; then
|
||||
channel="release"
|
||||
fi
|
||||
|
||||
if [ -z "$source_github_pages" ]; then
|
||||
source_github_pages="https://websoft9.github.io/websoft9"
|
||||
fi
|
||||
|
||||
if [ -z "$install_path" ]; then
|
||||
install_path="/data/websoft9/source"
|
||||
fi
|
||||
|
||||
echo -e "\n\n-------- Plugins --------"
|
||||
echo "Your installation parameters are as follows: "
|
||||
echo "--channel: $channel"
|
||||
echo "--install_path: $install_path"
|
||||
|
||||
artifact_url="https://w9artifact.blob.core.windows.net/$channel/websoft9/plugin"
|
||||
echo_prefix_plugins=$'\n[Plugins] - '
|
||||
|
||||
versions_local_file="$install_path/version.json"
|
||||
versions_url="$source_github_pages/version.json"
|
||||
file_suffix=".zip"
|
||||
plugin_path="/usr/share/cockpit"
|
||||
|
||||
echo "$echo_prefix_plugins Starting download plugin and update it"
|
||||
|
||||
if [ -f "$versions_local_file" ]; then
|
||||
echo "File $versions_local_file exists."
|
||||
else
|
||||
echo "File $versions_local_file does not exist. Downloading from $versions_url"
|
||||
wget -q $versions_url -O $versions_local_file
|
||||
fi
|
||||
|
||||
plugins=$(jq -c '.plugins' $versions_local_file)
|
||||
if [ -z "$plugins" ]; then
|
||||
echo "No plugins found in $versions_local_file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for key in $(jq -r '.plugins | keys[]' $versions_local_file); do
|
||||
version=$(jq -r ".plugins.${key}" $versions_local_file)
|
||||
file_url=$artifact_url/$key/$key-$version$file_suffix
|
||||
file_name=$key-$version$file_suffix
|
||||
echo "Download from $file_url"
|
||||
wget -q $file_url -O $file_name
|
||||
unzip -oq $file_name -d $plugin_path
|
||||
rm -rf $file_name
|
||||
done
|
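For reference, a hedged sketch of how one entry in version.json maps to a download URL in the loop above; the plugin name and version are illustrative assumptions, and artifact_url/file_suffix are the variables defined earlier in this script:

cat > /tmp/version-sample.json <<'EOF'
{ "plugins": { "myapps": "0.1.0" } }
EOF
key="myapps"
version=$(jq -r ".plugins.${key}" /tmp/version-sample.json)
# Prints e.g. https://w9artifact.blob.core.windows.net/release/websoft9/plugin/myapps/myapps-0.1.0.zip
echo "$artifact_url/$key/$key-$version$file_suffix"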
||||
|
||||
find /usr/share/cockpit -type f -name "*.py3" -exec chmod +x {} \;
|
||||
|
||||
echo "Plugins install successfully..."
|
|
@ -1,27 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
export PATH
|
||||
|
||||
# This script is under development and cannot be used yet
|
||||
# to do
|
||||
|
||||
|
||||
Remove_Podman(){
|
||||
echo "$echo_prefix_docker Try to remove Podman"
|
||||
podman pod stop --all
|
||||
# Remove Podman and its dependencies
|
||||
if [ -x "$(command -v dnf)" ]; then
|
||||
sudo dnf remove podman -y
|
||||
elif [ -x "$(command -v apt)" ]; then
|
||||
sudo apt remove podman -y
|
||||
elif [ -x "$(command -v zypper)" ]; then
|
||||
sudo zypper remove podman -y
|
||||
elif [ -x "$(command -v pacman)" ]; then
|
||||
sudo pacman -Rs podman --noconfirm
|
||||
else
|
||||
echo "Unable to find a suitable package manager to remove Podman."
|
||||
exit 1
|
||||
fi
|
||||
echo "Podman has been stopped and removed."
|
||||
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Define PATH
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
# Export PATH
|
||||
export PATH
|
||||
|
||||
|
||||
# Command-line options
|
||||
# ==============================================================================
|
||||
#
|
||||
# --cockpit
|
||||
# Use the --cockpit option to remove cockpit:
|
||||
#
|
||||
# $ sudo sh install.sh --cockpit
|
||||
#
|
||||
# --files
|
||||
# Use the --files option to remove installed files:
|
||||
#
|
||||
# $ sudo sh install.sh --files
|
||||
#
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
install_path="/data/websoft9/source"
|
||||
systemd_path="/opt/websoft9/systemd"
|
||||
cockpit_plugin_path="/usr/share/cockpit"
|
||||
cockpit_packages="cockpit cockpit-ws cockpit-bridge cockpit-system cockpit-pcp cockpit-storaged cockpit-networkmanager cockpit-session-recording cockpit-doc cockpit-packagekit cockpit-sosreport"
|
||||
|
||||
echo -e "\n---Remove Websoft9 backend service containers---"
|
||||
sudo docker compose -p websoft9 down -v
|
||||
|
||||
echo -e "\n---Remove Websoft9 systemd service---"
|
||||
sudo systemctl disable websoft9
|
||||
sudo systemctl stop websoft9
|
||||
rm -rf /lib/systemd/system/websoft9.service
|
||||
|
||||
|
||||
|
||||
remove_cockpit() {
|
||||
echo -e "\n---Remove Cockpit---"
|
||||
sudo systemctl stop cockpit.socket cockpit
|
||||
for package in $cockpit_packages; do
|
||||
sudo pkcon remove $package -y || true
|
||||
done
|
||||
sudo rm -rf /etc/cockpit/*
|
||||
}
|
||||
|
||||
remove_files() {
|
||||
echo -e "\n---Remove files---"
|
||||
sudo rm -rf $install_path/* $systemd_path/* $cockpit_plugin_path/*
|
||||
}
|
||||
|
||||
for arg in "$@"
|
||||
do
|
||||
case $arg in
|
||||
--cockpit)
|
||||
remove_cockpit
|
||||
shift
|
||||
;;
|
||||
--files)
|
||||
remove_files
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo "Unknown argument: $arg"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo -e "\nCongratulations, Websoft9 uninstall is complete!"
|
|
@ -1,3 +0,0 @@
|
|||
# Scripts
|
||||
|
||||
Some useful scripts for Websoft9 maintenance
|
|
@ -1,16 +0,0 @@
|
|||
#!/bin/bash
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
|
||||
|
||||
app_port=$(cat /data/apps/$1/.env |grep APP_HTTP_PORT |cut -d= -f2 |sed -n 1p)
|
||||
|
||||
while true
|
||||
do
|
||||
app_port_lines=$(cat /tmp/port.txt |grep "$app_port" |wc -l)
|
||||
if [ "$app_port_lines" -gt 0 ];then
|
||||
app_port=`expr $app_port + 1`
|
||||
else
|
||||
echo $app_port >> /tmp/port.txt
|
||||
sed -i "s/APP_HTTP_PORT=.*/APP_HTTP_PORT=$app_port/g" /data/apps/$1/.env
|
||||
break
|
||||
fi
|
||||
done
|
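A hedged usage sketch for the helper above (its filename is not shown in this diff, so 'set_port.sh' and the app name 'wordpress' are illustrative assumptions): it takes an app directory name under /data/apps as $1, reads APP_HTTP_PORT from that app's .env, bumps the port until it is no longer recorded in /tmp/port.txt, and writes the result back.

bash set_port.sh wordpress    # adjusts APP_HTTP_PORT in /data/apps/wordpress/.env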
|
@ -1,511 +0,0 @@
|
|||
#!/bin/bash
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
|
||||
|
||||
set -e
|
||||
|
||||
# Please modify this version and time after update
|
||||
version(){
|
||||
sudo echo "version: 1.6"
|
||||
sudo echo "updated date: 2021-05-26"
|
||||
}
|
||||
|
||||
# Tool list; only apt or yum installation is supported
|
||||
tool_list=(
|
||||
git
|
||||
pwgen
|
||||
jq
|
||||
wget
|
||||
curl
|
||||
figlet
|
||||
boxes
|
||||
)
|
||||
|
||||
help_str="
|
||||
Usage:
|
||||
-h, --help Show this help message
|
||||
-r, --repo_name The name of the repository
|
||||
-p, --package Build a local package; only needed when packaging
|
||||
-v, --version Show version info
|
||||
example: bash install.sh -r template -p
|
||||
"
|
||||
make_package=false
|
||||
|
||||
# Get option parameters
|
||||
getopt_cmd=$(getopt -o r:phv --long repo_name:,package,help,version -n "Parameter error" -- "$@")
|
||||
eval set -- "$getopt_cmd"
|
||||
|
||||
while [ -n "$1" ]
|
||||
do
|
||||
case "$1" in
|
||||
-r|--repo_name)
|
||||
repo_name=$2
|
||||
shift ;;
|
||||
-h|--help)
|
||||
sudo echo -e "$help_str"
|
||||
exit ;;
|
||||
-p|--package)
|
||||
make_package=true
|
||||
shift ;;
|
||||
-v|--version)
|
||||
version
|
||||
shift ;;
|
||||
--)
|
||||
break
|
||||
shift ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
|
||||
[ ! -n "$repo_name" ] && exit 1
|
||||
|
||||
install_tools(){
|
||||
if command -v apt > /dev/null;then
|
||||
sudo apt update 1>/dev/null 2>&1
|
||||
sudo apt install ${tool_list[*]} -y 1>/dev/null 2>&1
|
||||
elif command -v yum > /dev/null;then
|
||||
sudo yum clean all 1>/dev/null 2>&1
|
||||
sudo yum makecache 1>/dev/null 2>&1
|
||||
sudo yum install ${tool_list[*]} -y 1>/dev/null 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
download_docker_source(){
|
||||
docker_download_url="https://download.docker.com/linux/static/stable/x86_64/docker-20.10.6.tgz"
|
||||
cd /tmp/
|
||||
sudo rm -rf docker.tgz
|
||||
sudo wget $docker_download_url -O docker.tgz 1>/dev/null 2>&1
|
||||
sudo echo -e "docker downloaded successfully"
|
||||
sudo cat > /tmp/docker.service <<EOF
|
||||
[Unit]
|
||||
Description=Docker Application Container Engine
|
||||
Documentation=https://docs.docker.com
|
||||
After=network-online.target firewalld.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
# the default is not to use systemd for cgroups because the delegate issues still
|
||||
# exists and systemd currently does not support the cgroup feature set required
|
||||
# for containers run by docker
|
||||
ExecStart=/usr/bin/dockerd
|
||||
ExecReload=/bin/kill -s HUP \$MAINPID
|
||||
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
||||
# in the kernel. We recommend using cgroups to do container-local accounting.
|
||||
LimitNOFILE=infinity
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
# Uncomment TasksMax if your systemd version supports it.
|
||||
# Only systemd 226 and above support this version.
|
||||
#TasksMax=infinity
|
||||
TimeoutStartSec=0
|
||||
# set delegate yes so that systemd does not reset the cgroups of docker containers
|
||||
Delegate=yes
|
||||
# kill only the docker process, not all processes in the cgroup
|
||||
KillMode=process
|
||||
# restart the docker process if it exits prematurely
|
||||
Restart=on-failure
|
||||
StartLimitBurst=3
|
||||
StartLimitInterval=60s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
}
|
||||
|
||||
install_docker_script(){
|
||||
if command -v docker > /dev/null;then
|
||||
sudo echo -e `docker -v`
|
||||
sudo echo -e "Docker installed successfully"
|
||||
else
|
||||
sudo curl -fsSL https://get.docker.com -o get-docker.sh &>/dev/null && sh get-docker.sh &>/dev/null
|
||||
sudo rm -rf get-docker.sh
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker &>/dev/null
|
||||
sudo echo -e `docker -v`
|
||||
sudo echo -e "Docker installed successfully"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
uninstall_docker(){
|
||||
sudo rm -f /etc/systemd/system/docker.service
|
||||
sudo rm -rf /usr/bin/docker*
|
||||
sudo systemctl daemon-reload
|
||||
sudo echo -e "Docker uninstalled successfully"
|
||||
}
|
||||
|
||||
download_docker_compose(){
|
||||
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)" -o /tmp/docker-compose 1>/dev/null 2>&1
|
||||
sudo chmod +x /tmp/docker-compose
|
||||
sudo echo -e "docker-compose downloaded successfully"
|
||||
}
|
||||
|
||||
install_docker_compose(){
|
||||
curl -L "https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 1>/dev/null 2>&1
|
||||
sudo chmod +x /usr/local/bin/docker-compose
|
||||
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose &>/dev/null || true
|
||||
sudo echo `docker-compose -v`
|
||||
sudo echo -e "docker-compose installed successfully"
|
||||
}
|
||||
|
||||
save_images(){
|
||||
sudo rm -rf /tmp/docker-$repo_name
|
||||
sudo git clone https://github.com/Websoft9/docker-$repo_name.git /tmp/docker-$repo_name || sudo git clone https://github.com.cnpmjs.org/Websoft9/docker-$repo_name.git /tmp/docker-$repo_name
|
||||
sudo docker rmi `docker images -aq` -f &>/dev/null || true
|
||||
|
||||
cd /tmp/docker-$repo_name
|
||||
|
||||
|
||||
# Pull images and save images
|
||||
sudo systemctl start docker
|
||||
docker-compose pull
|
||||
sudo echo -e "In image packaging, there is a long wait..."
|
||||
sudo docker save $(docker images | grep -v REPOSITORY | awk 'BEGIN{OFS=":";ORS=" "}{print $1,$2}') -o /tmp/$repo_name.tar
|
||||
sudo echo -e "The image was successfully saved as a tar package"
|
||||
}
|
||||
|
||||
installation(){
|
||||
sudo rm -rf $install_dir
|
||||
sudo mkdir -p $install_dir /credentials 1>/dev/null 2>&1 && cd $install_dir
|
||||
sudo git clone https://github.com/Websoft9/docker-$repo_name.git $install_dir || sudo git clone https://github.com.cnpmjs.org/Websoft9/docker-$repo_name.git $install_dir
|
||||
|
||||
# Rename compose and env file name
|
||||
cd $install_dir
|
||||
|
||||
# Stop the containers and remove the volumes in case of a second installation
|
||||
cd $install_dir
|
||||
rm -rf volumes
|
||||
sudo docker-compose down -v 1>/dev/null 2>&1
|
||||
|
||||
# Avoiding db port conflicts
|
||||
sudo echo -e "The database port is changing"
|
||||
db_port_lines=$(cat $install_dir/.env |grep DB_.*PORT |wc -l)
|
||||
db_port=$(cat $install_dir/.env |grep DB_.*PORT |cut -d= -f2 |sed -n 1p)
|
||||
|
||||
while true
|
||||
do
|
||||
if [ "$db_port_lines" -gt 0 ];then
|
||||
os_db_port_lines=$(ss -ntulp |grep -w "$db_port" |wc -l)
|
||||
if [ "$os_db_port_lines" -gt 0 ];then
|
||||
db_port=`expr $db_port + 1`
|
||||
sed -ri "s/(DB.*_PORT=).*/\1$db_port/" $install_dir/.env
|
||||
else
|
||||
break
|
||||
fi
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# DB Random password
|
||||
sudo echo -e "---$repo_name Installation Wizard----" |boxes |tee -a /credentials/password.txt
|
||||
new_password=$(pwgen -ncCs 15 1)
|
||||
|
||||
db_password_lines=`cat $install_dir/.env |grep DB.*PASSWORD |wc -l`
|
||||
db_user_lines=`cat $install_dir/.env |grep DB_.*_USER |wc -l`
|
||||
|
||||
if [ $db_password_lines -gt 0 ];then
|
||||
if [ $db_user_lines -gt 0 ];then
|
||||
db_username=$(cat $install_dir/.env |grep DB_.*_USER |cut -d= -f2 |sed -n 1p )
|
||||
sudo echo "db username: $db_username" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "db username: root" |tee -a /credentials/password.txt
|
||||
fi
|
||||
sudo sed -ri "s/(DB_.*_PASSWORD=).*/\1$new_password/" $install_dir/.env &>/dev/null || true
|
||||
sudo echo "db password: $new_password" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "No database password" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
if [ "$db_port_lines" -gt 0 ];then
|
||||
sudo echo "db port: $db_port" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
sudo echo -e "************************************\n"|tee -a /credentials/password.txt
|
||||
|
||||
# APP Random password
|
||||
app_password_lines=$(cat $install_dir/.env |grep -w "APP_PASSWORD_INIT" |wc -l)
|
||||
app_user_lines=$(cat $install_dir/.env |grep -w "APP_USER" |wc -l)
|
||||
app_port_lines=$(cat $install_dir/.env |grep -w "APP_PORT" |wc -l)
|
||||
|
||||
if [ "$app_user_lines" -gt 0 ];then
|
||||
app_username=$(cat $install_dir/.env |grep -w "APP_USER" |cut -d= -f2 |sed -n 1p)
|
||||
sudo echo "$repo_name login username: $app_username" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "$repo_name login username: default username, please see the $install_dir/.env" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
if [ "$app_password_lines" -gt 0 ];then
|
||||
sudo sed -ri "s/(APP_PASSWORD=).*/\1$new_password/" $install_dir/.env &>/dev/null || true
|
||||
sudo echo "$repo_name login password: $new_password" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "$repo_name login password: default password, please see the $install_dir/.env" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
if [ "$app_port_lines" -gt 0 ];then
|
||||
app_port=$(cat $install_dir/.env |grep -w "APP_PORT" |cut -d= -f2 |sed -n 1p)
|
||||
sudo echo "$repo_name login port: $app_port" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
sudo echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" |tee -a /credentials/password.txt
|
||||
|
||||
# Modify public network IP
|
||||
public_ip=`wget -O - https://download.websoft9.com/ansible/get_ip.sh 2>/dev/null |bash`
|
||||
case $repo_name in
|
||||
"erpnext")
|
||||
sudo sed -i "s/APP_SITE_NAME.*/APP_SITE_NAME=$public_ip/g" $install_dir/.env
|
||||
sudo sed -i "s/APP_SITES=.*/APP_SITES=\`$public_ip\`/g" $install_dir/.env
|
||||
;;
|
||||
"graylog")
|
||||
sudo sed -i "s#APP_HTTP_EXTERNAL_URI=.*#APP_HTTP_EXTERNAL_URI=http://$public_ip:9001/#g" $install_dir/.env
|
||||
;;
|
||||
"rocketchat")
|
||||
sudo sed -i "s#APP_ROOT_URL=.*#APP_ROOT_URL=http://$public_ip:9001/#g" $install_dir/.env
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
# Change compose cli environment
|
||||
export DOCKER_CLIENT_TIMEOUT=500
|
||||
export COMPOSE_HTTP_TIMEOUT=500
|
||||
|
||||
sudo systemctl start docker
|
||||
sudo docker-compose up -d
|
||||
sleep 5
|
||||
sudo clear
|
||||
sudo echo -e "\n $repo_name installation complete\n" |boxes -d whirly
|
||||
sudo echo -e "\n Please go to $repo_name to view the README file"
|
||||
sudo docker ps -a
|
||||
}
|
||||
|
||||
add_install_script(){
|
||||
sudo rm -rf /tmp/install.sh /tmp/README /tmp/setup.sh
|
||||
|
||||
# Mirror package installation script
|
||||
cat > /tmp/install.sh <<-EOF
|
||||
# Install docker
|
||||
sudo tar -xf docker.tgz
|
||||
sudo systemctl stop docker &>/dev/null || true
|
||||
sudo mv docker.service /etc/systemd/system/docker.service
|
||||
sudo mv docker/* /usr/bin/ 1>/dev/null 2>&1
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker &>/dev/null
|
||||
sudo echo \$(docker -v)
|
||||
sudo echo -e "Docker was installed successfully"
|
||||
|
||||
# Install docker-compose
|
||||
sudo mv docker-compose /usr/local/bin/docker-compose 1>/dev/null 2>&1
|
||||
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose &>/dev/null || true
|
||||
sudo echo \$(docker-compose -v)
|
||||
sudo echo -e "docker-compose installed successfully"
|
||||
|
||||
# Pre_installation
|
||||
sudo rm -rf $install_dir
|
||||
sudo mkdir -p $install_dir /credentials 1>/dev/null 2>&1
|
||||
sudo docker load -i $repo_name.tar
|
||||
cur_dir=\$(pwd)
|
||||
upper_dir=\$(dirname $install_dir)
|
||||
sudo rm -rf \$upper_dir/$repo_name
|
||||
cp=\$(which cp)
|
||||
\$cp -rf \$cur_dir/docker-$repo_name \$upper_dir/$repo_name 1>/dev/null 2>&1
|
||||
sudo mv README \$upper_dir/$repo_name/README 1>/dev/null 2>&1
|
||||
|
||||
# Stop the containers and remove the volumes in case of a second installation
|
||||
cd $install_dir
|
||||
rm -rf volumes
|
||||
sudo docker-compose down -v 1>/dev/null 2>&1
|
||||
|
||||
# Avoiding db port conflicts
|
||||
db_port_lines=\$(cat $install_dir/.env |grep DB_.*PORT |wc -l)
|
||||
db_port=\$(cat $install_dir/.env |grep DB_.*PORT |cut -d= -f2 |sed -n 1p)
|
||||
|
||||
while true
|
||||
do
|
||||
if [ "\$db_port_lines" -gt 0 ];then
|
||||
os_db_port_lines=\$(ss -ntulp |grep \$db_port |wc -l)
|
||||
if [ "\$os_db_port_lines" -gt 0 ];then
|
||||
db_port=`expr \$db_port + 1`
|
||||
sed -ri "s/(DB.*_PORT=).*/\1\$db_port/" $install_dir/.env
|
||||
else
|
||||
break
|
||||
fi
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# DB Random password
|
||||
sudo echo -e "---$repo_name Installation Wizard---\n" |tee -a /credentials/password.txt
|
||||
|
||||
new_password=\$(date | md5sum | awk '{print $1}' |cut -c 3-18)
|
||||
|
||||
db_password_lines=\$(cat $install_dir/.env |grep DB.*PASSWORD |wc -l)
|
||||
db_user_lines=\$(cat $install_dir/.env |grep DB_.*_USER |wc -l)
|
||||
|
||||
if [ \$db_password_lines -gt 0 ];then
|
||||
if [ \$db_user_lines -gt 0 ];then
|
||||
db_username=\$(cat $install_dir/.env |grep DB_.*_USER |cut -d= -f2 |sed -n 1p)
|
||||
sudo echo "db username: \$db_username" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "db username: root" |tee -a /credentials/password.txt
|
||||
fi
|
||||
sudo sed -ri "s/(DB_.*_PASSWORD=).*/\1\$new_password/" $install_dir/.env &>/dev/null || true
|
||||
sudo echo "db password: \$new_password" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "No database password" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
if [ "\$db_port_lines" -gt 0 ];then
|
||||
sudo echo "db port: \$db_port" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
sudo echo -e "************************************\n" |tee -a /credentials/password.txt
|
||||
|
||||
# APP Random password
|
||||
app_user_lines=\$(cat $install_dir/.env |grep -w "APP_USER" |wc -l)
|
||||
app_password_lines=\$(cat $install_dir/.env |grep -w "APP_PASSWORD_INIT" |wc -l)
|
||||
app_port_lines=\$(cat $install_dir/.env |grep -w "APP_PORT" |wc -l)
|
||||
|
||||
if [ "\$app_user_lines" -gt 0 ];then
|
||||
app_username=\$(cat $install_dir/.env |cut -d= -f2 |sed -n 1p)
|
||||
sudo echo "$repo_name login username: \$app_username" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "$repo_name login username: default username, please see the $install_dir/.env" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
if [ "\$app_password_lines" -gt 0 ];then
|
||||
sudo sed -ri "s/(APP_PASSWORD=).*/\1\$new_password/" $install_dir/.env &>/dev/null || true
|
||||
sudo echo "$repo_name login password: \$new_password" |tee -a /credentials/password.txt
|
||||
else
|
||||
sudo echo "$repo_name login password: default password, please see the $install_dir/.env" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
if [ "\$app_port_lines" -gt 0 ];then
|
||||
app_port=\$(cat $install_dir/.env |grep -w "APP_PORT" |cut -d= -f2 |sed -n 1p)
|
||||
sudo echo "$repo_name login port: \$app_port" |tee -a /credentials/password.txt
|
||||
fi
|
||||
|
||||
sudo rm -rf \$cur_dir/{$repo_name.tar,get-docker.sh,docker.service,docker-compose,docker.tgz,docker,install.sh,docker-$repo_name}
|
||||
|
||||
sudo echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" |tee -a /credentials/password.txt
|
||||
|
||||
# Modify public network IP
|
||||
public_ip=\$(wget -O - https://download.websoft9.com/ansible/get_ip.sh 2>/dev/null | timeout 10 bash)
|
||||
case $repo_name in
|
||||
"erpnext")
|
||||
sudo sed -i "s/APP_SITE_NAME.*/APP_SITE_NAME=\$public_ip/g" $install_dir/.env
|
||||
sudo sed -i "s/APP_SITES=.*/APP_SITES=\`\$public_ip\`/g" $install_dir/.env
|
||||
;;
|
||||
"graylog")
|
||||
sudo sed -i "s#APP_HTTP_EXTERNAL_URI=.*#APP_HTTP_EXTERNAL_URI=http://\$public_ip:9001/#g" $install_dir/.env
|
||||
;;
|
||||
"rocketchat")
|
||||
sudo sed -i "s#APP_ROOT_URL=.*#APP_ROOT_URL=http://\$public_ip:9001/#g" $install_dir/.env
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
# Change compose cli environment
|
||||
export DOCKER_CLIENT_TIMEOUT=500
|
||||
export COMPOSE_HTTP_TIMEOUT=500
|
||||
|
||||
sudo systemctl start docker
|
||||
sudo docker-compose up -d
|
||||
sudo clear && sudo docker ps -a
|
||||
sudo echo -e "\n $repo_name installation complete\n"
|
||||
sudo echo -e "\n Please go to \$upper_dir/$repo_name to view the README file"
|
||||
EOF
|
||||
|
||||
# README file
|
||||
cat > /tmp/README <<-EOF
|
||||
Document address:
|
||||
http://support.websoft9.com/docs/$repo_name/zh/
|
||||
Project address:
|
||||
https://github.com/websoft9/docker-$repo_name
|
||||
Password file location:
|
||||
/credentials/password.txt
|
||||
EOF
|
||||
|
||||
# Unpack the pre-installed script
|
||||
cat > /tmp/setup.sh <<-EOF
|
||||
#!/bin/bash
|
||||
line=\`wc -l \$0|awk '{print \$1}'\`
|
||||
line=\`expr \$line - 7\`
|
||||
tail -n \$line \$0 |tar zx -C ~
|
||||
cd ~
|
||||
./install.sh
|
||||
ret=\$?
|
||||
exit \$ret
|
||||
EOF
|
||||
|
||||
sudo chmod +x /tmp/install.sh
|
||||
sudo chmod +x /tmp/setup.sh
|
||||
}
|
||||
|
||||
get_install_information(){
|
||||
install_dir=`curl -s https://raw.githubusercontent.com/Websoft9/docker-$repo_name/main/variables.json |jq -r .installpath` 1>/dev/null
|
||||
compose_file_name=`curl -s https://raw.githubusercontent.com/Websoft9/docker-$repo_name/main/variables.json |jq -r .compose_file` 1>/dev/null
|
||||
compose_env_url="https://raw.githubusercontent.com/Websoft9/docker-$repo_name/main/.env"
|
||||
url_status=`curl -s -m 5 -IL $compose_env_url |grep 200 || true`
|
||||
if [[ $url_status == "" ]];then
|
||||
sudo echo "The env file does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $install_dir == "null" || $compose_file_name = "null" ]];then
|
||||
sudo echo "variables.json has an undefined parameter"
|
||||
exit 1
|
||||
fi
|
||||
sudo echo install path $install_dir
|
||||
sudo echo compose filename $compose_file_name
|
||||
}
|
||||
|
||||
make_package(){
|
||||
sudo rm -rf /tmp/$repo_name.tgz install-$repo_name
|
||||
cd /tmp && tar -zcf /tmp/$repo_name.tgz ./{install.sh,README,$repo_name.tar,docker-$repo_name,docker.tgz,docker.service,docker-compose}
|
||||
sudo cat setup.sh $repo_name.tgz > ~/install-$repo_name
|
||||
sudo chmod +x ~/install-$repo_name
|
||||
cd ~ && sudo echo -e "Image packaging successfully" |boxes -d whirly
|
||||
}
|
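make_package and the generated setup.sh together implement a self-extracting installer: a short shell header is concatenated with a gzipped tar payload, and at run time the header strips itself off, extracts the payload, and runs install.sh. A minimal self-contained sketch of the same pattern (the file names here are illustrative assumptions, not the script's own):

mkdir -p payload && printf '#!/bin/bash\necho "payload ran"\n' > payload/install.sh && chmod +x payload/install.sh
tar zcf payload.tgz payload
cat > header.sh <<'EOF'
#!/bin/bash
# Everything from line 5 onward is the tar.gz payload appended below
tail -n +5 "$0" | tar zx -C "$HOME" && bash "$HOME/payload/install.sh"
exit $?
EOF
cat header.sh payload.tgz > installer.run && chmod +x installer.run
./installer.run    # extracts payload/ into $HOME and runs its install.sh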
||||
|
||||
print_information(){
|
||||
sudo figlet websoft9
|
||||
# Check if the repo exists
|
||||
repo_name_exists=$(curl -s --head https://github.com/Websoft9/docker-$repo_name | head -n 1 |grep -c '200')
|
||||
[ "$repo_name_exists" -ne 1 ] && sudo echo -e "The repo does not exist !" && exit 1
|
||||
|
||||
# Print installation information
|
||||
if [ -n "$repo_name" ] && [ "$make_package" == false ];then
|
||||
sudo echo "docker-$repo_name to be installed..."
|
||||
fi
|
||||
|
||||
if [ -n "$repo_name" ] && [ "$make_package" == true ];then
|
||||
sudo echo "$repo_name will be packaged as an image..."
|
||||
fi
|
||||
}
|
||||
|
||||
install_tools
|
||||
|
||||
print_information
|
||||
|
||||
get_install_information
|
||||
|
||||
if [ $make_package = false ]; then
|
||||
install_docker_script
|
||||
install_docker_compose
|
||||
installation
|
||||
fi
|
||||
|
||||
if [ $make_package = true ]; then
|
||||
install_docker_script
|
||||
install_docker_compose
|
||||
download_docker_source
|
||||
download_docker_compose
|
||||
save_images
|
||||
add_install_script
|
||||
make_package
|
||||
fi
|
|
@ -1,5 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
sudo sed -i 's/#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config
|
||||
sudo systemctl restart sshd
|
||||
sudo passwd root
|
|
@ -1,41 +0,0 @@
|
|||
#!/bin/bash
|
||||
url_list=(
|
||||
api.ipify.org
|
||||
bot.whatismyipaddress.com
|
||||
icanhazip.com
|
||||
ifconfig.co
|
||||
ident.me
|
||||
ifconfig.me
|
||||
ipecho.net/plain
|
||||
ipinfo.io/ip
|
||||
ip.sb
|
||||
whatismyip.akamai.com
|
||||
inet-ip.info
|
||||
)
|
||||
|
||||
curl_ip(){
|
||||
curl --connect-timeout 1 -m 2 $1 2>/dev/null
|
||||
return $?
|
||||
}
|
||||
|
||||
debug(){
|
||||
for x in ${url_list[*]}
|
||||
do
|
||||
curl_ip $x
|
||||
done
|
||||
}
|
||||
|
||||
print_ip(){
|
||||
for n in ${url_list[*]}
|
||||
do
|
||||
public_ip=`curl_ip $n`
|
||||
check_ip=`echo $public_ip | awk -F"." '{print NF}'`
|
||||
if [ ! -z "$public_ip" -a $check_ip -eq "4" ]; then
|
||||
echo $public_ip
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
}
|
||||
#debug
|
||||
print_ip
|
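This is the helper consumed elsewhere in this commit as get_ip.sh, e.g. public_ip=$(wget -O - https://download.websoft9.com/ansible/get_ip.sh 2>/dev/null | bash); it prints the first well-formed public IPv4 address returned by the services listed above.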
|
@ -1,23 +0,0 @@
|
|||
#!/bin/bash
|
||||
#repo=$1
|
||||
#path=$2
|
||||
url_path=(
|
||||
https://ghproxy.com/https://github.com
|
||||
https://github.com)
|
||||
git_clone(){
|
||||
for r in "$@"
|
||||
do
|
||||
filename=$(echo $r | cut -d '/' -f 2)
|
||||
for x in ${url_path[*]}
|
||||
do
|
||||
if [ -e $filename ]; then
|
||||
echo "$filename was cloned successfully"
|
||||
break
|
||||
else
|
||||
timeout -k 1 3 git clone $x/$r.git
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
git_clone "$@"
|
|
@ -1,34 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
|
||||
# This script is created by Bing Chat
|
||||
# You can use the following modified shell script to automatically try cloning a repository from github.com and, if that fails, retry with different mirror addresses until one succeeds. The script first tries github.com and then the mirror addresses listed below, attempting each address 3 times. If cloning still fails, it prints a friendly error message. The script accepts two external parameters: $1 is the username and $2 is the repository name.
|
||||
# Get the external parameters
|
||||
USERNAME="$1"
|
||||
REPO="$2"
|
||||
|
||||
# Build the repository URL
|
||||
REPO_URL="https://github.com/$USERNAME/$REPO.git"
|
||||
|
||||
# List of mirror (accelerated) addresses
|
||||
MIRRORS=(
|
||||
"https://github.com"
|
||||
"https://ghproxy.com/https://github.com"
|
||||
)
|
||||
|
||||
for mirror in "${MIRRORS[@]}"; do
|
||||
# Build the mirrored URL
|
||||
mirror_url="${REPO_URL/https:\/\/github.com/$mirror}"
|
||||
# Try to clone the repository
|
||||
for i in {1..3}; do
|
||||
echo "Trying to clone from $mirror_url (attempt $i)"
|
||||
if git clone --depth=1 "$mirror_url"; then
|
||||
echo "Successfully cloned from $mirror_url"
|
||||
exit 0
|
||||
else
|
||||
echo "Failed to clone from $mirror_url (attempt $i)"
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
echo "Failed to clone the repository after multiple attempts. Please check your network connection and try again later."
|
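A hedged usage sketch (the script's filename is not shown in this diff, so 'clone_with_mirrors.sh' is an illustrative assumption):

bash clone_with_mirrors.sh Websoft9 websoft9    # tries github.com first, then the ghproxy mirror, up to 3 attempts each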
|
@ -1,472 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
export PATH
|
||||
|
||||
function error_exit {
|
||||
echo "$1" 1>&2
|
||||
exit 1
|
||||
}
|
||||
trap 'error_exit "Please push issue to: https://github.com/Websoft9/stackhub/issues"' ERR
|
||||
|
||||
urls="https://w9artifact.blob.core.windows.net/release/websoft9"
|
||||
if [[ "$1" == "dev" ]]; then
|
||||
echo "update by dev artifacts"
|
||||
urls="https://w9artifact.blob.core.windows.net/dev/websoft9"
|
||||
fi
|
||||
|
||||
function get_os_type() {
|
||||
if [ -f /etc/os-release ]; then
|
||||
. /etc/os-release
|
||||
OS=$NAME
|
||||
elif type lsb_release >/dev/null 2>&1; then
|
||||
OS=$(lsb_release -si)
|
||||
else
|
||||
OS=$(uname -s)
|
||||
fi
|
||||
|
||||
if [[ "$OS" == "CentOS Linux" ]]; then
|
||||
echo "CentOS"
|
||||
elif [[ "$OS" == "CentOS Stream" ]]; then
|
||||
echo "CentOS Stream"
|
||||
elif [[ "$OS" == "Rocky Linux" ]]; then
|
||||
echo "Rocky Linux"
|
||||
elif [[ "$OS" == "Oracle Linux Server" ]]; then
|
||||
echo "OracleLinux"
|
||||
elif [[ "$OS" == "Debian GNU/Linux" ]]; then
|
||||
echo "Debian"
|
||||
elif [[ "$OS" == "Ubuntu" ]]; then
|
||||
echo "Ubuntu"
|
||||
elif [[ "$OS" == "Fedora Linux" ]]; then
|
||||
echo "Fedora"
|
||||
elif [[ "$OS" =~ "Red Hat Enterprise Linux" ]]; then
|
||||
echo "Redhat"
|
||||
else
|
||||
echo $OS
|
||||
fi
|
||||
}
|
||||
|
||||
function get_os_version() {
|
||||
if [ -f /etc/os-release ]; then
|
||||
. /etc/os-release
|
||||
OS=$NAME
|
||||
VERSION=$VERSION_ID
|
||||
elif type lsb_release >/dev/null 2>&1; then
|
||||
OS=$(lsb_release -si)
|
||||
VERSION=$(lsb_release -sr)
|
||||
else
|
||||
OS=$(uname -s)
|
||||
VERSION=$(uname -r)
|
||||
fi
|
||||
|
||||
echo $VERSION
|
||||
}
|
||||
os_type=$(get_os_type)
|
||||
os_version=$(get_os_version)
|
||||
|
||||
CheckEnvironment(){
|
||||
|
||||
echo "---------------------------------- Welcome to install websoft9's appstore, it will take 3-5 minutes -------------------------------------------------------"
|
||||
|
||||
echo "Check environment ..."
|
||||
echo os_type: $os_type
|
||||
echo os_version: $os_version
|
||||
if [ $(id -u) != "0" ]; then
|
||||
echo "Please change to root or 'sudo su' to up system privileges, and reinstall the script again ."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ $(getconf WORD_BIT) = '32' ] && [ $(getconf LONG_BIT) = '64' ] ; then
|
||||
echo "64-bit operating system detected."
|
||||
else
|
||||
echo "This script only works on 64-bit operating systems."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'CentOS' ] ;then
|
||||
if [ "$os_version" != "7" ]; then
|
||||
echo "This app only supported on CentOS 7"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'CentOS Stream' ] ;then
|
||||
if [ "$os_version" != "8" ] || [ "$os_version" != "9" ]; then
|
||||
echo "This app only supported on CentOS Stream 8,9"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Rocky Linux' ] ;then
|
||||
if [ "${os_version:0:1}" == "8" ] || [ "${os_version:0:1}" == "9" ]; then
|
||||
echo ""
|
||||
else
|
||||
echo "This app only supported on Rocky Linux 8"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Fedora' ];then
|
||||
if [ "$os_version" != "37" ]; then
|
||||
echo "This app only supported on Fedora 37"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Redhat' ];then
|
||||
if [ "${os_version:0:1}" != "7" ] && [ "${os_version:0:1}" != "8" ] && [ "${os_version:0:1}" != "9" ]; then
|
||||
echo "This app only supported on Redhat 7,8"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Ubuntu' ];then
|
||||
if [ "$os_version" != "22.04" ] && [ "$os_version" != "20.04" ] && [ "$os_version" != "18.04" ]; then
|
||||
echo "This app only supported on Ubuntu 22.04,20.04,18.04"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Debian' ];then
|
||||
if [ "$os_version" != "11" ];then
|
||||
echo "This app only supported on Debian 11"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check port used
|
||||
if netstat -tuln | grep -qE ':(80|443|9000)\s'; then
|
||||
echo "Port 80,443,9000 is already in use."
|
||||
exit 1
|
||||
else
|
||||
echo "Port 80,443, 9000 are free."
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
InstallTools(){
|
||||
|
||||
echo "Prepare to install Tools ..."
|
||||
|
||||
if [ "$os_type" == 'CentOS' ] || [ "$os_type" == 'Rocky Linux' ] || [ "$os_type" == 'CentOS Stream' ] || [ "$os_type" == 'Fedora' ] || [ "$os_type" == 'OracleLinux' ] || [ "$os_type" == 'Redhat' ];then
|
||||
sudo yum update -y
|
||||
sudo yum install git curl wget yum-utils jq bc unzip -y
|
||||
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Ubuntu' ] || [ "$os_type" == 'Debian' ] ;then
|
||||
while fuser /var/lib/dpkg/lock >/dev/null 2>&1 ; do
|
||||
echo "Waiting for other software managers to finish..."
|
||||
sleep 5
|
||||
done
|
||||
sudo apt update -y 1>/dev/null 2>&1
|
||||
if command -v git > /dev/null;then
|
||||
echo "git installed ..."
|
||||
else
|
||||
sudo apt install git -y
|
||||
fi
|
||||
if command -v curl > /dev/null;then
|
||||
echo "jcurlq installed ..."
|
||||
else
|
||||
sudo apt install curl -y
|
||||
fi
|
||||
if command -v wget > /dev/null;then
|
||||
echo "wget installed ..."
|
||||
else
|
||||
sudo apt install wget -y
|
||||
fi
|
||||
if command -v jq > /dev/null;then
|
||||
echo "jq installed ..."
|
||||
else
|
||||
sudo apt install jq -y
|
||||
fi
|
||||
|
||||
if command -v bc > /dev/null;then
|
||||
echo "bc installed ..."
|
||||
else
|
||||
sudo apt install bc -y
|
||||
fi
|
||||
if command -v unzip > /dev/null;then
|
||||
echo "unzip installed ..."
|
||||
else
|
||||
sudo apt install unzip -y
|
||||
fi
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
InstallDocker(){
|
||||
|
||||
if command -v docker &> /dev/null
|
||||
then
|
||||
echo "Docker is installed, update..."
|
||||
if command -v apt > /dev/null;then
|
||||
sudo apt -y install --only-upgrade docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
elif command -v dnf > /dev/null;then
|
||||
sudo dnf update -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
elif command -v yum > /dev/null;then
|
||||
sudo yum update -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
fi
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker
|
||||
if ! docker network inspect websoft9 > /dev/null 2>&1; then
|
||||
sudo docker network create websoft9
|
||||
fi
|
||||
return
|
||||
else
|
||||
echo "Docker is not installed, start to install..."
|
||||
fi
|
||||
if [ "$os_type" == 'CentOS' ];then
|
||||
curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Ubuntu' ] || [ "$os_type" == 'Debian' ] ;then
|
||||
apt-get update
|
||||
while fuser /var/lib/dpkg/lock >/dev/null 2>&1 ; do
|
||||
echo "Waiting for other software managers to finish..."
|
||||
sleep 5
|
||||
done
|
||||
curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh
|
||||
sleep 30
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'OracleLinux' ] ;then
|
||||
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
sudo yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Fedora' ] ;then
|
||||
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/fedora/docker-ce.repo
|
||||
sudo yum install device-mapper-persistent-data lvm2 docker-ce docker-ce-cli containerd.io docker-compose-plugin docker-scan-plugin docker-ce-rootless-extras -y
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Redhat' ] ;then
|
||||
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine podman runc -y 1>/dev/null 2>&1
|
||||
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
sudo yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'CentOS Stream' ] || [ "$os_type" == 'Rocky Linux' ];then
|
||||
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine podman runc -y 1>/dev/null 2>&1
|
||||
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
sudo yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
|
||||
fi
|
||||
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker
|
||||
if ! docker network inspect websoft9 > /dev/null 2>&1; then
|
||||
sudo docker network create websoft9
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
InstallCockpit(){
|
||||
echo "Prepare to install Cockpit ..."
|
||||
|
||||
if [ "${os_type}" == 'Debian' ]; then
|
||||
VERSION_CODENAME=$(cat /etc/os-release |grep VERSION_CODENAME|cut -f2 -d"=")
|
||||
sudo echo "deb http://deb.debian.org/debian ${VERSION_CODENAME}-backports main" >/etc/apt/sources.list.d/backports.list
|
||||
sudo apt update
|
||||
sudo apt install -t ${VERSION_CODENAME}-backports cockpit -y
|
||||
sudo apt install cockpit-pcp cockpit-packagekit -y 1>/dev/null 2>&1
|
||||
fi
|
||||
|
||||
if [ "${os_type}" == 'Ubuntu' ]; then
|
||||
if grep -q "^#.*deb http://mirrors.tencentyun.com/ubuntu.*backports" /etc/apt/sources.list; then
|
||||
echo "Add backports deb ..."
|
||||
sudo sed -i 's/^#\(.*deb http:\/\/mirrors.tencentyun.com\/ubuntu.*backports.*\)/\1/' /etc/apt/sources.list
|
||||
apt update
|
||||
fi
|
||||
VERSION_CODENAME=$(cat /etc/os-release |grep VERSION_CODENAME|cut -f2 -d"=")
|
||||
sudo apt install -t ${VERSION_CODENAME}-backports cockpit -y
|
||||
sudo apt install cockpit-pcp -y 1>/dev/null 2>&1
|
||||
echo "Cockpit allow root user"
|
||||
echo "" >/etc/cockpit/disallowed-users 1>/dev/null 2>&1
|
||||
fi
|
||||
|
||||
if [ "${os_type}" == 'CentOS' ] || [ "$os_type" == 'OracleLinux' ]; then
|
||||
sudo yum install cockpit -y
|
||||
sudo yum install cockpit-pcp cockpit-packagekit -y 1>/dev/null 2>&1
|
||||
sudo systemctl enable --now cockpit.socket
|
||||
sudo firewall-cmd --permanent --zone=public --add-service=cockpit
|
||||
sudo firewall-cmd --reload
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Fedora' ]; then
|
||||
sudo dnf install cockpit -y
|
||||
sudo dnf install cockpit-pcp cockpit-packagekit -y 1>/dev/null 2>&1
|
||||
sudo systemctl enable --now cockpit.socket
|
||||
sudo firewall-cmd --add-service=cockpit
|
||||
sudo firewall-cmd --add-service=cockpit --permanent
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'Redhat' ] ; then
|
||||
sudo subscription-manager repos --enable rhel-7-server-extras-rpms 1>/dev/null 2>&1
|
||||
sudo yum install cockpit -y
|
||||
sudo yum install cockpit-pcp cockpit-packagekit -y 1>/dev/null 2>&1
|
||||
sudo setenforce 0 1>/dev/null 2>&1
|
||||
sudo sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config 1>/dev/null 2>&1
|
||||
sudo systemctl enable --now cockpit.socket
|
||||
sudo firewall-cmd --add-service=cockpit
|
||||
sudo firewall-cmd --add-service=cockpit --permanent
|
||||
fi
|
||||
|
||||
if [ "$os_type" == 'CentOS Stream' ]; then
|
||||
sudo subscription-manager repos --enable rhel-7-server-extras-rpms 1>/dev/null 2>&1
|
||||
sudo yum install cockpit -y
|
||||
sudo yum install cockpit-pcp -y 1>/dev/null 2>&1
|
||||
sudo systemctl enable --now cockpit.socket
|
||||
sudo firewall-cmd --add-service=cockpit
|
||||
sudo firewall-cmd --add-service=cockpit --permanent
|
||||
sudo setenforce 0 1>/dev/null 2>&1
|
||||
sudo sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config 1>/dev/null 2>&1
|
||||
|
||||
fi
|
||||
|
||||
file="/etc/cockpit/disallowed-users"
|
||||
|
||||
if [ -f "$file" ]; then
|
||||
echo "" > "$file"
|
||||
else
|
||||
echo "$file is not exist"
|
||||
fi
|
||||
|
||||
echo "Set cockpit port to 9000 ..."
|
||||
sudo sed -i 's/ListenStream=9090/ListenStream=9000/' /lib/systemd/system/cockpit.socket
|
||||
|
||||
|
||||
}
|
||||
|
||||
InstallPlugins(){
|
||||
|
||||
# download apps
|
||||
mkdir -p /data/apps && cd /data/apps
|
||||
wget $urls/websoft9-latest.zip
|
||||
unzip websoft9-latest.zip
|
||||
cp -r /data/apps/websoft9/docker /data/apps/w9services
|
||||
rm -f websoft9-latest.zip
|
||||
|
||||
# install plugins
|
||||
cd /usr/share/cockpit
|
||||
appstore_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .APPSTORE | tr -d '"')
|
||||
wget $urls/plugin/appstore/appstore-$appstore_version.zip
|
||||
unzip appstore-$appstore_version.zip
|
||||
|
||||
myapps_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .MYAPPS| tr -d '"')
|
||||
wget $urls/plugin/myapps/myapps-$myapps_version.zip
|
||||
unzip myapps-$myapps_version.zip
|
||||
|
||||
portainer_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .PORTAINER | tr -d '"')
|
||||
wget $urls/plugin/portainer/portainer-$portainer_version.zip
|
||||
unzip portainer-$portainer_version.zip
|
||||
|
||||
nginx_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .NGINX | tr -d '"')
|
||||
wget $urls/plugin/nginx/nginx-$nginx_version.zip
|
||||
unzip nginx-$nginx_version.zip
|
||||
|
||||
settings_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .SETTINGS | tr -d '"')
|
||||
wget $urls/plugin/settings/settings-$settings_version.zip
|
||||
unzip settings-$settings_version.zip
|
||||
|
||||
# install navigator
|
||||
navigator_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .NAVIGATOR | tr -d '"')
|
||||
wget $urls/plugin/navigator/navigator-$navigator_version.zip
|
||||
unzip navigator-$navigator_version.zip
|
||||
rm -f *.zip
|
||||
|
||||
# install library
|
||||
cd /data
|
||||
library_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .LIBRARY | tr -d '"')
|
||||
wget $urls/plugin/library/library-$library_version.zip
|
||||
unzip library-$library_version.zip
|
||||
rm -f library-$library_version.zip
|
||||
|
||||
# configure cockpit
|
||||
cp /data/apps/websoft9/cockpit/cockpit.conf /etc/cockpit/cockpit.conf
|
||||
|
||||
#####ci-section#####
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable --now cockpit.socket
|
||||
sudo systemctl restart cockpit.socket
|
||||
|
||||
}
|
||||
|
||||
StartAppMng(){
|
||||
|
||||
echo "Start appmanage API ..."
|
||||
cd /data/apps/w9services/w9redis && sudo docker compose up -d
|
||||
cd /data/apps/w9services/w9appmanage && sudo docker compose up -d
|
||||
|
||||
public_ip=`bash /data/apps/websoft9/scripts/get_ip.sh`
|
||||
echo $public_ip > /data/apps/w9services/w9appmanage/public_ip
|
||||
appmanage_ip=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' websoft9-appmanage)
|
||||
}
|
||||
|
||||
StartPortainer(){
|
||||
|
||||
echo "Start Portainer ..."
|
||||
cd /data/apps/w9services/w9portainer && sudo docker compose up -d
|
||||
docker pull backplane/pwgen
|
||||
new_password=$(docker run --name pwgen backplane/pwgen 15)!
|
||||
docker rm -f pwgen
|
||||
portainer_ip=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' websoft9-portainer)
|
||||
echo "Portainer init password:" $new_password >> /usr/password.txt
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"username":"admin", "Password":"'$new_password'"}' http://$portainer_ip:9000/api/users/admin/init
|
||||
curl "http://$appmanage_ip:5000/AppUpdateUser?user_name=admin&password=$new_password"
|
||||
|
||||
}
|
||||
|
||||
InstallNginx(){
|
||||
|
||||
echo "Install nginxproxymanager ..."
|
||||
cd /data/apps/w9services/w9nginxproxymanager && sudo docker compose up -d
|
||||
sleep 30
|
||||
echo "edit nginxproxymanager password..."
|
||||
nginx_ip=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' websoft9-nginxproxymanager)
|
||||
login_data=$(curl -X POST -H "Content-Type: application/json" -d '{"identity":"admin@example.com","scope":"user", "secret":"changeme"}' http://$nginx_ip:81/api/tokens)
|
||||
#token=$(echo $login_data | grep -Po '(?<="token":")[^"]*')
|
||||
token=$(echo $login_data | jq -r '.token')
|
||||
while [ -z "$token" ]; do
|
||||
sleep 5
|
||||
login_data=$(curl -X POST -H "Content-Type: application/json" -d '{"identity":"admin@example.com","scope":"user", "secret":"changeme"}' http://$nginx_ip:81/api/tokens)
|
||||
token=$(echo $login_data | jq -r '.token')
|
||||
done
|
||||
echo "Nginx token:"$token
|
||||
new_password=$(docker run --name pwgen backplane/pwgen 15)!
|
||||
docker rm -f pwgen
|
||||
echo "Nginx init password:" $new_password >> /usr/password.txt
|
||||
curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $token" -d '{"email": "help@websoft9.com", "nickname": "admin", "is_disabled": false, "roles": ["admin"]}' http://$nginx_ip:81/api/users/1
|
||||
curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $token" -d '{"type":"password","current":"changeme","secret":"'$new_password'"}' http://$nginx_ip:81/api/users/1/auth
|
||||
sleep 3
|
||||
curl "http://$appmanage_ip:5000/AppUpdateUser?user_name=help@websoft9.com&password=$new_password"
|
||||
echo "edit password success ..."
|
||||
while [ ! -d "/var/lib/docker/volumes/w9nginxproxymanager_nginx_data/_data/nginx/proxy_host" ]; do
|
||||
sleep 1
|
||||
done
|
||||
cp /data/apps/w9services/w9nginxproxymanager/initproxy.conf /var/lib/docker/volumes/w9nginxproxymanager_nginx_data/_data/nginx/proxy_host
|
||||
echo $public_ip
|
||||
sudo sed -i "s/domain.com/$public_ip/g" /var/lib/docker/volumes/w9nginxproxymanager_nginx_data/_data/nginx/proxy_host/initproxy.conf
|
||||
sudo docker restart websoft9-nginxproxymanager
|
||||
sudo docker cp websoft9-appmanage:/usr/src/app/db/database.sqlite /usr
|
||||
}
|
||||
|
||||
EditMenu(){
|
||||
|
||||
echo "Start to Edit Cockpit Menu ..."
|
||||
|
||||
# uninstall plugins
|
||||
rm -rf /usr/share/cockpit/apps /usr/share/cockpit/selinux /usr/share/cockpit/kdump /usr/share/cockpit/sosreport /usr/share/cockpit/packagekit
|
||||
cp -r /data/apps/websoft9/cockpit/menu_override/* /etc/cockpit
|
||||
|
||||
echo "---------------------------------- Install success! When installation completed, you can access it by: http://Internet IP:9000 and using Linux user for login to install a app by websoft9's appstore. -------------------------------------------------------"
|
||||
}
|
||||
|
||||
CheckEnvironment
|
||||
InstallTools
|
||||
InstallDocker
|
||||
InstallCockpit
|
||||
InstallPlugins
|
||||
StartAppMng
|
||||
StartPortainer
|
||||
InstallNginx
|
||||
EditMenu
|
|
@ -1,46 +0,0 @@
|
|||
function pipInstall() {
|
||||
requirementsFile=$1
|
||||
declare -a mirrors=(
|
||||
https://pypi.org
|
||||
https://pypi.tuna.tsinghua.edu.cn/simple/
|
||||
https://pypi.mirrors.ustc.edu.cn/simple/
|
||||
https://mirrors.aliyun.com/pypi/simple/
|
||||
https://pypi.hustunique.com/
|
||||
https://pypi.sdutlinux.org/
|
||||
https://pypi.douban.com/simple/
|
||||
https://repo.huaweicloud.com/repository/pypi/simple/
|
||||
)
|
||||
time=-1
|
||||
fastMirror=""
|
||||
for url in "${mirrors[@]}"; do
|
||||
SPEED_DOWNLOAD=$(curl --location --range 0-102400 --max-time 8 --silent --write-out %{speed_download} --output /dev/null "${url}")
|
||||
tempResult=$(echo "${SPEED_DOWNLOAD} ${time}" | awk '{if ($1 > $2) print 1; else print 0}')
|
||||
if [ "${tempReult}" -ne 0 ]; then
|
||||
time=${SPEED_DOWNLOAD}
|
||||
fastMirror=${url}
|
||||
fi
|
||||
done
|
||||
echo "choose the url: ${fastMirror}"
|
||||
pip install -r ${requirementsFile} -i ${fastMirror}
|
||||
}
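# Note: pipInstall picks the fastest mirror by measuring curl's average download
# speed (%{speed_download}, bytes/second) over the first 100 KB of each index URL,
# then installs the given requirements file from that mirror.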
|
||||
|
||||
function retry() {
|
||||
local retries=$1
|
||||
shift
|
||||
|
||||
local count=0
|
||||
until "$@"; do
|
||||
exit=$?
|
||||
wait=$((2 ** $count))
|
||||
count=$(($count + 1))
|
||||
if [ $count -lt $retries ]; then
|
||||
echo "Retry $count/$retries exited $exit, retrying in $wait seconds..."
|
||||
sleep $wait
|
||||
else
|
||||
echo "Retry $count/$retries exited $exit, no more retries left."
|
||||
return $exit
|
||||
fi
|
||||
done
|
||||
return 0
|
||||
}
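# Note: retry uses exponential backoff, waiting 2^attempt seconds between attempts
# (1s, 2s, 4s, ...), and gives up after the requested number of retries.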
|
||||
retry 3 pipInstall requirement.txt
|
|
@ -1,33 +0,0 @@
|
|||
#!/bin/bash
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
|
||||
clear
|
||||
|
||||
# Check if user is root
|
||||
[ $(id -u) != "0" ] && { echo "Error: You must be root to run this script, please use 'sudo su -' command to change root"; exit 1; }
|
||||
|
||||
version(){
|
||||
echo "version: 0.1"
|
||||
echo "updated date: 2019-12-30"
|
||||
}
|
||||
|
||||
Show_Help(){
|
||||
version
|
||||
echo "Usage: $0 command ...[parameters]...
|
||||
--help, -h Show this help message
|
||||
--version, -v Show version info
|
||||
"
|
||||
}
|
||||
|
||||
echo "Pre-installation is starting, please wait for 1-3 minutes..."
|
||||
|
||||
if command -v yum > /dev/null; then
|
||||
sudo yum install -y epel-release 1>/dev/null 2>&1
|
||||
sudo yum install yum-utils git python python3 -y 1>/dev/null 2>&1
|
||||
fi
|
||||
|
||||
if command -v apt > /dev/null; then
|
||||
sudo apt-get install git python python3 -y 1>/dev/null 2>&1
|
||||
sudo apt install software-properties-common -y 1>/dev/null 2>&1
|
||||
fi
|
||||
|
||||
sudo echo "Pre-installation has beend completed"
|
|
@ -1,323 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
export PATH
|
||||
|
||||
function error_exit {
|
||||
echo "$1" 1>&2
|
||||
exit 1
|
||||
}
|
||||
trap 'error_exit "Please push issue to: https://github.com/Websoft9/websoft9/issues"' ERR
|
||||
|
||||
urls="https://w9artifact.blob.core.windows.net/release/websoft9"
|
||||
|
||||
CheckEnv(){
|
||||
echo "------------------ Welcome to update websoft9's appstore, it will take 1-3 minutes -----------------"
|
||||
|
||||
if [ $(id -u) != "0" ]; then
|
||||
echo "Please change to root or 'sudo su' to up system privileges, and reinstall the script again ."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
compare_versions() {
|
||||
local old_version="$1"
|
||||
local new_version="$2"
|
||||
|
||||
IFS='.' read -ra old_parts <<< "$old_version"
|
||||
IFS='.' read -ra new_parts <<< "$new_version"
|
||||
|
||||
for i in "${!old_parts[@]}"; do
|
||||
if [[ ${old_parts[i]} -lt ${new_parts[i]} ]]; then
|
||||
return 0
|
||||
elif [[ ${old_parts[i]} -gt ${new_parts[i]} ]]; then
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
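# Usage sketch (illustrative, matching how the function is called below):
# compare_versions returns 0 when the first version is older than the second,
# compared part by part, and 1 otherwise, so callers test "$?" to decide whether to update:
#   compare_versions "0.8.29" "0.8.30" && echo "update needed"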
|
||||
|
||||
UpdateDocker(){
|
||||
|
||||
echo "Parpare to update Docker to latest ..."
|
||||
|
||||
if command -v apt > /dev/null;then
|
||||
sudo apt -y install --only-upgrade docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
elif command -v dnf > /dev/null;then
|
||||
sudo dnf update -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
elif command -v yum > /dev/null;then
|
||||
sudo yum update -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
UpdateWebsoft9(){
|
||||
|
||||
echo "Update websoft9 ..."
|
||||
|
||||
if [ ! -f /data/apps/websoft9/version.json ]; then
|
||||
old_version="0.0.0"
|
||||
else
|
||||
old_version=$(cat /data/apps/websoft9/version.json | jq .VERSION | tr -d '"')
|
||||
fi
|
||||
release_version=$(curl $urls/version.json | jq .VERSION | tr -d '"')
|
||||
|
||||
compare_versions $old_version $release_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update websoft9..."
|
||||
cd /data/apps && rm -rf websoft9
|
||||
wget $urls/websoft9-latest.zip
|
||||
unzip websoft9-latest.zip
|
||||
rm -rf websoft9-latest.zip
|
||||
else
|
||||
echo "websoft9 is not need to update"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
UpdatePlugins(){
|
||||
|
||||
echo "Check plugins if have update ..."
|
||||
|
||||
# update appstore
|
||||
if [ -f "/usr/share/cockpit/appstore/appstore.json" ]; then
|
||||
old_appstore_version=$(cat /usr/share/cockpit/appstore/appstore.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_appstore_version="0.0.0"
|
||||
fi
|
||||
new_appstore_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .APPSTORE | tr -d '"')
|
||||
|
||||
# update settings
|
||||
if [ -f "/usr/share/cockpit/settings/settings.json" ]; then
|
||||
old_settings_version=$(cat /usr/share/cockpit/settings/settings.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_settings_version="0.0.0"
|
||||
fi
|
||||
new_settings_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .SETTINGS | tr -d '"')
|
||||
|
||||
# update myapps
|
||||
if [ -f "/usr/share/cockpit/myapps/myapps.json" ]; then
|
||||
old_myapps_version=$(cat /usr/share/cockpit/myapps/myapps.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_myapps_version="0.0.0"
|
||||
fi
|
||||
new_myapps_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .MYAPPS | tr -d '"')
|
||||
|
||||
## update container
|
||||
if [ -f "/usr/share/cockpit/container/portainer.json" ]; then
|
||||
old_container_version=$(cat /usr/share/cockpit/container/portainer.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_container_version="0.0.0"
|
||||
fi
|
||||
new_container_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .PORTAINER | tr -d '"')
|
||||
|
||||
## update nginx
|
||||
if [ -f "/usr/share/cockpit/nginx/nginx.json" ]; then
|
||||
old_nginx_version=$(cat /usr/share/cockpit/nginx/nginx.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_nginx_version="0.0.0"
|
||||
fi
|
||||
new_nginx_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .NGINX | tr -d '"')
|
||||
|
||||
## update navigator
|
||||
if [ -f "/usr/share/cockpit/navigator/navigator.json" ]; then
|
||||
old_navigator_version=$(cat /usr/share/cockpit/navigator/navigator.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_navigator_version="0.0.0"
|
||||
fi
|
||||
new_navigator_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .NAVIGATOR | tr -d '"')
|
||||
|
||||
## update library
|
||||
if [ -f "/data/library/library.json" ]; then
|
||||
old_library_version=$(cat /data/library/library.json | jq .Version | tr -d '"')
|
||||
else
|
||||
old_library_version="0.0.0"
|
||||
fi
|
||||
new_library_version=$(cat /data/apps/websoft9/version.json | jq .PLUGINS |jq .LIBRARY | tr -d '"')
|
||||
|
||||
if [ "$old_appstore_version" = "$new_appstore_version" ] && [ "$old_settings_version" = "$new_settings_version" ] && [ "$old_myapps_version" = "$new_myapp_version" ] && [ "$old_container_version" = "$new_container_version" ] && [ "$old_nginx_version" \< "$new_nginx_version" ]; then
|
||||
echo "appstore all plugins is latest"
|
||||
else
|
||||
compare_versions $old_appstore_version $new_appstore_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "appstore plugin need to update"
|
||||
cd /usr/share/cockpit
|
||||
wget $urls/plugin/appstore/appstore-$new_appstore_version.zip
|
||||
rm -rf appstore
|
||||
unzip appstore-$new_appstore_version.zip
|
||||
rm -f appstore-$new_appstore_version.zip
|
||||
else
|
||||
echo "appstore is not need to update"
|
||||
fi
|
||||
|
||||
compare_versions $old_navigator_version $new_navigator_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "navigator plugin need to update"
|
||||
cd /usr/share/cockpit
|
||||
wget $urls/plugin/navigator/navigator-$new_navigator_version.zip
|
||||
rm -rf navigator
|
||||
unzip navigator-$new_navigator_version.zip
|
||||
rm -f navigator-$new_navigator_version.zip
|
||||
else
|
||||
echo "navigator is not need to update"
|
||||
fi
|
||||
|
||||
compare_versions $old_settings_version $new_settings_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "settings plugin need to update"
|
||||
cd /usr/share/cockpit
|
||||
wget $urls/plugin/settings/settings-$new_settings_version.zip
|
||||
rm -rf settings
|
||||
unzip settings-$new_settings_version.zip
|
||||
rm -f settings-$new_settings_version.zip
|
||||
else
|
||||
echo "settings is not need to update"
|
||||
fi
|
||||
|
||||
compare_versions $old_myapps_version $new_myapps_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update myapps..."
|
||||
cd /usr/share/cockpit
|
||||
wget $urls/plugin/myapps/myapps-$new_myapps_version.zip
|
||||
rm -rf myapps
|
||||
unzip myapps-$new_myapps_version.zip
|
||||
rm -f myapps-$new_myapps_version.zip
|
||||
else
|
||||
echo "myapps is not need to update"
|
||||
fi
|
||||
|
||||
compare_versions $old_container_version $new_container_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update portainer..."
|
||||
cd /usr/share/cockpit
|
||||
wget $urls/plugin/portainer/portainer-$new_container_version.zip
|
||||
rm -rf container
|
||||
unzip portainer-$new_container_version.zip
|
||||
rm -f portainer-$new_container_version.zip
|
||||
else
|
||||
echo "portainer is not need to update"
|
||||
fi
|
||||
|
||||
compare_versions $old_nginx_version $new_nginx_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update nginx..."
|
||||
cd /usr/share/cockpit
|
||||
wget $urls/plugin/nginx/nginx-$new_nginx_version.zip
|
||||
rm -rf nginx
|
||||
unzip nginx-$new_nginx_version.zip
|
||||
rm -f nginx-$new_nginx_version.zip
|
||||
else
|
||||
echo "nginx is not need to update"
|
||||
fi
|
||||
|
||||
compare_versions $old_library_version $new_library_version
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update library..."
|
||||
cd /data
|
||||
wget $urls/plugin/library/library-$new_library_version.zip
|
||||
rm -rf library
|
||||
unzip library-$new_library_version.zip
|
||||
rm -f library-$new_library_version.zip
|
||||
else
|
||||
echo "library is not need to update"
|
||||
fi
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
UpdateServices(){
|
||||
echo "Check services if have update ..."
|
||||
old_appmanage=$(cat /data/apps/w9services/w9appmanage/.env |grep APP_VERSION |cut -d= -f2)
|
||||
new_appmanage=$(cat /data/apps/websoft9/docker/w9appmanage/.env |grep APP_VERSION |cut -d= -f2)
|
||||
compare_versions $old_appmanage $new_appmanage
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update w9appmanage..."
|
||||
rm -f /data/apps/w9services/w9appmanage/.env && cp /data/apps/websoft9/docker/w9appmanage/.env /data/apps/w9services/w9appmanage/.env
|
||||
rm -f /data/apps/w9services/w9appmanage/docker-compose.yml && cp /data/apps/websoft9/docker/w9appmanage/docker-compose.yml /data/apps/w9services/w9appmanage/docker-compose.yml
|
||||
cd /data/apps/w9services/w9appmanage && sudo docker compose down && sudo docker compose pull && sudo docker compose up -d
|
||||
else
|
||||
echo "appmanage is not need to update"
|
||||
fi
|
||||
|
||||
old_redis=$(cat /data/apps/w9services/w9redis/.env |grep APP_VERSION |cut -d= -f2)
|
||||
new_redis=$(cat /data/apps/websoft9/docker/w9redis/.env |grep APP_VERSION |cut -d= -f2)
|
||||
compare_versions $old_redis $new_redis
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update w9redis..."
|
||||
rm -f /data/apps/w9services/w9redis/.env && cp /data/apps/websoft9/docker/w9redis/.env /data/apps/w9services/w9redis/.env
|
||||
rm -f /data/apps/w9services/w9redis/docker-compose.yml && cp /data/apps/websoft9/docker/w9redis/docker-compose.yml /data/apps/w9services/w9redis/docker-compose.yml
|
||||
cd /data/apps/w9services/w9redis && sudo docker compose down && sudo docker compose pull && sudo docker compose up -d
|
||||
else
|
||||
echo "redis is not need to update"
|
||||
fi
|
||||
|
||||
old_portainer=$(cat /data/apps/w9services/w9portainer/.env |grep APP_VERSION |cut -d= -f2)
|
||||
new_portainer=$(cat /data/apps/websoft9/docker/w9portainer/.env |grep APP_VERSION |cut -d= -f2)
|
||||
compare_versions $old_portainer $new_portainer
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update w9portainer..."
|
||||
rm -f /data/apps/w9services/w9portainer/.env && cp /data/apps/websoft9/docker/w9portainer/.env /data/apps/w9services/w9portainer/.env
|
||||
rm -f /data/apps/w9services/w9portainer/docker-compose.yml && cp /data/apps/websoft9/docker/w9portainer/docker-compose.yml /data/apps/w9services/w9portainer/docker-compose.yml
|
||||
cd /data/apps/w9services/w9portainer && sudo docker compose down && sudo docker compose pull && sudo docker compose up -d
|
||||
else
|
||||
echo "w9portainer is not need to update"
|
||||
fi
|
||||
|
||||
old_nginx=$(cat /data/apps/w9services/w9nginxproxymanager/.env |grep APP_VERSION |cut -d= -f2)
|
||||
new_nginx=$(cat /data/apps/websoft9/docker/w9nginxproxymanager/.env |grep APP_VERSION |cut -d= -f2)
|
||||
compare_versions $old_nginx $new_nginx
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "start to update w9nginx..."
|
||||
rm -f /data/apps/w9services/w9nginxproxymanager/.env && cp /data/apps/websoft9/docker/w9nginxproxymanager/.env /data/apps/w9services/w9nginxproxymanager/.env
|
||||
rm -f /data/apps/w9services/w9nginxproxymanager/docker-compose.yml && cp /data/apps/websoft9/docker/w9nginxproxymanager/docker-compose.yml /data/apps/w9services/w9nginxproxymanager/docker-compose.yml
|
||||
cd /data/apps/w9services/w9nginxproxymanager && sudo docker compose down && sudo docker compose pull && sudo docker compose up -d
|
||||
else
|
||||
echo "w9nginx is not need to update"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
EditMenu(){
|
||||
|
||||
echo "Start to Edit Cockpit Menu ..."
|
||||
|
||||
# uninstall plugins
|
||||
rm -rf /usr/share/cockpit/apps /usr/share/cockpit/selinux /usr/share/cockpit/kdump /usr/share/cockpit/sosreport /usr/share/cockpit/packagekit
|
||||
cp -r /data/apps/websoft9/cockpit/menu_override/* /etc/cockpit
|
||||
|
||||
}
|
||||
|
||||
UpdateCockpit(){
|
||||
|
||||
echo "Parpare to update Cockpit to latest ..."
|
||||
|
||||
if command -v apt > /dev/null;then
|
||||
current_version=$(dpkg-query --showformat='${Version}' --show cockpit | cut -c 1-3)
|
||||
available_version=$(apt-cache policy cockpit | grep Candidate | awk '{print $2}' | cut -c 1-3)
|
||||
elif command -v yum > /dev/null;then
|
||||
current_version=$(rpm -q --queryformat '%{VERSION}' cockpit | cut -c 1-3)
|
||||
available_version=$(yum list available cockpit --showduplicates | awk '/cockpit/ {print $2}' | sort -V | tail -n 1 | cut -c 1-3)
|
||||
fi
|
||||
if [ "$current_version" \< "$available_version" ]; then
|
||||
echo "There is newer version on cockpit."
|
||||
pkcon refresh
|
||||
pkcon get-updates
|
||||
pkcon update -y 'cockpit' 'cockpit-bridge' 'cockpit-packagekit' 'cockpit-storaged' 'cockpit-system' 'cockpit-ws'
|
||||
EditMenu
|
||||
sudo sed -i 's/ListenStream=9090/ListenStream=9000/' /lib/systemd/system/cockpit.socket
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart cockpit.socket
|
||||
echo "cockpit update finished."
|
||||
else
|
||||
echo "cockpit is latest, not need to upadate."
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
CheckEnv
|
||||
UpdateDocker
|
||||
UpdateWebsoft9
|
||||
UpdatePlugins
|
||||
UpdateServices
|
||||
UpdateCockpit
|
|
@ -1,119 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
## This script is used to download zip files and sync their contents to a target directory
|
||||
## The apphub Dockerfile depends on this script
|
||||
|
||||
# Command-line options
|
||||
# ==========================================================
|
||||
#
|
||||
# --channel <release|dev>
|
||||
# Use the --channel option to install a release (production) or dev distribution. The default is release, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --channel release
|
||||
#
|
||||
# --package_name
|
||||
# Use the --package_name option to define a zip file, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --package_name media.zip
|
||||
#
|
||||
# --sync_to
|
||||
# Use the --sync_to option to define the target directory into which the zip file will be extracted, for example:
|
||||
#
|
||||
# $ sudo bash install.sh --sync_to "/websoft9/media"
|
||||
|
||||
channel="release"
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--channel)
|
||||
channel="$2"
|
||||
shift 2
|
||||
;;
|
||||
--package_name)
|
||||
package_name="$2"
|
||||
shift 2
|
||||
;;
|
||||
--sync_to)
|
||||
sync_to="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z "$package_name" ] || [ -z "$sync_to" ]; then
|
||||
echo "Parameter package_name and sync_to is necessary"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Define the artifact URL as a global variable
|
||||
artifact_url="https://w9artifact.blob.core.windows.net/$channel/websoft9/plugin"
|
||||
|
||||
upgrade_zip() {
|
||||
|
||||
# Create the full URL by appending the package name to the artifact URL
|
||||
local plugin_name=${package_name%%-*}
|
||||
local url="$artifact_url/$plugin_name/$package_name"
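# e.g. for package_name "appstore-0.0.9.zip", plugin_name becomes "appstore"
# and the URL is "$artifact_url/appstore/appstore-0.0.9.zip"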
|
||||
|
||||
# Initialize download attempts
|
||||
local attempts=0
|
||||
local max_attempts=2
|
||||
|
||||
# Download the package using wget
|
||||
while [ $attempts -lt $max_attempts ]; do
|
||||
rm -f "/tmp/$package_name"
|
||||
wget --timeout=120 --no-clobber "$url" -O "/tmp/$package_name"
|
||||
# Check if the download was successful
|
||||
if [ $? -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
attempts=$((attempts+1))
|
||||
echo "Download attempt $attempts failed. Retrying in 5 seconds..."
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# If the download still failed after max attempts, report an error and exit
|
||||
if [ $attempts -eq $max_attempts ]; then
|
||||
echo "Download failed for package: $package_name after $max_attempts attempts."
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Unzip the downloaded package
|
||||
unzip "/tmp/$package_name" -d "/tmp"
|
||||
|
||||
# Check if the unzip was successful
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Unzip failed for package: $package_name"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Get the name of the package without the .zip extension for syncing
|
||||
local package_directory="${package_name%.zip}"
|
||||
package_directory="${package_directory%%-*}"
|
||||
|
||||
if [ "$unzipped_folder" != "/tmp/$package_directory/" ]; then
|
||||
mv "$unzipped_folder" "/tmp/$package_directory"
|
||||
else
|
||||
echo "The unzipped folder has the same name as the target folder."
|
||||
fi
|
||||
|
||||
# Sync the unzipped package to the desired location
|
||||
rsync -av "/tmp/$package_directory/" "$sync_to"
|
||||
|
||||
# Check if the sync was successful
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Sync failed for package: $package_name"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Successfully downloaded, unzipped, and synced package: $package_name"
|
||||
|
||||
# Remove the downloaded .zip file and the unzipped directory
|
||||
rm -f "/tmp/$package_name"
|
||||
rm -rf "/tmp/$package_directory"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
upgrade_zip
|
|
@ -1,22 +0,0 @@
|
|||
# Systemd
|
||||
|
||||
This is the Websoft9 system service. It runs some proxy tasks on the host machine for Websoft9, handling work that the API cannot do by itself.
|
||||
|
||||
- Copy credentials from the other containers to the apphub container
|
||||
|
||||
## Test it
|
||||
|
||||
```
|
||||
export install_path="/data/websoft9/source"
export systemd_path="/opt/websoft9/systemd"  # assumed from WorkingDirectory in websoft9.service below
|
||||
sudo cp -r $install_path/systemd/script/* "$systemd_path"
|
||||
sudo cp -f "$install_path/systemd/websoft9.service" /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable websoft9.service
|
||||
sudo systemctl start websoft9
|
||||
```
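
To check that the service is actually running after starting it, the standard systemd commands can be used (these are generic, not specific to Websoft9):

```
sudo systemctl status websoft9
sudo journalctl -u websoft9 -f
```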
|
||||
|
||||
## Develop it
|
||||
|
||||
* [systemd.exec — Execution environment configuration](https://www.freedesktop.org/software/systemd/man/systemd.exec.html)
|
||||
* [systemd.unit — Unit configuration](https://www.freedesktop.org/software/systemd/man/systemd.unit.html)
|
||||
* [systemd.service — Service unit configuration](https://www.freedesktop.org/software/systemd/man/systemd.service.html)
|
|
@ -1,67 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
|
||||
cockpit_port="9000"
|
||||
container_name="websoft9-apphub"
|
||||
volume_name="websoft9_apphub_config"
|
||||
|
||||
check_ports() {
|
||||
|
||||
local ports=("$@")
|
||||
for port in "${ports[@]}"; do
|
||||
echo "Check port: $port"
|
||||
if ss -tuln | grep ":$port " >/dev/null && ! systemctl status cockpit.socket | grep "$port" >/dev/null; then
|
||||
echo "Port $port is in use, can not set this port to config.ini"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
echo "All ports are available"
|
||||
return 1
|
||||
}
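# Note: check_ports returns 0 when one of the given ports is already occupied
# (and not by cockpit itself), and 1 when all ports are available; on_change
# below relies on this inverted return convention.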
|
||||
|
||||
# get volume from container
|
||||
function get_volume_path() {
|
||||
local container_name="$1"
|
||||
local volume_name="$2"
|
||||
local mounts=$(docker inspect -f '{{ json .Mounts }}' "$container_name" | jq -r '.[] | select(.Name == "'$volume_name'") | .Source')
|
||||
echo "$mounts"
|
||||
}
|
||||
|
||||
volume_path=$(get_volume_path "$container_name" "$volume_name")
|
||||
config_path="$volume_path/config.ini"
|
||||
cockpit_service_path="/lib/systemd/system/cockpit.socket"
|
||||
FILES="$cockpit_service_path $config_path"
|
||||
|
||||
# What needs to be done when the monitored files change
|
||||
on_change() {
|
||||
set +e
|
||||
cockpit_port=$(docker exec -i websoft9-apphub apphub getconfig --section cockpit --key port)
|
||||
listen_stream=$(grep -Po 'ListenStream=\K[0-9]*' /lib/systemd/system/cockpit.socket)
|
||||
if [ "$cockpit_port" != "$listen_stream" ]; then
|
||||
check_ports "$cockpit_port"
|
||||
if [ $? -eq 0 ]; then
|
||||
sudo docker exec -i websoft9-apphub apphub setconfig --section cockpit --key port --value "$listen_stream"
|
||||
else
|
||||
ex -s -c "g/ListenStream=${listen_stream}/s//ListenStream=${cockpit_port}/" -c wq "$cockpit_service_path"
|
||||
systemctl daemon-reload
|
||||
systemctl restart cockpit.socket 2> /dev/null
|
||||
systemctl restart cockpit || exit 1
|
||||
set_Firewalld
|
||||
fi
|
||||
fi
|
||||
set -e
|
||||
}
|
||||
|
||||
set_Firewalld(){
|
||||
echo "Set cockpit service to Firewalld..."
|
||||
sed -i "s/port=\"[0-9]*\"/port=\"$cockpit_port\"/g" /etc/firewalld/services/cockpit.xml 2>/dev/nul
|
||||
sed -i "s/port=\"[0-9]*\"/port=\"$cockpit_port\"/g" /usr/lib/firewalld/services/cockpit.xml 2>/dev/nul
|
||||
firewall-cmd --reload 2>/dev/null
|
||||
}
|
||||
|
||||
# Monitor /lib/systemd/system/cockpit.socket and config.ini, and keep the port in config.ini in sync with cockpit.socket
|
||||
inotifywait -e modify -m $FILES | while read -r WATCH_PATH EVENT FILE; do
|
||||
echo "Set cockpit port by config.ini..."
|
||||
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH
|
||||
on_change
|
||||
done
|
|
@ -1,72 +0,0 @@
|
|||
#!/bin/bash
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
|
||||
|
||||
deployment_username="admin"
|
||||
credentials=("/data/gitea/credential" "/data/credential" "/data/credential")
|
||||
containers=("websoft9-git" "websoft9-deployment" "websoft9-proxy")
|
||||
sections=("gitea" "portainer" "nginx_proxy_manager")
|
||||
max_retries=20
|
||||
|
||||
declare -A usernames passwords
|
||||
|
||||
set +e # Ignore errors
|
||||
|
||||
for i in ${!containers[@]}; do
|
||||
container=${containers[$i]}
|
||||
credential_path=${credentials[$i]}
|
||||
echo "Processing $container"
|
||||
success=false
|
||||
counter=0
|
||||
while [[ $success == false && $counter -lt $max_retries ]]; do
|
||||
temp_file=$(mktemp)
|
||||
if docker cp $container:$credential_path $temp_file; then
|
||||
# Check if temp_file is JSON format
|
||||
if jq -e . >/dev/null 2>&1 <<< "$(cat "$temp_file")"; then
|
||||
# If it is JSON format, use it directly
|
||||
username=$(jq -r '.username' $temp_file)
|
||||
password=$(jq -r '.password' $temp_file)
|
||||
if [[ -n $username && -n $password ]]; then
|
||||
usernames[$container]=$username
|
||||
passwords[$container]=$password
|
||||
success=true
|
||||
fi
|
||||
else
|
||||
# If it is not JSON format, get the content and convert it to JSON
|
||||
content=$(cat "$temp_file")
|
||||
username="$deployment_username"
|
||||
password="$content"
|
||||
if [[ -n $username && -n $password ]]; then
|
||||
usernames[$container]=$username
|
||||
passwords[$container]=$password
|
||||
success=true
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
rm -f "$temp_file"
|
||||
if [[ $success == false ]]; then
|
||||
echo "Waiting for 3 seconds before next attempt..."
|
||||
sleep 3
|
||||
fi
|
||||
((counter++))
|
||||
done
|
||||
if [[ $success == true ]]; then
|
||||
echo "Successfully retrieved credentials for $container"
|
||||
else
|
||||
echo "Failed to retrieve credentials for $container after $max_retries attempts"
|
||||
fi
|
||||
done
|
||||
|
||||
set -e # Stop ignoring errors
|
||||
|
||||
length=${#containers[@]}
|
||||
for ((i=0; i<$length; i++)); do
|
||||
|
||||
container=${containers[$i]}
|
||||
section=${sections[$i]}
|
||||
if [[ -n ${passwords[$container]} ]]; then
|
||||
echo "$container start to set password"
|
||||
docker exec -i websoft9-apphub apphub setconfig --section $section --key user_pwd --value ${passwords[$container]}
|
||||
else
|
||||
echo "Password for $container is not set or empty. Skipping..."
|
||||
fi
|
||||
done
|
|
@ -1,4 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
/bin/bash send_credentials.sh
|
||||
/bin/bash crontab.sh
|
|
@ -1,18 +0,0 @@
|
|||
[Unit]
|
||||
Description=Websoft9 Linux AppStore
|
||||
Requires=network.target docker.service
|
||||
After=network.target docker.service
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/opt/websoft9/systemd
|
||||
ExecStart=/bin/bash /opt/websoft9/systemd/start_up.sh
|
||||
Restart=on-failure
|
||||
Type=simple
|
||||
NotifyAccess=all
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
User=root
|
||||
Group=root
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -1,51 +0,0 @@
|
|||
{
|
||||
"version": "0.8.30-rc1",
|
||||
"plugins": {
|
||||
"portainer": "0.0.8",
|
||||
"nginx": "0.0.6",
|
||||
"gitea": "0.0.3",
|
||||
"myapps": "0.1.0",
|
||||
"appstore": "0.0.9",
|
||||
"settings": "0.0.6",
|
||||
"navigator": "0.5.10"
|
||||
},
|
||||
"OS": {
|
||||
"Fedora": [
|
||||
"38",
|
||||
"37",
|
||||
"35"
|
||||
],
|
||||
"RedHat": [
|
||||
"9",
|
||||
"8",
|
||||
"7"
|
||||
],
|
||||
"CentOS": [
|
||||
"8",
|
||||
"7"
|
||||
],
|
||||
"Oracle Linux": [
|
||||
"9",
|
||||
"8",
|
||||
"7"
|
||||
],
|
||||
"Rocky": [
|
||||
"9",
|
||||
"8"
|
||||
],
|
||||
"CentOS Stream": [
|
||||
"9",
|
||||
"8"
|
||||
],
|
||||
"Debian": [
|
||||
"12",
|
||||
"11",
|
||||
"10"
|
||||
],
|
||||
"Ubuntu": [
|
||||
"23.04",
|
||||
"22.04",
|
||||
"20.04"
|
||||
]
|
||||
}
|
||||
}
|