refactor: translate bash scripts to node in packages/cli

This commit is contained in:
Nicolas Meienberger 2023-08-15 22:49:50 +02:00 committed by Nicolas Meienberger
parent 6efbce5b75
commit c89b9fe752
48 changed files with 2211 additions and 1608 deletions

View file

@ -2,3 +2,4 @@
.eslintrc.js .eslintrc.js
next.config.js next.config.js
jest.config.js jest.config.js
packages/

3
.gitignore vendored
View file

@ -63,3 +63,6 @@ media
/test-results/ /test-results/
/playwright-report/ /playwright-report/
/playwright/.cache/ /playwright/.cache/
temp
./traefik/

View file

@ -55,8 +55,6 @@ We are looking for contributions of all kinds. If you know design, development,
Tipi is licensed under the GNU General Public License v3.0. TL;DR — You may copy, distribute and modify the software as long as you track changes/dates in source files. Any modifications to or software including (via compiler) GPL-licensed code must also be made available under the GPL along with build & install instructions. Tipi is licensed under the GNU General Public License v3.0. TL;DR — You may copy, distribute and modify the software as long as you track changes/dates in source files. Any modifications to or software including (via compiler) GPL-licensed code must also be made available under the GPL along with build & install instructions.
Some of the bash scripts located in the `scripts` folder contain some snippets from [Umbrel](https://github.com/getumbrel/umbrel)'s code. Therefore some parts of the code are licensed under the PolyForm Noncommercial License 1.0.0. These parts have been marked with a comment above to clearly identify them. If you were to use this code in your own project, you have to keep the copyright notice and follow the license guidelines. We are actively working on re-writing those parts in order to make them available under the GPL license like the rest of our code.
## 🗣 Community ## 🗣 Community
- [Matrix](https://matrix.to/#/#runtipi:matrix.org)<br /> - [Matrix](https://matrix.to/#/#runtipi:matrix.org)<br />

View file

@ -64,23 +64,10 @@ services:
condition: service_healthy condition: service_healthy
tipi-redis: tipi-redis:
condition: service_healthy condition: service_healthy
env_file:
- .env
environment: environment:
NODE_ENV: development NODE_ENV: development
INTERNAL_IP: ${INTERNAL_IP}
TIPI_VERSION: ${TIPI_VERSION}
JWT_SECRET: ${JWT_SECRET}
NGINX_PORT: ${NGINX_PORT}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_USERNAME: ${POSTGRES_USERNAME}
POSTGRES_DBNAME: ${POSTGRES_DBNAME}
POSTGRES_HOST: ${POSTGRES_HOST}
APPS_REPO_ID: ${APPS_REPO_ID}
APPS_REPO_URL: ${APPS_REPO_URL}
DOMAIN: ${DOMAIN}
ARCHITECTURE: ${ARCHITECTURE}
REDIS_HOST: ${REDIS_HOST}
DEMO_MODE: ${DEMO_MODE}
LOCAL_DOMAIN: ${LOCAL_DOMAIN}
networks: networks:
- tipi_main_network - tipi_main_network
ports: ports:
@ -90,6 +77,7 @@ services:
# - /dashboard/.next # - /dashboard/.next
- ${PWD}/.env:/runtipi/.env - ${PWD}/.env:/runtipi/.env
- ${PWD}/src:/app/src - ${PWD}/src:/app/src
- ${PWD}/packages:/app/packages
- ${PWD}/state:/runtipi/state - ${PWD}/state:/runtipi/state
- ${PWD}/repos:/runtipi/repos:ro - ${PWD}/repos:/runtipi/repos:ro
- ${PWD}/apps:/runtipi/apps - ${PWD}/apps:/runtipi/apps

View file

@ -1,8 +1,9 @@
{ {
"name": "runtipi", "name": "runtipi",
"version": "1.5.2", "version": "1.6.0",
"description": "A homeserver for everyone", "description": "A homeserver for everyone",
"scripts": { "scripts": {
"prepare": "mkdir -p state && echo \"{}\" > state/system-info.json && echo \"random-seed\" > state/seed",
"copy:migrations": "mkdir -p dist/migrations && cp -r ./src/server/migrations dist", "copy:migrations": "mkdir -p dist/migrations && cp -r ./src/server/migrations dist",
"test": "dotenv -e .env.test -- jest --colors", "test": "dotenv -e .env.test -- jest --colors",
"test:e2e": "NODE_ENV=test dotenv -e .env -e .env.e2e -- playwright test", "test:e2e": "NODE_ENV=test dotenv -e .env -e .env.e2e -- playwright test",
@ -11,6 +12,7 @@
"test:server": "jest --colors --selectProjects server --", "test:server": "jest --colors --selectProjects server --",
"test:vite": "dotenv -e .env.test -- vitest run --coverage", "test:vite": "dotenv -e .env.test -- vitest run --coverage",
"dev": "npm run copy:migrations && npm run db:migrate && nodemon", "dev": "npm run copy:migrations && npm run db:migrate && nodemon",
"dev:watcher": "pnpm -r --filter cli dev",
"db:migrate": "NODE_ENV=development dotenv -e .env -- tsx ./src/server/run-migrations-dev.ts", "db:migrate": "NODE_ENV=development dotenv -e .env -- tsx ./src/server/run-migrations-dev.ts",
"start": "NODE_ENV=production node index.js", "start": "NODE_ENV=production node index.js",
"lint": "next lint", "lint": "next lint",
@ -19,8 +21,8 @@
"build:server": "node ./esbuild.js build", "build:server": "node ./esbuild.js build",
"build:next": "next build", "build:next": "next build",
"start:dev-container": "./.devcontainer/filewatcher.sh && npm run start:dev", "start:dev-container": "./.devcontainer/filewatcher.sh && npm run start:dev",
"start:rc": "docker-compose -f docker-compose.rc.yml --env-file .env up --build", "start:rc": "docker compose -f docker-compose.rc.yml --env-file .env up --build",
"start:dev": "./scripts/start-dev.sh", "start:dev": "npm run prepare && docker compose -f docker-compose.dev.yml up",
"start:e2e": "./scripts/start-e2e.sh latest", "start:e2e": "./scripts/start-e2e.sh latest",
"start:pg": "docker run --name test-db -p 5433:5432 -d --rm -e POSTGRES_PASSWORD=postgres postgres:14", "start:pg": "docker run --name test-db -p 5433:5432 -d --rm -e POSTGRES_PASSWORD=postgres postgres:14",
"version": "echo $npm_package_version", "version": "echo $npm_package_version",
@ -28,7 +30,8 @@
"test:build": "docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 -t meienberger/runtipi:test .", "test:build": "docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 -t meienberger/runtipi:test .",
"test:build:arm64": "docker buildx build --platform linux/arm64 -t meienberger/runtipi:test .", "test:build:arm64": "docker buildx build --platform linux/arm64 -t meienberger/runtipi:test .",
"test:build:arm7": "docker buildx build --platform linux/arm/v7 -t meienberger/runtipi:test .", "test:build:arm7": "docker buildx build --platform linux/arm/v7 -t meienberger/runtipi:test .",
"test:build:amd64": "docker buildx build --platform linux/amd64 -t meienberger/runtipi:test ." "test:build:amd64": "docker buildx build --platform linux/amd64 -t meienberger/runtipi:test .",
"tsc": "tsc"
}, },
"dependencies": { "dependencies": {
"@hookform/resolvers": "^3.1.1", "@hookform/resolvers": "^3.1.1",
@ -41,6 +44,7 @@
"@radix-ui/react-switch": "^1.0.3", "@radix-ui/react-switch": "^1.0.3",
"@radix-ui/react-tabs": "^1.0.4", "@radix-ui/react-tabs": "^1.0.4",
"@runtipi/postgres-migrations": "^5.3.0", "@runtipi/postgres-migrations": "^5.3.0",
"@runtipi/shared": "workspace:^",
"@tabler/core": "1.0.0-beta19", "@tabler/core": "1.0.0-beta19",
"@tabler/icons-react": "^2.23.0", "@tabler/icons-react": "^2.23.0",
"@tanstack/react-query": "^4.29.7", "@tanstack/react-query": "^4.29.7",
@ -50,6 +54,7 @@
"@trpc/react-query": "^10.27.1", "@trpc/react-query": "^10.27.1",
"@trpc/server": "^10.27.1", "@trpc/server": "^10.27.1",
"argon2": "^0.30.3", "argon2": "^0.30.3",
"bullmq": "^4.5.0",
"clsx": "^1.1.1", "clsx": "^1.1.1",
"connect-redis": "^7.1.0", "connect-redis": "^7.1.0",
"cookies-next": "^2.1.2", "cookies-next": "^2.1.2",
@ -118,6 +123,7 @@
"@typescript-eslint/parser": "^5.60.1", "@typescript-eslint/parser": "^5.60.1",
"@vitejs/plugin-react": "^4.0.1", "@vitejs/plugin-react": "^4.0.1",
"@vitest/coverage-v8": "^0.32.2", "@vitest/coverage-v8": "^0.32.2",
"concurrently": "^8.2.0",
"dotenv-cli": "^7.2.1", "dotenv-cli": "^7.2.1",
"drizzle-kit": "^0.19.2", "drizzle-kit": "^0.19.2",
"esbuild": "^0.16.17", "esbuild": "^0.16.17",

8
packages/cli/.env.test Normal file
View file

@ -0,0 +1,8 @@
INTERNAL_IP=localhost
ARCHITECTURE=arm64
APPS_REPO_ID=repo-id
APPS_REPO_URL=https://test.com/test
ROOT_FOLDER_HOST=/runtipi
STORAGE_PATH=/runtipi
TIPI_VERSION=1

View file

@ -39,6 +39,8 @@ services:
container_name: tipi-redis container_name: tipi-redis
image: redis:alpine image: redis:alpine
restart: on-failure restart: on-failure
ports:
- 6379:6379
volumes: volumes:
- ./data/redis:/data - ./data/redis:/data
healthcheck: healthcheck:
@ -60,23 +62,10 @@ services:
condition: service_healthy condition: service_healthy
tipi-redis: tipi-redis:
condition: service_healthy condition: service_healthy
env_file:
- .env
environment: environment:
NODE_ENV: production NODE_ENV: development
INTERNAL_IP: ${INTERNAL_IP}
TIPI_VERSION: ${TIPI_VERSION}
JWT_SECRET: ${JWT_SECRET}
NGINX_PORT: ${NGINX_PORT}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_USERNAME: ${POSTGRES_USERNAME}
POSTGRES_DBNAME: ${POSTGRES_DBNAME}
POSTGRES_HOST: ${POSTGRES_HOST}
APPS_REPO_ID: ${APPS_REPO_ID}
APPS_REPO_URL: ${APPS_REPO_URL}
DOMAIN: ${DOMAIN}
ARCHITECTURE: ${ARCHITECTURE}
REDIS_HOST: ${REDIS_HOST}
DEMO_MODE: ${DEMO_MODE}
LOCAL_DOMAIN: ${LOCAL_DOMAIN}
volumes: volumes:
- ${PWD}/.env:/runtipi/.env - ${PWD}/.env:/runtipi/.env
- ${PWD}/state:/runtipi/state - ${PWD}/state:/runtipi/state
@ -116,7 +105,4 @@ services:
networks: networks:
tipi_main_network: tipi_main_network:
driver: bridge driver: bridge
ipam: name: runtipi_tipi_main_network
driver: default
config:
- subnet: 10.21.21.0/24

View file

@ -0,0 +1,161 @@
import fs from 'fs';
import { describe, it, expect, vi } from 'vitest';
import path from 'path';
import { faker } from '@faker-js/faker';
import { AppExecutors } from '../app.executors';
import { createAppConfig } from '@/tests/apps.factory';
import * as dockerHelpers from '@/utils/docker-helpers';
import { getEnv } from '@/utils/environment/environment';
import { pathExists } from '@/utils/fs-helpers';
const { storagePath, rootFolderHost, appsRepoId } = getEnv();
describe('test: app executors', () => {
const appExecutors = new AppExecutors();
describe('test: installApp()', () => {
it('should run correct compose script', async () => {
// arrange
const spy = vi.spyOn(dockerHelpers, 'compose').mockImplementation(() => Promise.resolve({ stdout: 'done', stderr: '' }));
const config = createAppConfig({}, false);
// act
const { message, success } = await appExecutors.installApp(config.id, config);
// assert
const envExists = await pathExists(path.join(storagePath, 'app-data', config.id, 'app.env'));
expect(success).toBe(true);
expect(message).toBe(`App ${config.id} installed successfully`);
expect(spy).toHaveBeenCalledWith(config.id, 'up -d');
expect(envExists).toBe(true);
spy.mockRestore();
});
it('should delete existing app folder', async () => {
// arrange
const config = createAppConfig();
await fs.promises.mkdir(path.join(rootFolderHost, 'apps', config.id), { recursive: true });
await fs.promises.writeFile(path.join(rootFolderHost, 'apps', config.id, 'test.txt'), 'test');
// act
await appExecutors.installApp(config.id, config);
// assert
const exists = await pathExists(path.join(storagePath, 'apps', config.id, 'test.txt'));
expect(exists).toBe(false);
});
it('should not delete existing app-data folder', async () => {
// arrange
const config = createAppConfig();
const filename = faker.system.fileName();
await fs.promises.writeFile(path.join(storagePath, 'app-data', config.id, filename), 'test');
// act
await appExecutors.installApp(config.id, config);
// assert
const exists = await pathExists(path.join(storagePath, 'app-data', config.id, filename));
expect(exists).toBe(true);
});
it('should copy data folder from repo to app-data/id/data', async () => {
// arrange
const config = createAppConfig({}, false);
const filename = faker.system.fileName();
await fs.promises.mkdir(path.join(rootFolderHost, 'repos', appsRepoId, 'apps', config.id, 'data'), { recursive: true });
await fs.promises.writeFile(path.join(rootFolderHost, 'repos', appsRepoId, 'apps', config.id, 'data', filename), 'test');
// act
await appExecutors.installApp(config.id, config);
// assert
const exists = await pathExists(path.join(storagePath, 'app-data', config.id, 'data', filename));
const data = await fs.promises.readFile(path.join(storagePath, 'app-data', config.id, 'data', filename), 'utf-8');
expect(exists).toBe(true);
expect(data).toBe('test');
});
it('should not overwrite exisiting app-data/id/data folder if repo has one', async () => {
// arrange
const config = createAppConfig();
const filename = faker.system.fileName();
await fs.promises.writeFile(path.join(storagePath, 'app-data', config.id, 'data', filename), 'test');
await fs.promises.mkdir(path.join(rootFolderHost, 'repos', appsRepoId, 'apps', config.id, 'data'), { recursive: true });
await fs.promises.writeFile(path.join(rootFolderHost, 'repos', appsRepoId, 'apps', config.id, 'data', filename), 'yeah');
// act
await appExecutors.installApp(config.id, config);
// assert
const exists = await pathExists(path.join(storagePath, 'app-data', config.id, 'data', filename));
const data = await fs.promises.readFile(path.join(storagePath, 'app-data', config.id, 'data', filename), 'utf-8');
expect(exists).toBe(true);
expect(data).toBe('test');
});
it('should handle errors gracefully', async () => {
// arrange
const spy = vi.spyOn(dockerHelpers, 'compose').mockImplementation(() => Promise.reject(new Error('test')));
const config = createAppConfig();
// act
const { message, success } = await appExecutors.installApp(config.id, config);
// assert
expect(success).toBe(false);
expect(message).toBe('test');
spy.mockRestore();
});
it('should error if app does not exist', async () => {
// act
const { message, success } = await appExecutors.installApp('inexistant', {});
// assert
expect(success).toBe(false);
expect(message).toBe(`App inexistant not found in repo ${appsRepoId}`);
});
});
describe('test: stopApp()', () => {
it('should run correct compose script', async () => {
// arrange
const spy = vi.spyOn(dockerHelpers, 'compose').mockImplementation(() => Promise.resolve({ stdout: 'done', stderr: '' }));
const config = createAppConfig();
// act
const { message, success } = await appExecutors.stopApp(config.id, {}, true);
// assert
expect(success).toBe(true);
expect(message).toBe(`App ${config.id} stopped successfully`);
expect(spy).toHaveBeenCalledWith(config.id, 'rm --force --stop');
spy.mockRestore();
});
// it('should re-genereate app.env file', async () => {
// // arrange
// const config = createAppConfig();
// });
it('should handle errors gracefully', async () => {
// arrange
const spy = vi.spyOn(dockerHelpers, 'compose').mockImplementation(() => Promise.reject(new Error('test')));
const config = createAppConfig();
// act
const { message, success } = await appExecutors.stopApp(config.id, {}, true);
// assert
expect(success).toBe(false);
expect(message).toBe('test');
spy.mockRestore();
});
});
});

View file

@ -0,0 +1,243 @@
import fs from 'fs';
import { describe, it, expect } from 'vitest';
import { faker } from '@faker-js/faker';
import { copyDataDir, generateEnvFile } from '../app.helpers';
import { createAppConfig } from '@/tests/apps.factory';
import { getAppEnvMap } from '../env.helpers';
import { getEnv } from '@/utils/environment/environment';
import { pathExists } from '@/utils/fs-helpers';
const { rootFolderHost, storagePath } = getEnv();
describe('app helpers', () => {
describe('Test: generateEnvFile()', () => {
it('should throw an error if the app has an invalid config.json file', async () => {
// arrange
const appConfig = createAppConfig();
await fs.promises.writeFile(`${rootFolderHost}/apps/${appConfig.id}/config.json`, '{}');
// act & assert
expect(generateEnvFile(appConfig.id, {})).rejects.toThrowError(`App ${appConfig.id} has invalid config.json file`);
});
it('Should generate an env file', async () => {
// arrange
const appConfig = createAppConfig({ form_fields: [{ env_variable: 'TEST_FIELD', type: 'text', label: 'test', required: true }] });
const fakevalue = faker.string.alphanumeric(10);
// act
await generateEnvFile(appConfig.id, { TEST_FIELD: fakevalue });
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('TEST_FIELD')).toBe(fakevalue);
});
it('Should automatically generate value for random field', async () => {
// arrange
const appConfig = createAppConfig({ form_fields: [{ env_variable: 'RANDOM_FIELD', type: 'random', label: 'test', min: 32, max: 32, required: true }] });
// act
await generateEnvFile(appConfig.id, {});
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('RANDOM_FIELD')).toBeDefined();
expect(envmap.get('RANDOM_FIELD')).toHaveLength(32);
});
it('Should not re-generate random field if it already exists', async () => {
// arrange
const appConfig = createAppConfig({ form_fields: [{ env_variable: 'RANDOM_FIELD', type: 'random', label: 'test', min: 32, max: 32, required: true }] });
const randomField = faker.string.alphanumeric(32);
await fs.promises.mkdir(`${rootFolderHost}/app-data/${appConfig.id}`, { recursive: true });
await fs.promises.writeFile(`${rootFolderHost}/app-data/${appConfig.id}/app.env`, `RANDOM_FIELD=${randomField}`);
// act
await generateEnvFile(appConfig.id, {});
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('RANDOM_FIELD')).toBe(randomField);
});
it('Should throw an error if required field is not provided', async () => {
// arrange
const appConfig = createAppConfig({ form_fields: [{ env_variable: 'TEST_FIELD', type: 'text', label: 'test', required: true }] });
// act & assert
await expect(generateEnvFile(appConfig.id, {})).rejects.toThrowError();
});
it('Should throw an error if app does not exist', async () => {
// act & assert
await expect(generateEnvFile('non-existing-app', {})).rejects.toThrowError();
});
it('Should add APP_EXPOSED to env file if domain is provided and app is exposed', async () => {
// arrange
const domain = faker.internet.domainName();
const appConfig = createAppConfig({});
// act
await generateEnvFile(appConfig.id, { domain, exposed: true });
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('APP_EXPOSED')).toBe('true');
expect(envmap.get('APP_DOMAIN')).toBe(domain);
});
it('Should not add APP_EXPOSED if domain is not provided', async () => {
// arrange
const appConfig = createAppConfig({});
// act
await generateEnvFile(appConfig.id, { exposed: true });
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('APP_EXPOSED')).toBeUndefined();
});
it('Should not add APP_EXPOSED if app is not exposed', async () => {
// arrange
const domain = faker.internet.domainName();
const appConfig = createAppConfig({});
// act
await generateEnvFile(appConfig.id, { domain });
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('APP_EXPOSED')).toBeUndefined();
expect(envmap.get('APP_DOMAIN')).toBe(`localhost:${appConfig.port}`);
});
it('Should not re-create app-data folder if it already exists', async () => {
// arrange
const appConfig = createAppConfig({});
await fs.promises.mkdir(`${rootFolderHost}/app-data/${appConfig.id}`, { recursive: true });
// act
await generateEnvFile(appConfig.id, {});
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('APP_EXPOSED')).toBeUndefined();
});
it('should generate vapid private and public keys if config has generate_vapid_keys set to true', async () => {
// arrange
const appConfig = createAppConfig({ generate_vapid_keys: true });
// act
await generateEnvFile(appConfig.id, {});
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('VAPID_PRIVATE_KEY')).toBeDefined();
expect(envmap.get('VAPID_PUBLIC_KEY')).toBeDefined();
});
it('should not generate vapid private and public keys if config has generate_vapid_keys set to false', async () => {
// arrange
const appConfig = createAppConfig({ generate_vapid_keys: false });
// act
await generateEnvFile(appConfig.id, {});
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('VAPID_PRIVATE_KEY')).toBeUndefined();
expect(envmap.get('VAPID_PUBLIC_KEY')).toBeUndefined();
});
it('should not re-generate vapid private and public keys if they already exist', async () => {
// arrange
const appConfig = createAppConfig({ generate_vapid_keys: true });
const vapidPrivateKey = faker.string.alphanumeric(32);
const vapidPublicKey = faker.string.alphanumeric(32);
// act
await fs.promises.mkdir(`${rootFolderHost}/app-data/${appConfig.id}`, { recursive: true });
await fs.promises.writeFile(`${rootFolderHost}/app-data/${appConfig.id}/app.env`, `VAPID_PRIVATE_KEY=${vapidPrivateKey}\nVAPID_PUBLIC_KEY=${vapidPublicKey}`);
await generateEnvFile(appConfig.id, {});
const envmap = await getAppEnvMap(appConfig.id);
// assert
expect(envmap.get('VAPID_PRIVATE_KEY')).toBe(vapidPrivateKey);
expect(envmap.get('VAPID_PUBLIC_KEY')).toBe(vapidPublicKey);
});
});
describe('Test: copyDataDir()', () => {
it('should do nothing if app does not have a data dir', async () => {
// arrange
const appConfig = createAppConfig({});
// act
await copyDataDir(appConfig.id);
// assert
expect(await pathExists(`${rootFolderHost}/apps/${appConfig.id}/data`)).toBe(false);
});
it('should copy data dir to app-data folder', async () => {
// arrange
const appConfig = createAppConfig({});
const dataDir = `${rootFolderHost}/apps/${appConfig.id}/data`;
await fs.promises.mkdir(dataDir, { recursive: true });
await fs.promises.writeFile(`${dataDir}/test.txt`, 'test');
// act
await copyDataDir(appConfig.id);
// assert
const appDataDir = `${storagePath}/app-data/${appConfig.id}`;
expect(await fs.promises.readFile(`${appDataDir}/data/test.txt`, 'utf8')).toBe('test');
});
it('should copy folders recursively', async () => {
// arrange
const appConfig = createAppConfig({});
const dataDir = `${rootFolderHost}/apps/${appConfig.id}/data`;
await fs.promises.mkdir(dataDir, { recursive: true });
const subDir = `${dataDir}/subdir/subsubdir`;
await fs.promises.mkdir(subDir, { recursive: true });
await fs.promises.writeFile(`${subDir}/test.txt`, 'test');
await fs.promises.writeFile(`${dataDir}/test.txt`, 'test');
// act
await copyDataDir(appConfig.id);
// assert
const appDataDir = `${storagePath}/app-data/${appConfig.id}`;
expect(await fs.promises.readFile(`${appDataDir}/data/subdir/subsubdir/test.txt`, 'utf8')).toBe('test');
expect(await fs.promises.readFile(`${appDataDir}/data/test.txt`, 'utf8')).toBe('test');
});
it('should replace the content of .template files with the content of the app.env file', async () => {
// arrange
const appConfig = createAppConfig({});
const dataDir = `${rootFolderHost}/apps/${appConfig.id}/data`;
const appDataDir = `${storagePath}/app-data/${appConfig.id}`;
await fs.promises.mkdir(dataDir, { recursive: true });
await fs.promises.mkdir(appDataDir, { recursive: true });
await fs.promises.writeFile(`${dataDir}/test.txt.template`, '{{TEST_VAR}}');
await fs.promises.writeFile(`${appDataDir}/app.env`, 'TEST_VAR=test');
// act
await copyDataDir(appConfig.id);
// assert
expect(await fs.promises.readFile(`${appDataDir}/data/test.txt`, 'utf8')).toBe('test');
});
});
});

View file

@ -0,0 +1,245 @@
import { appInfoSchema, createLogger } from '@runtipi/shared';
import fs from 'fs';
import path from 'path';
import { promisify } from 'util';
import { exec } from 'child_process';
import { getEnv } from '@/utils/environment/environment';
import { pathExists } from '@/utils/fs-helpers';
import { compose } from '@/utils/docker-helpers';
import { copyDataDir, generateEnvFile } from './app.helpers';
const execAsync = promisify(exec);
export class AppExecutors {
private readonly rootFolderHost: string;
private readonly storagePath: string;
private readonly appsRepoId: string;
private readonly logger;
constructor() {
const { rootFolderHost, storagePath, appsRepoId } = getEnv();
this.rootFolderHost = rootFolderHost;
this.storagePath = storagePath;
this.appsRepoId = appsRepoId;
this.logger = createLogger('app-executors', path.join(rootFolderHost, 'logs'));
}
private handleAppError = (err: unknown) => {
if (err instanceof Error) {
this.logger.error(`An error occurred: ${err.message}`);
return { success: false, message: err.message };
}
return { success: false, message: `An error occurred: ${err}` };
};
private getAppPaths = (appId: string) => {
const appDataDirPath = path.join(this.storagePath, 'app-data', appId);
const appDirPath = path.join(this.rootFolderHost, 'apps', appId);
const configJsonPath = path.join(appDirPath, 'config.json');
const repoPath = path.join(this.rootFolderHost, 'repos', this.appsRepoId, 'apps', appId);
return { appDataDirPath, appDirPath, configJsonPath, repoPath };
};
private ensurePermissions = async (appId: string) => {
const { appDataDirPath, configJsonPath } = this.getAppPaths(appId);
if (!(await pathExists(appDataDirPath))) {
this.logger.info(`Creating app ${appId} data dir`);
await fs.promises.mkdir(appDataDirPath, { recursive: true });
}
// Check if app requires special uid and gid
if (await pathExists(configJsonPath)) {
const config = appInfoSchema.parse(JSON.parse(await fs.promises.readFile(configJsonPath, 'utf-8')));
const { uid, gid } = config;
if (uid && gid) {
this.logger.info(`Setting uid and gid to ${uid}:${gid}`);
await execAsync(`chown -R' ${uid}:${gid} ${path.join(appDataDirPath, 'data')}`);
}
}
// Remove all .gitkeep files from app data dir
await execAsync(`find ${appDataDirPath} -name '.gitkeep' -exec rm -f {} \\;`);
await execAsync(`chmod -R a+rwx ${appDataDirPath}`);
};
/**
* Given an app id, ensures that the app folder exists in the apps folder
* If not, copies the app folder from the repo
* @param {string} appId - App id
*/
private ensureAppDir = async (appId: string) => {
const { appDirPath, repoPath } = this.getAppPaths(appId);
const dockerFilePath = path.join(this.rootFolderHost, 'apps', appId, 'docker-compose.yml');
if (!(await pathExists(dockerFilePath))) {
// delete eventual app folder if exists
this.logger.info(`Deleting app ${appId} folder if exists`);
await fs.promises.rm(appDirPath, { recursive: true, force: true });
// Copy app folder from repo
this.logger.info(`Copying app ${appId} from repo ${getEnv().appsRepoId}`);
await fs.promises.cp(repoPath, appDirPath, { recursive: true });
}
};
/**
* Install an app from the repo
* @param {string} appId - The id of the app to install
* @param {Record<string, unknown>} config - The config of the app
*/
public installApp = async (appId: string, config: Record<string, unknown>) => {
try {
const { appDirPath, repoPath, appDataDirPath } = this.getAppPaths(appId);
this.logger.info(`Installing app ${appId}`);
// Check if app exists in repo
const apps = await fs.promises.readdir(path.join(this.rootFolderHost, 'repos', this.appsRepoId, 'apps'));
if (!apps.includes(appId)) {
this.logger.error(`App ${appId} not found in repo ${this.appsRepoId}`);
return { success: false, message: `App ${appId} not found in repo ${this.appsRepoId}` };
}
// Delete app folder if exists
this.logger.info(`Deleting folder ${appDirPath} if exists`);
await fs.promises.rm(appDirPath, { recursive: true, force: true });
// Create app folder
this.logger.info(`Creating folder ${appDirPath}`);
await fs.promises.mkdir(appDirPath, { recursive: true });
// Copy app folder from repo
this.logger.info(`Copying folder ${repoPath} to ${appDirPath}`);
await fs.promises.cp(repoPath, appDirPath, { recursive: true });
// Create folder app-data folder
this.logger.info(`Creating folder ${appDataDirPath}`);
await fs.promises.mkdir(appDataDirPath, { recursive: true });
// Create app.env file
this.logger.info(`Creating app.env file for app ${appId}`);
await generateEnvFile(appId, config);
// Copy data dir
this.logger.info(`Copying data dir for app ${appId}`);
if (!(await pathExists(`${appDataDirPath}/data`))) {
await copyDataDir(appId);
}
// run docker-compose up
this.logger.info(`Running docker-compose up for app ${appId}`);
await compose(appId, 'up -d');
this.logger.info(`Docker-compose up for app ${appId} finished`);
return { success: true, message: `App ${appId} installed successfully` };
} catch (err) {
return this.handleAppError(err);
}
};
/**
* Stops an app
* @param {string} appId - The id of the app to stop
* @param {Record<string, unknown>} config - The config of the app
*/
public stopApp = async (appId: string, config: Record<string, unknown>, skipEnvGeneration = false) => {
try {
this.logger.info(`Stopping app ${appId}`);
this.logger.info(`Regenerating app.env file for app ${appId}`);
await this.ensureAppDir(appId);
if (!skipEnvGeneration) {
await generateEnvFile(appId, config);
}
await compose(appId, 'rm --force --stop');
this.logger.info(`App ${appId} stopped`);
return { success: true, message: `App ${appId} stopped successfully` };
} catch (err) {
return this.handleAppError(err);
}
};
public startApp = async (appId: string, config: Record<string, unknown>) => {
try {
this.logger.info(`Starting app ${appId}`);
this.logger.info(`Regenerating app.env file for app ${appId}`);
await this.ensureAppDir(appId);
await generateEnvFile(appId, config);
await compose(appId, 'up --detach --force-recreate --remove-orphans');
this.logger.info(`App ${appId} started`);
return { success: true, message: `App ${appId} started successfully` };
} catch (err) {
return this.handleAppError(err);
}
};
public uninstallApp = async (appId: string, config: Record<string, unknown>) => {
try {
const { appDirPath, appDataDirPath } = this.getAppPaths(appId);
this.logger.info(`Uninstalling app ${appId}`);
this.logger.info(`Regenerating app.env file for app ${appId}`);
await this.ensureAppDir(appId);
await generateEnvFile(appId, config);
await compose(appId, 'down --remove-orphans --volumes --rmi all');
this.logger.info(`Deleting folder ${appDirPath}`);
await fs.promises.rm(appDirPath, { recursive: true, force: true });
this.logger.info(`Deleting folder ${appDataDirPath}`);
await fs.promises.rm(appDataDirPath, { recursive: true, force: true });
this.logger.info(`App ${appId} uninstalled`);
return { success: true, message: `App ${appId} uninstalled successfully` };
} catch (err) {
return this.handleAppError(err);
}
};
public updateApp = async (appId: string, config: Record<string, unknown>) => {
try {
const { appDirPath, repoPath } = this.getAppPaths(appId);
this.logger.info(`Updating app ${appId}`);
await this.ensureAppDir(appId);
await generateEnvFile(appId, config);
await compose(appId, 'up --detach --force-recreate --remove-orphans');
await compose(appId, 'down --rmi all --remove-orphans');
this.logger.info(`Deleting folder ${appDirPath}`);
await fs.promises.rm(appDirPath, { recursive: true, force: true });
this.logger.info(`Copying folder ${repoPath} to ${appDirPath}`);
await fs.promises.cp(repoPath, appDirPath, { recursive: true });
await this.ensurePermissions(appId);
await compose(appId, 'pull');
return { success: true, message: `App ${appId} updated successfully` };
} catch (err) {
return this.handleAppError(err);
}
};
public regenerateAppEnv = async (appId: string, config: Record<string, unknown>) => {
try {
this.logger.info(`Regenerating app.env file for app ${appId}`);
await this.ensureAppDir(appId);
await generateEnvFile(appId, config);
return { success: true, message: `App ${appId} env file regenerated successfully` };
} catch (err) {
return this.handleAppError(err);
}
};
}

View file

@ -0,0 +1,189 @@
import crypto from 'crypto';
import fs from 'fs';
import path from 'path';
import { appInfoSchema } from '@runtipi/shared';
import { getEnv } from '@/utils/environment/environment';
import { envMapToString, envStringToMap, generateVapidKeys, getAppEnvMap } from './env.helpers';
import { pathExists } from '@/utils/fs-helpers';
/**
 * Derives a deterministic pseudo-random hex string of the requested length.
 * The provided name is concatenated with the instance seed stored in
 * <root>/state/seed and hashed with SHA-256; the first `length` hex characters
 * of the digest are returned. Same name + same seed always yields the same value.
 *
 * @param {string} name - A name used as input for the hash algorithm.
 * @param {number} length - The desired length of the returned string.
 */
const getEntropy = async (name: string, length: number) => {
  const seed = await fs.promises.readFile(path.join(getEnv().rootFolderHost, 'state', 'seed'));
  const digest = crypto.createHash('sha256').update(name + seed.toString()).digest('hex');
  return digest.substring(0, length);
};
/**
 * This function generates an app.env file for the provided app.
 * It reads the app's config.json, validates it against appInfoSchema, and builds the env
 * map from: the global .env file, always-present defaults (APP_PORT, APP_ID, ...), the
 * app's form fields, and exposure settings (domain vs internal IP).
 * Previously generated values (random secrets, VAPID keys) are read back from the existing
 * app.env so they stay stable across regenerations.
 * It also creates the app-data folder for the app if it does not exist.
 *
 * @param {string} appId - The id of the app to generate the env file for.
 * @param {Record<string, unknown>} config - The form values for the app's fields.
 * @throws Will throw an error if the app has an invalid config.json file or if a required variable is missing.
 */
export const generateEnvFile = async (appId: string, config: Record<string, unknown>) => {
  const { rootFolderHost, storagePath, internalIp } = getEnv();
  const configFile = await fs.promises.readFile(path.join(rootFolderHost, 'apps', appId, 'config.json'));
  const parsedConfig = appInfoSchema.safeParse(JSON.parse(configFile.toString()));
  if (!parsedConfig.success) {
    throw new Error(`App ${appId} has invalid config.json file`);
  }
  // Start from the system-wide .env so apps inherit the global variables
  const baseEnvFile = await fs.promises.readFile(path.join(rootFolderHost, '.env'));
  const envMap = envStringToMap(baseEnvFile.toString());
  // Default always present env variables
  envMap.set('APP_PORT', String(parsedConfig.data.port));
  envMap.set('APP_ID', appId);
  envMap.set('ROOT_FOLDER_HOST', rootFolderHost);
  envMap.set('APP_DATA_DIR', path.join(storagePath, 'app-data', appId));
  // Existing values are reused so secrets do not rotate on every regeneration
  const existingEnvMap = await getAppEnvMap(appId);
  if (parsedConfig.data.generate_vapid_keys) {
    if (existingEnvMap.has('VAPID_PUBLIC_KEY') && existingEnvMap.has('VAPID_PRIVATE_KEY')) {
      envMap.set('VAPID_PUBLIC_KEY', existingEnvMap.get('VAPID_PUBLIC_KEY') as string);
      envMap.set('VAPID_PRIVATE_KEY', existingEnvMap.get('VAPID_PRIVATE_KEY') as string);
    } else {
      const vapidKeys = generateVapidKeys();
      envMap.set('VAPID_PUBLIC_KEY', vapidKeys.publicKey);
      envMap.set('VAPID_PRIVATE_KEY', vapidKeys.privateKey);
    }
  }
  await Promise.all(
    parsedConfig.data.form_fields.map(async (field) => {
      const formValue = config[field.env_variable];
      const envVar = field.env_variable;
      // NOTE(review): a falsy non-boolean value (0 or '') falls through to the
      // random/required branches — confirm 0/'' are never valid form values here
      if (formValue || typeof formValue === 'boolean') {
        envMap.set(envVar, String(formValue));
      } else if (field.type === 'random') {
        if (existingEnvMap.has(envVar)) {
          envMap.set(envVar, existingEnvMap.get(envVar) as string);
        } else {
          const length = field.min || 32;
          const randomString = await getEntropy(field.env_variable, length);
          envMap.set(envVar, randomString);
        }
      } else if (field.required) {
        throw new Error(`Variable ${field.label || field.env_variable} is required`);
      }
    }),
  );
  if (config.exposed && config.domain && typeof config.domain === 'string') {
    envMap.set('APP_EXPOSED', 'true');
    envMap.set('APP_DOMAIN', config.domain);
    envMap.set('APP_PROTOCOL', 'https');
    envMap.set('APP_HOST', config.domain);
  } else {
    // Not exposed: the app is reached through the internal IP and its port
    envMap.set('APP_DOMAIN', `${internalIp}:${parsedConfig.data.port}`);
    envMap.set('APP_HOST', internalIp);
  }
  // Create app-data folder if it doesn't exist
  const appDataDirectoryExists = await fs.promises.stat(path.join(storagePath, 'app-data', appId)).catch(() => false);
  if (!appDataDirectoryExists) {
    await fs.promises.mkdir(path.join(storagePath, 'app-data', appId), { recursive: true });
  }
  await fs.promises.writeFile(path.join(storagePath, 'app-data', appId, 'app.env'), envMapToString(envMap));
};
/**
 * Given a template and a map of variables, this function replaces all instances of
 * `{{KEY}}` placeholders in the template with their values.
 *
 * Fix: uses split/join instead of `String.replace(new RegExp(...))` so that keys
 * containing regex metacharacters and values containing `$`-patterns (e.g. "$&",
 * "$'") are substituted literally instead of being interpreted by the regex engine
 * or the replacement-string syntax.
 *
 * @param {string} template - The template to be rendered.
 * @param {Map<string, string>} envMap - The map of variables and their values.
 */
const renderTemplate = (template: string, envMap: Map<string, string>) => {
  let renderedTemplate = template;
  envMap.forEach((value, key) => {
    renderedTemplate = renderedTemplate.split(`{{${key}}}`).join(value);
  });
  return renderedTemplate;
};
/**
 * Given an app, this function copies the app's data directory to the app-data folder.
 * If a file with an extension of .template is found, it will be copied as a file without
 * the .template extension and the `{{VAR}}` placeholders will be replaced by the values
 * in the app's env file. Subdirectories are processed recursively with the same rules.
 *
 * @param {string} id - The id of the app.
 */
export const copyDataDir = async (id: string) => {
  const { rootFolderHost, storagePath } = getEnv();
  const envMap = await getAppEnvMap(id);
  // return if app does not have a data directory
  if (!(await pathExists(`${rootFolderHost}/apps/${id}/data`))) {
    return;
  }
  // Create app-data folder if it doesn't exist
  if (!(await pathExists(`${storagePath}/app-data/${id}/data`))) {
    await fs.promises.mkdir(`${storagePath}/app-data/${id}/data`, { recursive: true });
  }
  const dataDir = await fs.promises.readdir(`${rootFolderHost}/apps/${id}/data`);
  // Copies one file into app-data; .template files are rendered, others copied verbatim.
  // `file` is a path relative to the data root (may contain sub-folders when called from processDir).
  const processFile = async (file: string) => {
    if (file.endsWith('.template')) {
      const template = await fs.promises.readFile(`${rootFolderHost}/apps/${id}/data/${file}`, 'utf-8');
      const renderedTemplate = renderTemplate(template, envMap);
      await fs.promises.writeFile(`${storagePath}/app-data/${id}/data/${file.replace('.template', '')}`, renderedTemplate);
    } else {
      await fs.promises.copyFile(`${rootFolderHost}/apps/${id}/data/${file}`, `${storagePath}/app-data/${id}/data/${file}`);
    }
  };
  // Recursively mirrors a sub-directory (path `p` relative to the data root) into app-data
  const processDir = async (p: string) => {
    await fs.promises.mkdir(`${storagePath}/app-data/${id}/data/${p}`, { recursive: true });
    const files = await fs.promises.readdir(`${rootFolderHost}/apps/${id}/data/${p}`);
    await Promise.all(
      files.map(async (file) => {
        const fullPath = `${rootFolderHost}/apps/${id}/data/${p}/${file}`;
        if ((await fs.promises.lstat(fullPath)).isDirectory()) {
          await processDir(`${p}/${file}`);
        } else {
          await processFile(`${p}/${file}`);
        }
      }),
    );
  };
  // Walk the top level of the data directory, dispatching to processDir/processFile
  await Promise.all(
    dataDir.map(async (file) => {
      const fullPath = `${rootFolderHost}/apps/${id}/data/${file}`;
      if ((await fs.promises.lstat(fullPath)).isDirectory()) {
        await processDir(file);
      } else {
        await processFile(file);
      }
    }),
  );
};

View file

@ -0,0 +1,68 @@
import webpush from 'web-push';
import fs from 'fs';
import path from 'path';
import { getEnv } from '@/utils/environment/environment';
/**
 * Convert a string of environment variables to a Map.
 *
 * Fix: only the FIRST '=' separates key from value, so values that themselves
 * contain '=' (base64 secrets, connection URLs, ...) are preserved intact instead
 * of being truncated at the second '='. Lines without an '=', with an empty key
 * or an empty value are skipped, as before.
 *
 * @param {string} envString - String of environment variables
 */
export const envStringToMap = (envString: string) => {
  const envMap = new Map<string, string>();
  const envArray = envString.split('\n');
  envArray.forEach((env) => {
    const separatorIndex = env.indexOf('=');
    if (separatorIndex <= 0) return;
    const key = env.substring(0, separatorIndex);
    const value = env.substring(separatorIndex + 1);
    if (key && value) {
      envMap.set(key, value);
    }
  });
  return envMap;
};
/**
 * Serializes a Map of environment variables into the `KEY=value` lines of a
 * .env file, preserving the Map's insertion order.
 *
 * @param {Map<string, string>} envMap - Map of environment variables
 */
export const envMapToString = (envMap: Map<string, string>) => Array.from(envMap.entries(), ([key, value]) => `${key}=${value}`).join('\n');
/**
 * This function reads the env file for the app with the provided id and returns a Map
 * containing the key-value pairs of the environment variables. Returns an empty Map
 * when the file cannot be read (e.g. the app was never installed).
 *
 * Fix: splits each line on the FIRST '=' only (consistent with envStringToMap), so
 * values containing '=' are no longer truncated.
 *
 * @param {string} appId - App ID
 */
export const getAppEnvMap = async (appId: string) => {
  try {
    const envFile = await fs.promises.readFile(path.join(getEnv().storagePath, 'app-data', appId, 'app.env'));
    const envVars = envFile.toString().split('\n');
    const envVarsMap = new Map<string, string>();
    envVars.forEach((envVar) => {
      const separatorIndex = envVar.indexOf('=');
      if (separatorIndex <= 0) return;
      const key = envVar.substring(0, separatorIndex);
      const value = envVar.substring(separatorIndex + 1);
      if (key && value) envVarsMap.set(key, value);
    });
    return envVarsMap;
  } catch (e) {
    return new Map<string, string>();
  }
};
/**
 * Generate a fresh pair of VAPID keys for web-push notifications.
 */
export const generateVapidKeys = () => {
  const { publicKey, privateKey } = webpush.generateVAPIDKeys();
  return { publicKey, privateKey };
};

View file

@ -0,0 +1,3 @@
// Barrel file: re-exports the executors (app, repo, system) consumed by the CLI entry point and the watcher.
export { AppExecutors } from './app/app.executors';
export { RepoExecutors } from './repo/repo.executors';
export { SystemExecutors } from './system/system.executors';

View file

@ -0,0 +1,96 @@
import { getEnv } from 'src/utils/environment/environment';
import { createLogger } from '@runtipi/shared';
import path from 'path';
import { promisify } from 'util';
import { exec } from 'child_process';
import { pathExists } from '@/utils/fs-helpers';
import { getRepoHash } from './repo.helpers';
const execAsync = promisify(exec);
/**
 * Executes git operations (clone/pull) for app-store repositories under <root>/repos.
 */
export class RepoExecutors {
  private readonly rootFolderHost: string;

  private readonly logger;

  constructor() {
    const { rootFolderHost } = getEnv();
    this.rootFolderHost = rootFolderHost;
    this.logger = createLogger('repo-executors', path.join(rootFolderHost, 'logs'));
  }

  /**
   * Error handler for repo operations.
   * Normalizes any thrown value into a { success, message } result and logs it.
   *
   * @param {unknown} err
   */
  private handleRepoError = (err: unknown) => {
    if (err instanceof Error) {
      this.logger.error(`An error occurred: ${err.message}`);
      return { success: false, message: err.message };
    }
    return { success: false, message: `An error occurred: ${err}` };
  };

  /**
   * Given a repo url, clone it to the repos folder if it doesn't exist.
   *
   * Fix: git writes informational output (e.g. "Cloning into ...") to stderr even on
   * success, and execAsync already rejects on a non-zero exit code — so the presence
   * of stderr output must not be treated as a failure.
   *
   * @param {string} repoUrl
   */
  public cloneRepo = async (repoUrl: string) => {
    try {
      const repoHash = getRepoHash(repoUrl);
      const repoPath = path.join(this.rootFolderHost, 'repos', repoHash);

      if (await pathExists(repoPath)) {
        this.logger.info(`Repo ${repoUrl} already exists`);
        return { success: true, message: '' };
      }

      this.logger.info(`Cloning repo ${repoUrl} to ${repoPath}`);
      // NOTE(review): repoUrl is interpolated into a shell command — confirm it is
      // validated upstream, otherwise a crafted url could inject shell commands.
      const { stdout, stderr } = await execAsync(`git clone ${repoUrl} ${repoPath}`);
      if (stderr) {
        this.logger.info(`git clone stderr: ${stderr}`);
      }

      this.logger.info(`Cloned repo ${repoUrl} to ${repoPath}`);
      return { success: true, message: stdout };
    } catch (err) {
      return this.handleRepoError(err);
    }
  };

  /**
   * Given a repo url, pull it in the repos folder if it exists.
   * Same stderr handling as cloneRepo: rely on the exit code (execAsync throws on
   * failure) instead of the presence of stderr output.
   *
   * @param {string} repoUrl
   */
  public pullRepo = async (repoUrl: string) => {
    try {
      const repoHash = getRepoHash(repoUrl);
      const repoPath = path.join(this.rootFolderHost, 'repos', repoHash);

      if (!(await pathExists(repoPath))) {
        this.logger.info(`Repo ${repoUrl} does not exist`);
        return { success: false, message: `Repo ${repoUrl} does not exist` };
      }

      this.logger.info(`Pulling repo ${repoUrl} to ${repoPath}`);
      const { stdout, stderr } = await execAsync(`git -C ${repoPath} pull`);
      if (stderr) {
        this.logger.info(`git pull stderr: ${stderr}`);
      }

      this.logger.info(`Pulled repo ${repoUrl} to ${repoPath}`);
      return { success: true, message: stdout };
    } catch (err) {
      return this.handleRepoError(err);
    }
  };
}

View file

@ -0,0 +1,12 @@
import crypto from 'crypto';
/**
 * Given a repo url, return its SHA-256 hex digest, used as a stable folder name
 * for the local clone of that repository.
 *
 * @param {string} repoUrl
 */
export const getRepoHash = (repoUrl: string) => crypto.createHash('sha256').update(repoUrl).digest('hex');

View file

@ -0,0 +1,300 @@
import fs from 'fs';
import cliProgress from 'cli-progress';
import semver from 'semver';
import axios from 'axios';
import boxen from 'boxen';
import path from 'path';
import { promisify } from 'util';
import { exec, spawn } from 'child_process';
import si from 'systeminformation';
import { createLogger } from '@runtipi/shared';
import { Stream } from 'stream';
import { AppExecutors } from '../app/app.executors';
import { copySystemFiles, generateSystemEnvFile, generateTlsCertificates } from './system.helpers';
import { TerminalSpinner } from '@/utils/logger/terminal-spinner';
import { pathExists } from '@/utils/fs-helpers';
import { getEnv } from '@/utils/environment/environment';
const logger = createLogger('system-executors', path.join(process.cwd(), 'logs'));
const execAsync = promisify(exec);
/**
 * High-level lifecycle operations for the Tipi stack: start/stop/restart, system info
 * reporting, password-reset requests and self-update of the runtipi-cli binary.
 */
export class SystemExecutors {
  private readonly rootFolder: string;

  private readonly envFile: string;

  constructor() {
    this.rootFolder = process.cwd();
    this.envFile = path.join(this.rootFolder, '.env');
  }

  /**
   * Normalizes any thrown value into a { success, message } result and logs it.
   * @param {unknown} err
   */
  private handleSystemError = (err: unknown) => {
    if (err instanceof Error) {
      logger.error(`An error occurred: ${err.message}`);
      return { success: false, message: err.message };
    }
    return { success: false, message: `An error occurred: ${err}` };
  };

  /**
   * Collects current CPU load, memory usage and usage of the first reported filesystem.
   */
  private getSystemLoad = async () => {
    const { currentLoad } = await si.currentLoad();
    const mem = await si.mem();
    const [disk0] = await si.fsSize();

    return {
      cpu: { load: currentLoad },
      memory: { total: mem.total, used: mem.used, available: mem.available },
      disk: { total: disk0?.size, used: disk0?.used, available: disk0?.available },
    };
  };

  /**
   * Writes the current system load to state/system-info.json (world-readable) so the
   * dashboard container can read it.
   */
  public systemInfo = async () => {
    try {
      const { rootFolderHost } = getEnv();
      const systemLoad = await this.getSystemLoad();

      await fs.promises.writeFile(path.join(rootFolderHost, 'state', 'system-info.json'), JSON.stringify(systemLoad, null, 2));
      await fs.promises.chmod(path.join(rootFolderHost, 'state', 'system-info.json'), 0o777);

      return { success: true, message: '' };
    } catch (e) {
      return this.handleSystemError(e);
    }
  };

  /**
   * This method will stop Tipi.
   * It will stop all the apps and then stop the main containers.
   */
  public stop = async () => {
    try {
      const spinner = new TerminalSpinner('Stopping Tipi...');

      if (await pathExists(path.join(this.rootFolder, 'apps'))) {
        const apps = await fs.promises.readdir(path.join(this.rootFolder, 'apps'));
        const appExecutor = new AppExecutors();

        await Promise.all(
          apps.map(async (app) => {
            const appSpinner = new TerminalSpinner(`Stopping ${app}...`);
            appSpinner.start();
            await appExecutor.stopApp(app, {}, true);
            appSpinner.done(`${app} stopped`);
          }),
        );
      }

      spinner.setMessage('Stopping containers...');
      spinner.start();
      await execAsync('docker compose down --remove-orphans --rmi local');

      spinner.done('Tipi successfully stopped');

      return { success: true, message: 'Tipi stopped' };
    } catch (e) {
      return this.handleSystemError(e);
    }
  };

  /**
   * This method will start Tipi.
   * It will copy the system files, generate the system env file, pull the images and
   * start the containers, then launch the background watcher process.
   */
  public start = async () => {
    try {
      const spinner = new TerminalSpinner('Starting Tipi...');
      spinner.start();
      spinner.setMessage('Copying system files...');
      await copySystemFiles();
      spinner.done('System files copied');

      spinner.setMessage('Generating system env file...');
      spinner.start();
      const envMap = await generateSystemEnvFile();
      spinner.done('System env file generated');

      // Stop and remove the main containers if they exist.
      // Fix: `docker rm -f` exits non-zero when the container does not exist (e.g. on
      // the very first start), which made execAsync throw and abort the whole start —
      // so each removal is best-effort.
      spinner.setMessage('Stopping and removing containers...');
      spinner.start();
      await execAsync('docker rm -f tipi-db').catch(() => {});
      await execAsync('docker rm -f tipi-redis').catch(() => {});
      await execAsync('docker rm -f dashboard').catch(() => {});
      await execAsync('docker rm -f reverse-proxy').catch(() => {});
      spinner.done('Containers stopped and removed');

      // Pull images
      spinner.setMessage('Pulling images...');
      spinner.start();
      await execAsync(`docker compose --env-file "${this.envFile}" pull`);
      spinner.done('Images pulled');

      // Start containers
      spinner.setMessage('Starting containers...');
      spinner.start();
      await execAsync(`docker compose --env-file "${this.envFile}" up --detach --remove-orphans --build`);
      spinner.done('Containers started');

      // start watcher cli in the background
      spinner.setMessage('Starting watcher...');
      spinner.start();

      await generateTlsCertificates({ domain: envMap.get('LOCAL_DOMAIN') });

      // The watcher runs detached with its output appended to logs/watcher.log so it
      // survives this process exiting.
      const out = fs.openSync('./logs/watcher.log', 'a');
      const err = fs.openSync('./logs/watcher.log', 'a');

      const subprocess = spawn('./runtipi-cli', [process.argv[1] as string, 'watch'], { cwd: this.rootFolder, detached: true, stdio: ['ignore', out, err] });
      subprocess.unref();

      spinner.done('Watcher started');

      console.log(
        boxen(`Visit: http://${envMap.get('INTERNAL_IP')}:${envMap.get('NGINX_PORT')} to access the dashboard\n\nFind documentation and guides at: https://runtipi.io`, {
          title: 'Tipi successfully started 🎉',
          titleAlignment: 'center',
          padding: 1,
          borderStyle: 'double',
          borderColor: 'green',
          margin: { top: 1 },
        }),
      );

      return { success: true, message: 'Tipi started' };
    } catch (e) {
      return this.handleSystemError(e);
    }
  };

  /**
   * This method will stop and start Tipi.
   */
  public restart = async () => {
    try {
      await this.stop();
      await this.start();
      return { success: true, message: '' };
    } catch (e) {
      return this.handleSystemError(e);
    }
  };

  /**
   * This method will create a password change request file in the state folder.
   * The dashboard watches for this file to allow setting a new password.
   */
  public resetPassword = async () => {
    const { rootFolderHost } = getEnv();
    await fs.promises.writeFile(path.join(rootFolderHost, 'state', 'password-change-request'), '');
  };

  /**
   * Given a target version, this method will download the corresponding release from
   * GitHub and replace the current runtipi-cli binary with the new one, then relaunch
   * `start` through the new binary.
   *
   * @param {string} target - A semver version, or 'latest' to resolve the newest release.
   */
  public update = async (target: string) => {
    const spinner = new TerminalSpinner('Evaluating target version...');
    try {
      spinner.start();
      let targetVersion = target;

      if (!targetVersion || targetVersion === 'latest') {
        spinner.setMessage('Fetching latest version...');
        // Fix: the `/releases` endpoint returns an ARRAY of releases (no tag_name on the
        // payload itself); `/releases/latest` returns the single latest release object.
        const { data } = await axios.get<{ tag_name: string }>('https://api.github.com/repos/meienberger/runtipi/releases/latest');
        targetVersion = data.tag_name;
      }

      if (!semver.valid(targetVersion)) {
        spinner.fail(`Invalid version: ${targetVersion}`);
        throw new Error(`Invalid version: ${targetVersion}`);
      }

      const { rootFolderHost, arch } = getEnv();

      let assetName = 'runtipi-cli-linux-x64';
      if (arch === 'arm64') {
        assetName = 'runtipi-cli-linux-arm64';
      }

      const fileName = `runtipi-cli-${targetVersion}`;
      const savePath = path.join(rootFolderHost, fileName);
      const fileUrl = `https://github.com/meienberger/runtipi/releases/download/${targetVersion}/${assetName}`;

      spinner.done(`Target version: ${targetVersion}`);
      spinner.done(`Download url: ${fileUrl}`);

      await this.stop();

      console.log(`Downloading Tipi ${targetVersion}...`);

      const bar = new cliProgress.SingleBar({}, cliProgress.Presets.rect);
      bar.start(100, 0);

      await new Promise((resolve, reject) => {
        axios<Stream>({
          method: 'GET',
          url: fileUrl,
          responseType: 'stream',
          onDownloadProgress: (progress) => {
            // Guard against a missing/zero total, which would yield Infinity/NaN
            bar.update(progress.total ? Math.round((progress.loaded / progress.total) * 100) : 0);
          },
        }).then((response) => {
          const writer = fs.createWriteStream(savePath);
          response.data.pipe(writer);

          writer.on('error', (err) => {
            bar.stop();
            spinner.fail(`\nFailed to download Tipi ${targetVersion}`);
            reject(err);
          });

          writer.on('finish', () => {
            bar.stop();
            resolve('');
          });
        });
      }).catch((e) => {
        spinner.fail(`\nFailed to download Tipi ${targetVersion}. Please make sure this version exists on GitHub.`);
        throw e;
      });

      spinner.done(`Tipi ${targetVersion} downloaded`);
      await fs.promises.chmod(savePath, 0o755);

      spinner.setMessage('Replacing old cli...');
      spinner.start();

      // Delete old cli
      if (await pathExists(path.join(rootFolderHost, 'runtipi-cli'))) {
        await fs.promises.unlink(path.join(rootFolderHost, 'runtipi-cli'));
      }

      // Delete VERSION file
      if (await pathExists(path.join(rootFolderHost, 'VERSION'))) {
        await fs.promises.unlink(path.join(rootFolderHost, 'VERSION'));
      }

      // Rename downloaded cli to runtipi-cli
      await fs.promises.rename(savePath, path.join(rootFolderHost, 'runtipi-cli'));
      spinner.done('Old cli replaced');

      // Relaunch through the freshly installed binary so the new version starts the stack
      const childProcess = spawn('./runtipi-cli', [process.argv[1] as string, 'start']);

      childProcess.stdout.on('data', (data) => {
        process.stdout.write(data);
      });

      childProcess.stderr.on('data', (data) => {
        process.stderr.write(data);
      });

      return { success: true, message: 'Tipi updated' };
    } catch (e) {
      spinner.fail('Tipi update failed, see logs for details');
      logger.error(e);
      return this.handleSystemError(e);
    }
  };
}

View file

@ -0,0 +1,262 @@
import crypto from 'crypto';
import fs from 'fs';
import path from 'path';
import os from 'os';
import { envMapToString, envStringToMap, settingsSchema } from '@runtipi/shared';
import { exec } from 'child_process';
import { promisify } from 'util';
import chalk from 'chalk';
import { pathExists } from '@/utils/fs-helpers';
import { getRepoHash } from '../repo/repo.helpers';
// Known keys of the system .env file. The trailing `(string & {})` member keeps the
// union open (any other string is accepted) while preserving editor autocompletion
// for the listed keys.
type EnvKeys =
  | 'APPS_REPO_ID'
  | 'APPS_REPO_URL'
  | 'TZ'
  | 'INTERNAL_IP'
  | 'DNS_IP'
  | 'ARCHITECTURE'
  | 'TIPI_VERSION'
  | 'JWT_SECRET'
  | 'ROOT_FOLDER_HOST'
  | 'NGINX_PORT'
  | 'NGINX_PORT_SSL'
  | 'DOMAIN'
  | 'STORAGE_PATH'
  | 'POSTGRES_PORT'
  | 'POSTGRES_HOST'
  | 'POSTGRES_DBNAME'
  | 'POSTGRES_PASSWORD'
  | 'POSTGRES_USERNAME'
  | 'REDIS_HOST'
  | 'LOCAL_DOMAIN'
  | 'DEMO_MODE'
  // eslint-disable-next-line @typescript-eslint/ban-types
  | (string & {});
const execAsync = promisify(exec);
const DEFAULT_REPO_URL = 'https://github.com/meienberger/runtipi-appstore';
/**
 * Reads and returns the generated seed from <cwd>/state/seed.
 * Throws if the seed file has not been generated yet.
 */
const getSeed = async () => {
  const seedFilePath = path.join(process.cwd(), 'state', 'seed');

  if (!(await pathExists(seedFilePath))) {
    throw new Error('Seed file not found');
  }

  return fs.promises.readFile(seedFilePath, 'utf-8');
};
/**
 * Derives a new deterministic entropy value (hex digest) from the provided
 * entropy string, keyed with the instance seed via HMAC-SHA256.
 *
 * @param {string} entropy - The entropy value to derive from
 */
const deriveEntropy = async (entropy: string) => {
  const seed = await getSeed();
  return crypto.createHmac('sha256', seed).update(entropy).digest('hex');
};
/**
 * Generates and persists a random 32-byte hex seed under <root>/state/seed
 * if one does not exist yet; an existing seed is never overwritten.
 */
const generateSeed = async (rootFolder: string) => {
  const seedPath = path.join(rootFolder, 'state', 'seed');

  if (await pathExists(seedPath)) {
    return;
  }

  await fs.promises.writeFile(seedPath, crypto.randomBytes(32).toString('hex'));
};
/**
 * Will return the first non-internal IPv4 address of the current system,
 * or '0.0.0.0' when none is found.
 */
const getInternalIp = () => {
  const interfaces = os.networkInterfaces();

  for (const devName of Object.keys(interfaces)) {
    for (const alias of interfaces[devName] ?? []) {
      if (alias.family === 'IPv4' && alias.address !== '127.0.0.1' && !alias.internal) {
        return alias.address;
      }
    }
  }

  return '0.0.0.0';
};
/**
 * Returns the architecture of the current system mapped to Docker naming
 * ('arm64' or 'amd64'). Throws for any other architecture.
 */
const getArchitecture = () => {
  const arch = os.arch();

  switch (arch) {
    case 'arm64':
      return 'arm64';
    case 'x64':
      return 'amd64';
    default:
      throw new Error(`Unsupported architecture: ${arch}`);
  }
};
/**
 * Generates a valid .env file from the settings.json file.
 * Existing .env values are loaded first so previously derived secrets (JWT secret,
 * postgres password) are reused; every managed key is then (re)written from settings
 * or defaults, and the merged map is persisted back to .env.
 *
 * @returns the resulting env map so callers (e.g. `start`) can read values from it.
 * @throws if settings.json does not match settingsSchema.
 */
export const generateSystemEnvFile = async () => {
  const rootFolder = process.cwd();
  await fs.promises.mkdir(path.join(rootFolder, 'state'), { recursive: true });
  const settingsFilePath = path.join(rootFolder, 'state', 'settings.json');
  const envFilePath = path.join(rootFolder, '.env');
  // Ensure .env and settings.json exist so the reads below cannot fail
  if (!(await pathExists(envFilePath))) {
    await fs.promises.writeFile(envFilePath, '');
  }
  const envFile = await fs.promises.readFile(envFilePath, 'utf-8');
  const envMap: Map<EnvKeys, string> = envStringToMap(envFile);
  if (!(await pathExists(settingsFilePath))) {
    await fs.promises.writeFile(settingsFilePath, JSON.stringify({}));
  }
  const settingsFile = await fs.promises.readFile(settingsFilePath, 'utf-8');
  const settings = settingsSchema.safeParse(JSON.parse(settingsFile));
  if (!settings.success) {
    throw new Error(`Invalid settings.json file: ${settings.error.message}`);
  }
  // Seed must exist before any entropy can be derived from it
  await generateSeed(rootFolder);
  const { data } = settings;
  // Reuse previously derived secrets so they stay stable across restarts
  const jwtSecret = envMap.get('JWT_SECRET') || (await deriveEntropy('jwt_secret'));
  const repoId = getRepoHash(data.appsRepoUrl || DEFAULT_REPO_URL);
  const postgresPassword = envMap.get('POSTGRES_PASSWORD') || (await deriveEntropy('postgres_password'));
  // NOTE(review): VERSION file contents are written as-is — confirm it carries no
  // trailing newline, which would leak into TIPI_VERSION
  const version = await fs.promises.readFile(path.join(rootFolder, 'VERSION'), 'utf-8');
  envMap.set('APPS_REPO_ID', repoId);
  envMap.set('APPS_REPO_URL', data.appsRepoUrl || DEFAULT_REPO_URL);
  envMap.set('TZ', Intl.DateTimeFormat().resolvedOptions().timeZone);
  envMap.set('INTERNAL_IP', data.listenIp || getInternalIp());
  envMap.set('DNS_IP', data.dnsIp || '9.9.9.9');
  envMap.set('ARCHITECTURE', getArchitecture());
  envMap.set('TIPI_VERSION', version);
  envMap.set('JWT_SECRET', jwtSecret);
  envMap.set('ROOT_FOLDER_HOST', rootFolder);
  envMap.set('NGINX_PORT', String(data.port || 80));
  envMap.set('NGINX_PORT_SSL', String(data.sslPort || 443));
  envMap.set('DOMAIN', data.domain || 'example.com');
  envMap.set('STORAGE_PATH', data.storagePath || rootFolder);
  envMap.set('POSTGRES_HOST', 'tipi-db');
  envMap.set('POSTGRES_DBNAME', 'tipi');
  envMap.set('POSTGRES_USERNAME', 'tipi');
  envMap.set('POSTGRES_PASSWORD', postgresPassword);
  envMap.set('POSTGRES_PORT', String(5432));
  envMap.set('REDIS_HOST', 'tipi-redis');
  envMap.set('DEMO_MODE', String(data.demoMode || 'false'));
  envMap.set('LOCAL_DOMAIN', data.localDomain || 'tipi.lan');
  envMap.set('NODE_ENV', 'production');
  await fs.promises.writeFile(envFilePath, envMapToString(envMap));
  return envMap;
};
/**
 * Copies the system files (docker-compose.yml, VERSION, traefik config) from the
 * bundled assets folder to the current working directory and creates the base and
 * media folder tree. All directory creation is idempotent (`recursive: true`).
 */
export const copySystemFiles = async () => {
  // Assets are bundled into the pkg snapshot filesystem at build time
  const assetsFolder = path.join('/snapshot', 'runtipi', 'packages', 'cli', 'assets');
  const rootFolder = process.cwd();

  // Shorthand: recursively create a folder under the runtime root
  const ensureDir = (...segments: string[]) => fs.promises.mkdir(path.join(rootFolder, ...segments), { recursive: true });

  // Copy docker-compose.yml and VERSION files
  await fs.promises.copyFile(path.join(assetsFolder, 'docker-compose.yml'), path.join(rootFolder, 'docker-compose.yml'));
  await fs.promises.copyFile(path.join(assetsFolder, 'VERSION'), path.join(rootFolder, 'VERSION'));

  // Copy traefik folder from assets (folders must exist before the config files are copied in)
  await ensureDir('traefik', 'dynamic');
  await ensureDir('traefik', 'shared');
  await ensureDir('traefik', 'tls');
  await fs.promises.copyFile(path.join(assetsFolder, 'traefik', 'traefik.yml'), path.join(rootFolder, 'traefik', 'traefik.yml'));
  await fs.promises.copyFile(path.join(assetsFolder, 'traefik', 'dynamic', 'dynamic.yml'), path.join(rootFolder, 'traefik', 'dynamic', 'dynamic.yml'));

  // Create base folders
  await Promise.all(['apps', 'app-data', 'state', 'repos'].map((dir) => ensureDir(dir)));

  // Create media folders
  const mediaFolders: Record<string, string[]> = {
    torrents: ['watch', 'complete', 'incomplete'],
    usenet: ['watch', 'complete', 'incomplete'],
    downloads: ['watch', 'complete', 'incomplete'],
    data: ['books', 'comics', 'movies', 'music', 'tv', 'podcasts', 'images', 'roms'],
  };
  await Promise.all(Object.entries(mediaFolders).flatMap(([category, subDirs]) => subDirs.map((subDir) => ensureDir('media', category, subDir))));
};
/**
 * Given a domain, generates a self-signed wildcard TLS certificate for it to be used
 * with Traefik. A marker file `<domain>.txt` records which domain the current cert
 * pair was issued for, so certificates are only regenerated when the domain changes.
 * Failures are logged but never thrown (TLS is optional for local domains).
 *
 * @param {string} data.domain The domain to generate the certificates for
 */
export const generateTlsCertificates = async (data: { domain?: string }) => {
  if (!data.domain) {
    return;
  }

  // If the certificate already exists, don't generate it again
  if (await pathExists(path.join(process.cwd(), 'traefik', 'tls', `${data.domain}.txt`))) {
    return;
  }

  // Remove old certificates
  if (await pathExists(path.join(process.cwd(), 'traefik', 'tls', 'cert.pem'))) {
    await fs.promises.unlink(path.join(process.cwd(), 'traefik', 'tls', 'cert.pem'));
  }
  if (await pathExists(path.join(process.cwd(), 'traefik', 'tls', 'key.pem'))) {
    await fs.promises.unlink(path.join(process.cwd(), 'traefik', 'tls', 'key.pem'));
  }

  const subject = `/O=runtipi.io/OU=IT/CN=*.${data.domain}/emailAddress=webmaster@${data.domain}`;
  const subjectAltName = `DNS:*.${data.domain},DNS:${data.domain}`;

  try {
    // Fix: openssl's -addext expects a "name=value" pair; the "subjectAltName=" key
    // was missing, which made openssl reject the request and the cert never generate.
    await execAsync(`openssl req -x509 -newkey rsa:4096 -keyout traefik/tls/key.pem -out traefik/tls/cert.pem -days 365 -subj "${subject}" -addext "subjectAltName=${subjectAltName}" -nodes`);
    await fs.promises.writeFile(path.join(process.cwd(), 'traefik', 'tls', `${data.domain}.txt`), '');
  } catch (error) {
    console.error(chalk.red('✗'), 'Failed to generate TLS certificates');
  }
};

70
packages/cli/src/index.ts Normal file
View file

@ -0,0 +1,70 @@
#!/usr/bin/env node
import { program } from 'commander';
import chalk from 'chalk';
import { description, version } from '../package.json';
import { startWorker } from './services/watcher/watcher';
import { SystemExecutors } from './executors';
/**
 * Entry point of the CLI: registers every command on the commander program
 * (watch, start, stop, restart, update, reset-password) and parses argv.
 * Each command delegates to SystemExecutors or to the watcher service.
 */
const main = async () => {
  program.description(description).version(version);

  // Long-running process that consumes the Redis events queue
  program
    .command('watch')
    .description('Watcher script for events queue')
    .action(async () => {
      console.log('Starting watcher');
      startWorker();
    });

  program
    .command('start')
    .description('Start tipi')
    .action(async () => {
      const systemExecutors = new SystemExecutors();
      await systemExecutors.start();
    });

  program
    .command('stop')
    .description('Stop tipi')
    .action(async () => {
      const systemExecutors = new SystemExecutors();
      await systemExecutors.stop();
    });

  program
    .command('restart')
    .description('Restart tipi')
    .action(async () => {
      const systemExecutors = new SystemExecutors();
      await systemExecutors.restart();
    });

  // Self-update the runtipi-cli binary to <target> ('latest' or a semver version)
  program
    .command('update')
    .description('Update tipi')
    .argument('<target>', 'Target to update')
    .action(async (target) => {
      const systemExecutors = new SystemExecutors();
      await systemExecutors.update(target);
    });

  // Drops a password-change-request marker file picked up by the dashboard
  program
    .command('reset-password')
    .description('Reset password')
    .action(async () => {
      const systemExecutors = new SystemExecutors();
      await systemExecutors.resetPassword();
      console.log(chalk.green('✓'), 'Password reset request created. Head back to the dashboard to set a new password.');
    });

  program.parse(process.argv);
};
try {
  console.log(chalk.green('Welcome to Tipi CLI ✨'));
  // Fix: main() is async — a bare call leaves promise rejections unhandled because the
  // surrounding try/catch only catches synchronous throws. Attach a rejection handler.
  main().catch((e) => {
    console.error('An error occurred:', e);
  });
} catch (e) {
  console.error('An error occurred:', e);
}

View file

@ -0,0 +1,118 @@
import { eventSchema } from '@runtipi/shared';
import { Worker } from 'bullmq';
import { exec } from 'child_process';
import { promisify } from 'util';
import { AppExecutors, RepoExecutors, SystemExecutors } from '@/executors';
const execAsync = promisify(exec);
/**
 * Dispatches a queue event to the matching executor method.
 * Unknown type/command combinations fall through to a failure result that
 * echoes the offending payload.
 *
 * @param {unknown} jobData - Raw job payload, validated against eventSchema.
 * @throws when the payload does not match eventSchema.
 */
const runCommand = async (jobData: unknown) => {
  const { installApp, startApp, stopApp, uninstallApp, updateApp, regenerateAppEnv } = new AppExecutors();
  const { cloneRepo, pullRepo } = new RepoExecutors();
  const { systemInfo, restart } = new SystemExecutors();

  const event = eventSchema.safeParse(jobData);

  if (!event.success) {
    throw new Error('Event is not valid');
  }

  const { data } = event;

  let success = false;
  let message = `Event has invalid type or args ${JSON.stringify(data)}`;

  switch (data.type) {
    case 'app':
      switch (data.command) {
        case 'install':
          ({ success, message } = await installApp(data.appid, data.form));
          break;
        case 'stop':
          ({ success, message } = await stopApp(data.appid, data.form));
          break;
        case 'start':
          ({ success, message } = await startApp(data.appid, data.form));
          break;
        case 'uninstall':
          ({ success, message } = await uninstallApp(data.appid, data.form));
          break;
        case 'update':
          ({ success, message } = await updateApp(data.appid, data.form));
          break;
        case 'generate_env':
          ({ success, message } = await regenerateAppEnv(data.appid, data.form));
          break;
        default:
          break;
      }
      break;
    case 'repo':
      switch (data.command) {
        case 'clone':
          ({ success, message } = await cloneRepo(data.url));
          break;
        case 'update':
          ({ success, message } = await pullRepo(data.url));
          break;
        default:
          break;
      }
      break;
    case 'system':
      switch (data.command) {
        case 'system_info':
          ({ success, message } = await systemInfo());
          break;
        case 'restart':
          ({ success, message } = await restart());
          break;
        default:
          break;
      }
      break;
    default:
      break;
  }

  return { success, message };
};
/**
 * Kills any other running `index.js watch` worker processes so that only one
 * watcher consumes the events queue at a time. The current process is skipped.
 */
const killOtherWorkers = async () => {
  const { stdout } = await execAsync('ps aux | grep "index.js watch" | grep -v grep | awk \'{print $2}\'');

  const pids = stdout.split('\n').filter((pid: string) => pid !== '');

  pids.forEach((pid) => {
    if (pid === process.pid.toString()) {
      console.log('Skipping killing current worker');
      return;
    }

    console.log(`Killing worker with pid ${pid}`);
    try {
      // process.kill throws (e.g. ESRCH if the pid already exited, EPERM if
      // not permitted); a throw here must not abort the whole worker startup.
      process.kill(Number(pid));
    } catch (e) {
      console.error(`Failed to kill process ${pid}:`, e);
    }
  });
};
/**
 * Start the BullMQ worker that consumes the "events" queue.
 * Previously running watcher processes are terminated first, then each job is
 * routed through runCommand and its outcome returned as the job result.
 */
export const startWorker = async () => {
  await killOtherWorkers();

  const worker = new Worker(
    'events',
    async (job) => {
      console.log(`Processing job ${job.id} with data ${JSON.stringify(job.data)}`);
      const { message, success } = await runCommand(job.data);
      return { success, stdout: message };
    },
    // NOTE(review): redis host/port are hard-coded here — presumably the queue
    // always runs locally; verify against deployment.
    { connection: { host: '127.0.0.1', port: 6379 } },
  );

  worker.on('ready', () => console.log('Worker is ready'));

  worker.on('completed', (job) => console.log(`Job ${job.id} completed with result: ${JSON.stringify(job.returnvalue)}`));

  worker.on('failed', (job) => console.error(`Job ${job?.id} failed with reason ${job?.failedReason}`));

  worker.on('error', (e) => console.error('An error occurred:', e));
};

View file

@ -0,0 +1,59 @@
import path from 'path';
import { promisify } from 'util';
import { exec } from 'child_process';
import { createLogger } from '@runtipi/shared';
import { getEnv } from '../environment/environment';
import { pathExists } from '../fs-helpers/fs-helpers';
const execAsync = promisify(exec);
const logger = createLogger('docker-helpers', path.join(process.cwd(), 'logs'));
// Runs `docker compose` with the given argument list and logs both output
// streams. NOTE(review): despite the name, this executes whatever compose
// sub-command is contained in `args`, not only `up`.
const composeUp = async (args: string[]) => {
  const fullCommand = `docker compose ${args.join(' ')}`;
  const { stdout, stderr } = await execAsync(fullCommand);

  logger.info('stdout', stdout);
  logger.info('stderr', stderr);

  return { stdout, stderr };
};
/**
 * Executes a docker compose command for a given app.
 * Assembles the full argument list — env files, project name, compose files
 * and user overrides — then delegates execution to composeUp.
 *
 * @param {string} appId - App name
 * @param {string} command - Compose sub-command to execute (e.g. "up -d")
 */
export const compose = async (appId: string, command: string) => {
  const { arch, rootFolderHost, appsRepoId, storagePath } = getEnv();

  const appDataDirPath = path.join(storagePath, 'app-data', appId);
  const appDirPath = path.join(rootFolderHost, 'apps', appId);

  // Generated env file for the app comes first.
  const args: string[] = [`--env-file ${path.join(appDataDirPath, 'app.env')}`];

  // User custom env file, applied on top of the generated one.
  const userEnvFile = path.join(rootFolderHost, 'user-config', appId, 'app.env');
  if (await pathExists(userEnvFile)) {
    args.push(`--env-file ${userEnvFile}`);
  }

  args.push(`--project-name ${appId}`);

  // Prefer the arm64-specific compose file when on arm64 and it exists.
  const arm64ComposeFile = path.join(appDirPath, 'docker-compose.arm64.yml');
  const useArm64File = arch === 'arm64' && (await pathExists(arm64ComposeFile));
  const composeFile = useArm64File ? arm64ComposeFile : path.join(appDirPath, 'docker-compose.yml');
  args.push(`-f ${composeFile}`);

  // Shared compose definitions from the apps repository.
  const commonComposeFile = path.join(rootFolderHost, 'repos', appsRepoId, 'apps', 'docker-compose.common.yml');
  args.push(`-f ${commonComposeFile}`);

  // User defined compose overrides.
  const userComposeFile = path.join(rootFolderHost, 'user-config', appId, 'docker-compose.yml');
  if (await pathExists(userComposeFile)) {
    args.push(`--file ${userComposeFile}`);
  }

  args.push(command);

  return composeUp(args);
};

View file

@ -0,0 +1 @@
export * from './docker-helpers';

View file

@ -0,0 +1,35 @@
import { z } from 'zod';
import dotenv from 'dotenv';
// Load environment variables before the schema below reads process.env.
// In development we read from `.env.dev`; otherwise dotenv's default `.env`.
if (process.env.NODE_ENV === 'development') {
  dotenv.config({ path: '.env.dev' });
} else {
  dotenv.config();
}
/**
 * Schema for the environment variables required by the CLI.
 * Parsing validates raw values and maps the SCREAMING_CASE variables onto a
 * camelCase object for internal use.
 */
const environmentSchema = z
  .object({
    STORAGE_PATH: z.string(),
    ROOT_FOLDER_HOST: z.string(),
    APPS_REPO_ID: z.string(),
    ARCHITECTURE: z.enum(['arm64', 'amd64']),
    INTERNAL_IP: z.string().ip().or(z.literal('localhost')),
    TIPI_VERSION: z.string(),
  })
  .transform(({ STORAGE_PATH, ARCHITECTURE, ROOT_FOLDER_HOST, APPS_REPO_ID, INTERNAL_IP, TIPI_VERSION, ...rest }) => ({
    storagePath: STORAGE_PATH,
    rootFolderHost: ROOT_FOLDER_HOST,
    appsRepoId: APPS_REPO_ID,
    arch: ARCHITECTURE,
    tipiVersion: TIPI_VERSION,
    internalIp: INTERNAL_IP,
    ...rest,
  }));

export type Environment = z.infer<typeof environmentSchema>;

/** Parse and return the validated environment; throws when validation fails. */
export const getEnv = () => environmentSchema.parse(process.env);

View file

@ -0,0 +1,8 @@
import fs from 'fs';
/**
 * Checks whether a filesystem path exists (and is accessible).
 *
 * @param path - Path to check.
 * @returns true when the path is accessible, false otherwise; never throws.
 */
export const pathExists = async (path: string): Promise<boolean> => {
  try {
    await fs.promises.access(path);
    return true;
  } catch {
    return false;
  }
};

View file

@ -0,0 +1 @@
export * from './fs-helpers';

View file

@ -0,0 +1,55 @@
import logUpdate from 'log-update';
import chalk from 'chalk';
import { dots } from 'cli-spinners';
/**
 * Minimal terminal spinner built on cli-spinners + log-update.
 * start() animates the dots frames, setMessage() swaps the label, and
 * done()/fail() stop the animation with a success or failure mark.
 */
export class TerminalSpinner {
  message: string;

  frame = 0;

  interval: NodeJS.Timer | null = null;

  constructor(message: string) {
    this.message = message;
  }

  start() {
    this.interval = setInterval(() => {
      this.frame = (this.frame + 1) % dots.frames.length;
      logUpdate(`${dots.frames[this.frame]} ${this.message}`);
    }, dots.interval);
  }

  setMessage(message: string) {
    this.message = message;
  }

  /** Stop the animation; print `prefix message` when given, else clear the line. */
  private stop(prefix: string, message?: string) {
    if (this.interval) {
      clearInterval(this.interval);
    }

    if (message) {
      logUpdate(prefix, message);
    } else {
      logUpdate.clear();
    }

    logUpdate.done();
  }

  done(message?: string) {
    this.stop(chalk.green('✓'), message);
  }

  fail(message?: string) {
    this.stop(chalk.red('✗'), message);
  }
}

View file

@ -0,0 +1,40 @@
import { faker } from '@faker-js/faker';
import fs from 'fs';
import { APP_CATEGORIES, AppInfo, appInfoSchema } from '@runtipi/shared';
import { getEnv } from '@/utils/environment/environment';
/**
 * Test helper: builds a randomized, schema-valid AppInfo and seeds the mocked
 * filesystem with the files Tipi expects for that app.
 *
 * @param props - Overrides applied on top of the generated app info.
 * @param isInstalled - When true, also seeds the installed-app files and app data.
 * @returns The generated AppInfo.
 */
export const createAppConfig = (props?: Partial<AppInfo>, isInstalled = true) => {
  const { rootFolderHost, storagePath } = getEnv();

  const appInfo = appInfoSchema.parse({
    id: faker.string.alphanumeric(32),
    available: true,
    port: faker.number.int({ min: 30, max: 65535 }),
    name: faker.string.alphanumeric(32),
    description: faker.string.alphanumeric(32),
    tipi_version: 1,
    short_desc: faker.string.alphanumeric(32),
    author: faker.string.alphanumeric(32),
    source: faker.internet.url(),
    categories: [APP_CATEGORIES.AUTOMATION],
    ...props,
  });

  const serializedConfig = JSON.stringify(appInfoSchema.parse(appInfo));

  // Files present in the apps repository for every known app.
  const mockFiles: Record<string, string | string[]> = {
    [`${rootFolderHost}/.env`]: 'TEST=test',
    [`${rootFolderHost}/repos/repo-id/apps/${appInfo.id}/config.json`]: serializedConfig,
    [`${rootFolderHost}/repos/repo-id/apps/${appInfo.id}/docker-compose.yml`]: 'compose',
    [`${rootFolderHost}/repos/repo-id/apps/${appInfo.id}/metadata/description.md`]: 'md desc',
  };

  // Files that only exist once the app has been installed.
  if (isInstalled) {
    mockFiles[`${rootFolderHost}/apps/${appInfo.id}/config.json`] = serializedConfig;
    mockFiles[`${rootFolderHost}/apps/${appInfo.id}/docker-compose.yml`] = 'compose';
    mockFiles[`${rootFolderHost}/apps/${appInfo.id}/metadata/description.md`] = 'md desc';
    mockFiles[`${storagePath}/app-data/${appInfo.id}/data/test.txt`] = 'data';
  }

  // @ts-expect-error - custom mock method
  fs.__applyMockFiles(mockFiles);

  return appInfo;
};

View file

@ -0,0 +1,41 @@
import { fs, vol } from 'memfs';
/**
 * Recursively copies a file or directory tree inside the in-memory volume.
 *
 * @param src - Source path in the memfs volume.
 * @param dest - Destination path; directories are created as needed.
 */
const copyFolderRecursiveSync = (src: string, dest: string) => {
  // Bug fix: the original called vol.statSync(src) unconditionally, which
  // throws for a missing src and made its existsSync check dead code. Check
  // existence first and only stat when the path is actually there.
  const isDirectory = vol.existsSync(src) && vol.statSync(src).isDirectory();

  if (isDirectory) {
    vol.mkdirSync(dest, { recursive: true });
    vol.readdirSync(src).forEach((childItemName) => {
      copyFolderRecursiveSync(`${src}/${childItemName}`, `${dest}/${childItemName}`);
    });
  } else {
    vol.copyFileSync(src, dest);
  }
};
// In-memory stand-in for the `fs` module (backed by memfs), shaped as a
// module mock (`default` export). The double-underscore helpers are custom
// hooks that tests use to seed, reset and inspect the virtual volume.
export const fsMock = {
  default: {
    ...fs,
    promises: {
      ...fs.promises,
      // memfs does not provide fs.promises.cp; emulate it with a recursive copy.
      cp: copyFolderRecursiveSync,
    },
    // NOTE(review): presumably mirrors fs-extra's copySync — verify against callers.
    copySync: (src: string, dest: string) => {
      copyFolderRecursiveSync(src, dest);
    },
    // Wipe the entire virtual filesystem.
    __resetAllMocks: () => {
      vol.reset();
    },
    // Add files to the existing volume without clearing it.
    __applyMockFiles: (newMockFiles: Record<string, string>) => {
      // Create folder tree
      vol.fromJSON(newMockFiles, 'utf8');
    },
    // Replace the volume contents with exactly these files.
    __createMockFiles: (newMockFiles: Record<string, string>) => {
      vol.reset();
      // Create folder tree
      vol.fromJSON(newMockFiles, 'utf8');
    },
    // Debug helper: dump the volume as a tree to stdout.
    __printVol: () => console.log(vol.toTree()),
  },
};

View file

@ -0,0 +1,34 @@
import fs from 'fs';
import path from 'path';
import { vi, beforeEach } from 'vitest';
import { getEnv } from '@/utils/environment/environment';
// Replace the shared logger with no-op spies so tests don't write log files.
vi.mock('@runtipi/shared', async (importOriginal) => {
  const mod = (await importOriginal()) as object;
  return {
    ...mod,
    createLogger: vi.fn().mockReturnValue({
      info: vi.fn(),
      error: vi.fn(),
    }),
  };
});
// Route all `fs` calls to the in-memory memfs-backed mock.
vi.mock('fs', async () => {
  const { fsMock } = await import('@/tests/mocks/fs');
  return {
    ...fsMock,
  };
});
// Before each test: wipe the virtual filesystem and recreate the minimal
// layout (state/seed file and the apps repo folder) that the code under
// test reads.
beforeEach(async () => {
  // @ts-expect-error - custom mock method
  fs.__resetAllMocks();
  const { rootFolderHost, appsRepoId } = getEnv();
  await fs.promises.mkdir(path.join(rootFolderHost, 'state'), { recursive: true });
  await fs.promises.writeFile(path.join(rootFolderHost, 'state', 'seed'), 'seed');
  await fs.promises.mkdir(path.join(rootFolderHost, 'repos', appsRepoId, 'apps'), { recursive: true });
});

View file

@ -1,279 +0,0 @@
#!/usr/bin/env bash
echo "Starting app script"
source "${BASH_SOURCE%/*}/common.sh"
set -euo pipefail
ensure_pwd
ROOT_FOLDER="${PWD}"
STATE_FOLDER="${ROOT_FOLDER}/state"
ENV_FILE="${ROOT_FOLDER}/.env"
# Root folder in host system
ROOT_FOLDER_HOST=$(grep -v '^#' "${ENV_FILE}" | xargs -n 1 | grep ROOT_FOLDER_HOST | cut -d '=' -f2)
REPO_ID=$(grep -v '^#' "${ENV_FILE}" | xargs -n 1 | grep APPS_REPO_ID | cut -d '=' -f2)
STORAGE_PATH=$(grep -v '^#' "${ENV_FILE}" | xargs -n 1 | grep STORAGE_PATH | cut -d '=' -f2)
write_log "Running app script: ROOT_FOLDER=${ROOT_FOLDER}, ROOT_FOLDER_HOST=${ROOT_FOLDER_HOST}, REPO_ID=${REPO_ID}, STORAGE_PATH=${STORAGE_PATH}"
if [ -z ${1+x} ]; then
command=""
else
command="$1"
fi
if [ -z ${2+x} ]; then
exit 1
else
app="$2"
app_dir="${ROOT_FOLDER}/apps/${app}"
if [[ ! -d "${app_dir}" ]]; then
# copy from repo
echo "Copying app from repo"
mkdir -p "${app_dir}"
cp -r "${ROOT_FOLDER}/repos/${REPO_ID}/apps/${app}"/* "${app_dir}"
fi
app_data_dir="${STORAGE_PATH}/app-data/${app}"
if [[ -z "${app}" ]] || [[ ! -d "${app_dir}" ]]; then
echo "Error: \"${app}\" is not a valid app"
exit 1
fi
fi
# Function below has been modified from Umbrel
# Required Notice: Copyright
# Umbrel (https://umbrel.com)
compose() {
local app="${1}"
shift
arch=$(uname -m)
local architecture="${arch}"
if [[ "$architecture" == "aarch64" ]]; then
architecture="arm64"
fi
# App data folder
local app_compose_file="${app_dir}/docker-compose.yml"
# Pick arm architecture if running on arm and if the app has a docker-compose.arm.yml file
if [[ "$architecture" == "arm"* ]] && [[ -f "${app_dir}/docker-compose.arm.yml" ]]; then
app_compose_file="${app_dir}/docker-compose.arm.yml"
fi
# Pick arm architecture if running on arm and if the app has a docker-compose.arm64.yml file
if [[ "$architecture" == "arm64" ]] && [[ -f "${app_dir}/docker-compose.arm64.yml" ]]; then
app_compose_file="${app_dir}/docker-compose.arm64.yml"
fi
local common_compose_file="${ROOT_FOLDER}/repos/${REPO_ID}/apps/docker-compose.common.yml"
local user_compose_file="${ROOT_FOLDER}/user-config/${app}/docker-compose.yml"
local user_compose_args=
if [[ -f ${user_compose_file} ]]; then
user_compose_args="--file ${user_compose_file}"
fi
local user_env_file="${ROOT_FOLDER}/user-config/${app}/app.env"
local user_env_args=
if [[ -f ${user_env_file} ]]; then
user_env_args="--env-file ${user_env_file}"
fi
# Vars to use in compose file
export APP_DATA_DIR="${STORAGE_PATH}/app-data/${app}"
export ROOT_FOLDER_HOST="${ROOT_FOLDER_HOST}"
write_log "Running docker compose -f ${app_compose_file} -f ${common_compose_file} ${*}"
write_log "APP_DATA_DIR=${APP_DATA_DIR}"
write_log "ROOT_FOLDER_HOST=${ROOT_FOLDER_HOST}"
docker compose \
--env-file "${app_data_dir}/app.env" \
${user_env_args} \
--project-name "${app}" \
--file "${app_compose_file}" \
--file "${common_compose_file}" \
${user_compose_args} \
"${@}"
}
function ensure_permissions() {
local app="${1}"
# if app_data_dir/data does not exist, create it
if [[ ! -d "${app_data_dir}/data" ]]; then
mkdir -p "${app_data_dir}/data"
fi
# Check if app requires special uid and gid
if [[ -f "${app_dir}/config.json" ]]; then
uid=$(get_json_field "${app_dir}/config.json" uid)
gid=$(get_json_field "${app_dir}/config.json" gid)
write_log "App requires uid=${uid} and gid=${gid}"
if [[ "$uid" != "null" ]] && [[ "$gid" != "null" ]]; then
write_log "Setting uid and gid to ${uid}:${gid}"
if ! chown -R "${uid}:${gid}" "${app_data_dir}/data"; then
write_log "Failed to set uid and gid to ${uid}:${gid}"
fi
fi
fi
# Remove all .gitkeep files from app data dir
find "${app_data_dir}" -name ".gitkeep" -exec rm -f {} \;
chmod -R a+rwx "${app_data_dir}"
}
function install_app() {
local app="${1}"
# Write to file script.log
write_log "Installing app ${app}..."
if ! compose "${app}" pull; then
write_log "Failed to pull app ${app}"
exit 1
fi
ensure_permissions "${app}"
if ! compose "${app}" up -d; then
write_log "Failed to start app ${app}"
exit 1
fi
exit 0
}
function start_app() {
local app="${1}"
write_log "Starting app ${app}..."
ensure_permissions "${app}"
# Pull images
if ! compose "${app}" pull; then
write_log "Failed to pull app ${app}"
fi
if ! compose "${app}" up --detach --force-recreate --remove-orphans; then
write_log "Failed to start app ${app}"
exit 1
fi
exit 0
}
function uninstall_app() {
local app="${1}"
write_log "Removing images for app ${app}..."
if ! compose "${app}" down --rmi all --remove-orphans; then
# just stop it if we can't remove the images
if ! compose "${app}" rm --force --stop; then
write_log "Failed to uninstall app ${app}"
exit 1
fi
fi
write_log "Deleting app data for app ${app}..."
if [[ -d "${app_data_dir}" ]]; then
rm -rf "${app_data_dir}"
fi
if [[ -d "${app_dir}" ]]; then
rm -rf "${app_dir}"
fi
write_log "Successfully uninstalled app ${app}"
exit
}
function update_app() {
local app="${1}"
if ! compose "${app}" up --detach --force-recreate --remove-orphans; then
write_log "Failed to update app ${app}"
fi
if ! compose "${app}" down --rmi all --remove-orphans; then
# just stop it if we can't remove the images
if ! compose "${app}" rm --force --stop; then
write_log "Failed to update app ${app}"
exit 1
fi
fi
# Remove app
if [[ -d "${app_dir}" ]]; then
rm -rf "${app_dir}"
fi
# Copy app from repo
cp -r "${ROOT_FOLDER}/repos/${REPO_ID}/apps/${app}" "${app_dir}"
ensure_permissions "${app}"
compose "${app}" pull
exit 0
}
function stop_app() {
local app="${1}"
write_log "Stopping app ${app}..."
if ! compose "${app}" rm --force --stop; then
write_log "Failed to stop app ${app}"
exit 1
fi
exit 0
}
# Install new app
if [[ "$command" = "install" ]]; then
install_app "${app}"
fi
# Removes images and destroys all data for an app
if [[ "$command" = "uninstall" ]]; then
uninstall_app "${app}"
fi
# Update an app
if [[ "$command" = "update" ]]; then
update_app "${app}"
fi
# Stops an installed app
if [[ "$command" = "stop" ]]; then
stop_app "${app}"
fi
# Starts an installed app
if [[ "$command" = "start" ]]; then
start_app "${app}"
fi
if [[ "$command" = "clean" ]]; then
# Remove all stopped containers and unused images
write_log "Cleaning up..."
docker system prune --all --force
exit 0
fi
exit 1

View file

@ -1,275 +0,0 @@
#!/usr/bin/env bash
ROOT_FOLDER="${PWD}"
STATE_FOLDER="${ROOT_FOLDER}/state"
# Get field from json file
function get_json_field() {
local json_file="$1"
local field="$2"
jq -r ".${field}" "${json_file}"
}
function write_log() {
local message="$1"
local log_file="${PWD}/logs/script.log"
echo "$(date) - ${message}" >>"${log_file}"
}
# Function below is taken from Umbrel
# Required Notice: Copyright
# Umbrel (https://umbrel.com)
function derive_entropy() {
SEED_FILE="${STATE_FOLDER}/seed"
identifier="${1}"
tipi_seed=$(cat "${SEED_FILE}") || true
if [[ -z "$tipi_seed" ]] || [[ -z "$identifier" ]]; then
echo >&2 "Seed file not found. exiting..."
exit 1
fi
printf "%s" "${identifier}" | openssl dgst -sha256 -hmac "${tipi_seed}" | sed 's/^.* //'
}
function ensure_pwd() {
if [[ $(basename "$(pwd)") != "runtipi" ]] || [[ ! -f "${BASH_SOURCE[0]}" ]]; then
echo "Please run this script from the runtipi directory"
exit 1
fi
}
function ensure_root() {
if [[ $UID != 0 ]]; then
echo "Tipi must be started as root"
echo "Please re-run this script as"
echo " sudo ./scripts/start"
exit 1
fi
}
function ensure_linux() {
# Check we are on linux
if [[ "$(uname)" != "Linux" ]]; then
echo "Tipi only works on Linux"
exit 1
fi
}
function clean_logs() {
# Clean logs folder
local logs_folder="${ROOT_FOLDER}/logs"
# Create the folder if it doesn't exist
if [[ ! -d "${logs_folder}" ]]; then
mkdir "${logs_folder}"
fi
if [ "$(find "${logs_folder}" -maxdepth 1 -type f | wc -l)" -gt 0 ]; then
echo "Cleaning logs folder..."
local files=($(ls -d "${logs_folder}"/* | xargs -n 1 basename | sed 's/\///g'))
for file in "${files[@]}"; do
echo "Removing ${file}"
rm -rf "${ROOT_FOLDER}/logs/${file}"
done
fi
}
function kill_watcher() {
local watcher_pid="$(ps aux | grep "scripts/watcher" | grep -v grep | awk '{print $2}')"
# kill it if it's running
if [[ -n $watcher_pid ]]; then
# If multiline kill each pid
if [[ $watcher_pid == *" "* ]]; then
for pid in $watcher_pid; do
# shellcheck disable=SC2086
kill -9 $pid
done
else
# shellcheck disable=SC2086
kill -9 $watcher_pid
fi
fi
# pkill -f "watcher.sh"
}
function generateTLSCert() {
local domain="$1"
# If the certificate already exists for this domain, don't generate it again
if [[ -f "traefik/tls/$domain.txt" ]] && [[ -f "traefik/tls/cert.pem" ]] && [[ -f "traefik/tls/key.pem" ]]; then
return
fi
rm -rf "traefik/tls/$domain.txt"
rm -rf "traefik/tls/cert.pem"
rm -rf "traefik/tls/key.pem"
echo "Generating TLS certificate..."
if ! openssl req -x509 -newkey rsa:4096 -keyout traefik/tls/key.pem -out traefik/tls/cert.pem -days 365 -subj "/O=runtipi.io/OU=IT/CN=*.${domain}/emailAddress=webmaster@${domain}" -addext "subjectAltName = DNS:*.${domain},DNS:${domain}" -nodes; then
echo "Failed to generate TLS certificate"
else
echo "TLS certificate generated"
# Create a file to indicate that the certificate has been generated for this domain
touch "traefik/tls/$domain.txt"
fi
}
function generate_env_file() {
echo "Generating .env file..."
env_variables=$1
json_file=$(mktemp)
echo "$env_variables" > "$json_file"
local default_tz="Etc\/UTC"
local tz="$(timedatectl | grep "Time zone" | awk '{print $3}' | sed 's/\//\\\//g')"
if [[ -z "$tz" ]]; then
tz="$default_tz"
fi
local architecture="$(uname -m | tr '[:upper:]' '[:lower:]')"
if [[ "$architecture" == "aarch64" ]] || [[ "$architecture" == "armv8"* ]]; then
architecture="arm64"
elif [[ "$architecture" == "x86_64" ]]; then
architecture="amd64"
fi
# If none of the above conditions are met, the architecture is not supported
if [[ "$architecture" != "arm64" ]] && [[ "$architecture" != "amd64" ]]; then
echo "Architecture ${architecture} not supported if you think this is a mistake, please open an issue on GitHub."
exit 1
fi
local dns_ip=$(get_json_field "$json_file" dns_ip)
local internal_ip=$(get_json_field "$json_file" internal_ip)
local jwt_secret=$(get_json_field "$json_file" jwt_secret)
local tipi_version=$(get_json_field "$json_file" tipi_version)
local nginx_port=$(get_json_field "$json_file" nginx_port)
local nginx_port_ssl=$(get_json_field "$json_file" nginx_port_ssl)
local repo_id=$(get_json_field "$json_file" repo_id)
local domain=$(get_json_field "$json_file" domain)
local postgres_password=$(get_json_field "$json_file" postgres_password)
local postgres_username=$(get_json_field "$json_file" postgres_username)
local postgres_dbname=$(get_json_field "$json_file" postgres_dbname)
local postgres_host=$(get_json_field "$json_file" postgres_host)
local postgres_port=$(get_json_field "$json_file" postgres_port)
local redis_host=$(get_json_field "$json_file" redis_host)
local demo_mode=$(get_json_field "$json_file" demo_mode)
local docker_tag=$(get_json_field "$json_file" docker_tag)
local local_domain=$(get_json_field "$json_file" local_domain)
local root_folder=$(get_json_field "$json_file" root_folder | sed 's/\//\\\//g')
local apps_repository=$(get_json_field "$json_file" apps_repository | sed 's/\//\\\//g')
local storage_path=$(get_json_field "$json_file" storage_path | sed 's/\//\\\//g')
env_file=$(mktemp)
[[ -f "${ROOT_FOLDER}/.env" ]] && rm -f "${ROOT_FOLDER}/.env"
[[ -f "$ROOT_FOLDER/templates/env-sample" ]] && cp "$ROOT_FOLDER/templates/env-sample" "$env_file"
if [[ -f "${STATE_FOLDER}/settings.json" ]]; then
# If dnsIp is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" dnsIp)" != "null" ]]; then
dns_ip=$(get_json_field "${STATE_FOLDER}/settings.json" dnsIp)
fi
# If domain is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" domain)" != "null" ]]; then
domain=$(get_json_field "${STATE_FOLDER}/settings.json" domain)
fi
# If appsRepoUrl is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" appsRepoUrl)" != "null" ]]; then
apps_repository_temp=$(get_json_field "${STATE_FOLDER}/settings.json" appsRepoUrl)
apps_repository="$(echo "${apps_repository_temp}" | sed 's/\//\\\//g')"
repo_id="$("${ROOT_FOLDER}"/scripts/git.sh get_hash "${apps_repository_temp}")"
fi
# If port is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" port)" != "null" ]]; then
nginx_port=$(get_json_field "${STATE_FOLDER}/settings.json" port)
fi
# If sslPort is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" sslPort)" != "null" ]]; then
nginx_port_ssl=$(get_json_field "${STATE_FOLDER}/settings.json" sslPort)
fi
# If listenIp is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" listenIp)" != "null" ]]; then
internal_ip=$(get_json_field "${STATE_FOLDER}/settings.json" listenIp)
fi
# If demoMode is set in settings.json, use it
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" demoMode)" == "true" ]]; then
demo_mode="true"
fi
# If storagePath is set in settings.json, use it
storage_path_settings=$(get_json_field "${STATE_FOLDER}/settings.json" storagePath)
if [[ "${storage_path_settings}" != "null" && "${storage_path_settings}" != "" ]]; then
storage_path_temp="${storage_path_settings}"
storage_path="$(echo "${storage_path_temp}" | sed 's/\//\\\//g')"
fi
if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" localDomain)" != "null" ]]; then
local_domain=$(get_json_field "${STATE_FOLDER}/settings.json" localDomain)
fi
fi
echo "Using domain ${domain} and port ${nginx_port}"
# If port is not 80 and domain is not example.com or tipi.localhost, we exit
if [[ "${nginx_port}" != "80" ]] && [[ "${domain}" != "example.com" ]] && [[ "${domain}" != "tipi.localhost" ]]; then
echo "Using a custom domain with a custom port is not supported"
exit 1
fi
os=$(uname)
sed_args=(-i)
# If os is macos, use gnu sed
if [[ "$os" == "Darwin" ]]; then
echo "Using gnu sed"
sed_args=(-i '')
fi
# Function below is modified from Umbrel
# Required Notice: Copyright
# Umbrel (https://umbrel.com)
for template in ${env_file}; do
sed "${sed_args[@]}" "s/<dns_ip>/${dns_ip}/g" "${template}"
sed "${sed_args[@]}" "s/<internal_ip>/${internal_ip}/g" "${template}"
sed "${sed_args[@]}" "s/<tz>/${tz}/g" "${template}"
sed "${sed_args[@]}" "s/<jwt_secret>/${jwt_secret}/g" "${template}"
sed "${sed_args[@]}" "s/<root_folder>/${root_folder}/g" "${template}"
sed "${sed_args[@]}" "s/<tipi_version>/${tipi_version}/g" "${template}"
sed "${sed_args[@]}" "s/<architecture>/${architecture}/g" "${template}"
sed "${sed_args[@]}" "s/<nginx_port>/${nginx_port}/g" "${template}"
sed "${sed_args[@]}" "s/<nginx_port_ssl>/${nginx_port_ssl}/g" "${template}"
sed "${sed_args[@]}" "s/<apps_repo_id>/${repo_id}/g" "${template}"
sed "${sed_args[@]}" "s/<apps_repo_url>/${apps_repository}/g" "${template}"
sed "${sed_args[@]}" "s/<domain>/${domain}/g" "${template}"
sed "${sed_args[@]}" "s/<storage_path>/${storage_path}/g" "${template}"
sed "${sed_args[@]}" "s/<postgres_password>/${postgres_password}/g" "${template}"
sed "${sed_args[@]}" "s/<postgres_username>/${postgres_username}/g" "${template}"
sed "${sed_args[@]}" "s/<postgres_dbname>/${postgres_dbname}/g" "${template}"
sed "${sed_args[@]}" "s/<postgres_port>/${postgres_port}/g" "${template}"
sed "${sed_args[@]}" "s/<postgres_host>/${postgres_host}/g" "${template}"
sed "${sed_args[@]}" "s/<redis_host>/${redis_host}/g" "${template}"
sed "${sed_args[@]}" "s/<demo_mode>/${demo_mode}/g" "${template}"
sed "${sed_args[@]}" "s/<docker_tag>/${docker_tag}/g" "${template}"
sed "${sed_args[@]}" "s/<local_domain>/${local_domain}/g" "${template}"
done
generateTLSCert "$local_domain"
mv -f "$env_file" "$ROOT_FOLDER/.env"
chmod a+rwx "$ROOT_FOLDER/.env"
}

View file

@ -1,158 +0,0 @@
#!/usr/bin/env bash
OS="$(cat /etc/[A-Za-z]*[_-][rv]e[lr]* | grep "^ID=" | cut -d= -f2 | uniq | tr '[:upper:]' '[:lower:]' | tr -d '"')"
SUB_OS="$(cat /etc/[A-Za-z]*[_-][rv]e[lr]* | grep "^ID_LIKE=" | cut -d= -f2 | uniq | tr '[:upper:]' '[:lower:]' | tr -d '"')"
function install_generic() {
local dependency="${1}"
local os="${2}"
if [[ "${os}" == "debian" ]]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y "${dependency}"
return 0
elif [[ "${os}" == "ubuntu" || "${os}" == "pop" ]]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y "${dependency}"
return 0
elif [[ "${os}" == "centos" ]]; then
sudo yum install -y --allowerasing "${dependency}"
return 0
elif [[ "${os}" == "fedora" ]]; then
sudo dnf -y install "${dependency}"
return 0
elif [[ "${os}" == "arch" ]]; then
if ! sudo pacman -Sy --noconfirm "${dependency}" ; then
if command -v yay > /dev/null 2>&1 ; then
sudo -u $SUDO_USER yay -Sy --noconfirm "${dependency}"
else
echo "Could not install \"${dependency}\", either using pacman or the yay AUR helper. Please try installing it manually."
return 1
fi
fi
return 0
else
return 1
fi
}
function install_docker() {
local os="${1}"
echo "Installing docker for os ${os}"
if [[ "${os}" == "debian" ]]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y ca-certificates curl gnupg lsb-release
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
sudo DEBIAN_FRONTEND=noninteractive apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
return 0
elif [[ "${os}" == "ubuntu" || "${os}" == "pop" ]]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y ca-certificates curl gnupg lsb-release
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
sudo DEBIAN_FRONTEND=noninteractive apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
return 0
elif [[ "${os}" == "centos" ]]; then
sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y --allowerasing docker-ce docker-ce-cli containerd.io docker-compose-plugin
sudo systemctl start docker
sudo systemctl enable docker
return 0
elif [[ "${os}" == "fedora" ]]; then
sudo dnf -y install dnf-plugins-core
sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
sudo systemctl start docker
sudo systemctl enable docker
return 0
elif [[ "${os}" == "arch" ]]; then
sudo pacman -Sy --noconfirm docker docker-compose
sudo systemctl start docker.service
sudo systemctl enable docker.service
return 0
else
return 1
fi
}
function update_docker() {
local os="${1}"
echo "Updating Docker for os ${os}" >/dev/tty
if [[ "${os}" == "debian" ]]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
return 0
elif [[ "${os}" == "ubuntu" || "${os}" == "pop" ]]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
return 0
elif [[ "${os}" == "centos" ]]; then
sudo yum install -y --allowerasing docker-ce docker-ce-cli containerd.io docker-compose-plugin
return 0
elif [[ "${os}" == "fedora" ]]; then
sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
return 0
elif [[ "${os}" == "arch" ]]; then
sudo pacman -Sy --noconfirm docker docker-compose
return 0
else
return 1
fi
}
echo "Updating system"
sudo DEBIAN_FRONTEND=noninteractive apt-get update -y
echo "Upgrading system"
sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
if ! command -v docker >/dev/null; then
echo "Installing docker"
install_docker "${OS}"
docker_result=$?
if [[ docker_result -eq 0 ]]; then
echo "Docker installed"
else
echo "Your system ${OS} is not supported trying with sub_os ${SUB_OS}"
install_docker "${SUB_OS}"
docker_sub_result=$?
if [[ docker_sub_result -eq 0 ]]; then
echo "Docker installed"
else
echo "Your system ${SUB_OS} is not supported please install docker manually"
exit 1
fi
fi
fi
function check_dependency_and_install() {
local dependency="${1}"
if ! command -v "${dependency}" >/dev/null; then
echo "Installing ${dependency}"
install_generic "${dependency}" "${OS}"
install_result=$?
if [[ install_result -eq 0 ]]; then
echo "${dependency} installed"
else
echo "Your system ${OS} is not supported trying with sub_os ${SUB_OS}"
install_generic "${dependency}" "${SUB_OS}"
install_sub_result=$?
if [[ install_sub_result -eq 0 ]]; then
echo "${dependency} installed"
else
echo "Your system ${SUB_OS} is not supported please install ${dependency} manually"
exit 1
fi
fi
fi
}
check_dependency_and_install "jq"
check_dependency_and_install "fswatch"
check_dependency_and_install "openssl"

View file

@ -1,3 +0,0 @@
#!/usr/bin/env bash
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 -t meienberger/runtipi:rc-"$(npm run version --silent)" . --push

View file

@ -1,73 +0,0 @@
#!/usr/bin/env bash
source "${BASH_SOURCE%/*}/common.sh"
ensure_pwd
ROOT_FOLDER="${PWD}"
# Get a static hash based on the repo url
# Map a repository URL to a stable identifier: the SHA-256 hex digest of
# the URL itself (used as the on-disk directory name under repos/).
get_hash() {
  url="${1}"
  printf '%s' "${url}" | sha256sum | cut -d ' ' -f 1
}
# The first positional argument selects the sub-command; default to ""
# when it is absent (`${1-}` expands to empty if $1 is unset).
command="${1-}"
# ---- clone ----------------------------------------------------------------
# `git.sh clone <url>`: clone <url> into repos/<sha256-of-url>. Cloning the
# same URL twice is a no-op (the hashed directory already exists).
if [[ "$command" = "clone" ]]; then
  repo="$2"
  repo_hash=$(get_hash "${repo}")
  write_log "Cloning ${repo} to ${ROOT_FOLDER}/repos/${repo_hash}"
  repo_dir="${ROOT_FOLDER}/repos/${repo_hash}"
  if [ -d "${repo_dir}" ]; then
    write_log "Repo already exists"
    exit 0
  fi
  write_log "Cloning ${repo} to ${repo_dir}"
  if ! git clone "${repo}" "${repo_dir}"; then
    write_log "Failed to clone repo"
    exit 1
  fi
  write_log "Done"
  exit 0
fi
# ---- update ---------------------------------------------------------------
# `git.sh update <url>`: pull the current branch of a previously cloned
# repo. safe.directory is added first because the repo may be owned by a
# different user than the one running this script, and git would refuse
# to operate on it otherwise.
if [[ "$command" = "update" ]]; then
  repo="$2"
  repo_hash=$(get_hash "${repo}")
  repo_dir="${ROOT_FOLDER}/repos/${repo_hash}"
  git config --global --add safe.directory "${repo_dir}"
  if [ ! -d "${repo_dir}" ]; then
    write_log "Repo does not exist"
    exit 1
  fi
  write_log "Updating ${repo} in ${repo_hash}"
  cd "${repo_dir}" || exit
  # Pull whatever branch is currently checked out; restore cwd on failure.
  if ! git pull origin "$(git rev-parse --abbrev-ref HEAD)"; then
    cd "${ROOT_FOLDER}" || exit
    write_log "Failed to update repo"
    exit 1
  fi
  cd "${ROOT_FOLDER}" || exit
  write_log "Done"
  exit 0
fi
# ---- get_hash -------------------------------------------------------------
# `git.sh get_hash <url>`: print the hash used as the repo's directory name.
if [[ "$command" = "get_hash" ]]; then
  repo="$2"
  get_hash "${repo}"
  exit
fi

View file

@ -13,107 +13,147 @@ if [[ "$ARCHITECTURE" == "armv7"* ]] || [[ "$ARCHITECTURE" == "i686" ]] || [[ "$
fi fi
# Detect the distribution from the /etc/*release / /etc/*version files:
# ID= is the distro id (e.g. "ubuntu"); ID_LIKE= is its parent family
# (e.g. "debian"). Both are lowercased with surrounding quotes stripped.
OS="$(cat /etc/[A-Za-z]*[_-][rv]e[lr]* | grep "^ID=" | cut -d= -f2 | uniq | tr '[:upper:]' '[:lower:]' | tr -d '"')"
SUB_OS="$(cat /etc/[A-Za-z]*[_-][rv]e[lr]* | grep "^ID_LIKE=" | cut -d= -f2 | uniq | tr '[:upper:]' '[:lower:]' | tr -d '"')"
# install_generic <dependency> <os>
#
# Install a single package with the distribution's native package manager.
# Returns 0 on success, 1 when the OS is unsupported or (on arch) when both
# pacman and the yay AUR helper fail. Debian, Ubuntu and Pop!_OS share one
# apt-based branch (the three originals were byte-identical).
function install_generic() {
  local dependency="${1}"
  local os="${2}"
  if [[ "${os}" == "debian" || "${os}" == "ubuntu" || "${os}" == "pop" ]]; then
    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y "${dependency}"
    return 0
  elif [[ "${os}" == "centos" ]]; then
    sudo yum install -y --allowerasing "${dependency}"
    return 0
  elif [[ "${os}" == "fedora" ]]; then
    sudo dnf -y install "${dependency}"
    return 0
  elif [[ "${os}" == "arch" ]]; then
    if ! sudo pacman -Sy --noconfirm "${dependency}"; then
      if command -v yay >/dev/null 2>&1; then
        # Run yay as the invoking (non-root) user — AUR helpers refuse root.
        # Fix: quote $SUDO_USER so an unusual value cannot word-split.
        sudo -u "${SUDO_USER}" yay -Sy --noconfirm "${dependency}"
      else
        echo "Could not install \"${dependency}\", either using pacman or the yay AUR helper. Please try installing it manually."
        return 1
      fi
    fi
    return 0
  else
    return 1
  fi
}
# install_docker <os>
#
# Install Docker Engine and the compose plugin from Docker's official
# repositories for the given distribution. Returns 0 on success, 1 for an
# unsupported OS. On CentOS/Fedora/Arch the docker service is also started
# and enabled; on apt-based systems the installer packages handle that.
function install_docker() {
  local os="${1}"
  echo "Installing docker for os ${os}"
  if [[ "${os}" == "debian" ]]; then
    # Add Docker's GPG key and apt repository, then install the engine.
    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y ca-certificates curl gnupg lsb-release
    sudo mkdir -p /etc/apt/keyrings
    curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
    sudo DEBIAN_FRONTEND=noninteractive apt-get update -y
    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
    return 0
  elif [[ "${os}" == "ubuntu" || "${os}" == "pop" ]]; then
    # Same flow as debian, but pointed at the ubuntu package repository.
    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y ca-certificates curl gnupg lsb-release
    sudo mkdir -p /etc/apt/keyrings
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
    sudo DEBIAN_FRONTEND=noninteractive apt-get update -y
    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
    return 0
  elif [[ "${os}" == "centos" ]]; then
    sudo yum install -y yum-utils
    sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    sudo yum install -y --allowerasing docker-ce docker-ce-cli containerd.io docker-compose-plugin
    sudo systemctl start docker
    sudo systemctl enable docker
    return 0
  elif [[ "${os}" == "fedora" ]]; then
    sudo dnf -y install dnf-plugins-core
    sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
    sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
    sudo systemctl start docker
    sudo systemctl enable docker
    return 0
  elif [[ "${os}" == "arch" ]]; then
    # Arch installs from the distro repositories instead of docker.com.
    sudo pacman -Sy --noconfirm docker docker-compose
    sudo systemctl start docker.service
    sudo systemctl enable docker.service
    return 0
  else
    return 1
  fi
}
# --- System preparation -----------------------------------------------------
# Refresh and upgrade the package index before installing anything.
# NOTE(review): apt-get is used unconditionally even though the OS detection
# supports yum/dnf/pacman systems — on non-Debian hosts these two commands
# simply fail (no set -e in this script); confirm this is intended.
echo "Updating system"
sudo DEBIAN_FRONTEND=noninteractive apt-get update -y
echo "Upgrading system"
sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
# Install Docker only when it is not already on PATH. Try the detected ${OS}
# first; on failure fall back to the ID_LIKE family ${SUB_OS}. Abort if both
# attempts fail.
if ! command -v docker >/dev/null; then
  echo "Installing docker"
  install_docker "${OS}"
  docker_result=$?
  # Bare word inside [[ ]] with -eq is arithmetically expanded to the
  # variable's value, so this works; ${docker_result} would be clearer.
  if [[ docker_result -eq 0 ]]; then
    echo "Docker installed"
  else
    echo "Your system ${OS} is not supported trying with sub_os ${SUB_OS}"
    install_docker "${SUB_OS}"
    docker_sub_result=$?
    if [[ docker_sub_result -eq 0 ]]; then
      echo "Docker installed"
    else
      echo "Your system ${SUB_OS} is not supported please install docker manually"
      exit 1
    fi
  fi
fi
# check_dependency_and_install <dependency>
#
# Ensure a command-line dependency is available, installing it when missing.
# Tries install_generic with the detected ${OS} first, then falls back to
# the ID_LIKE family ${SUB_OS}; exits the script with status 1 if both fail.
# Globals read: OS, SUB_OS. No-op when the command already exists.
function check_dependency_and_install() {
  local dependency="${1}"
  # Nothing to do if the binary is already on the PATH.
  if ! command -v "${dependency}" >/dev/null; then
    echo "Installing ${dependency}"
    # Fix: test the installer's exit status directly instead of the fragile
    # bare-word arithmetic form `[[ install_result -eq 0 ]]`.
    if install_generic "${dependency}" "${OS}"; then
      echo "${dependency} installed"
    else
      echo "Your system ${OS} is not supported trying with sub_os ${SUB_OS}"
      if install_generic "${dependency}" "${SUB_OS}"; then
        echo "${dependency} installed"
      else
        echo "Your system ${SUB_OS} is not supported please install ${dependency} manually"
        exit 1
      fi
    fi
  fi
}
# Example
# check_dependency_and_install "openssl"
LATEST_VERSION=$(curl -s https://api.github.com/repos/meienberger/runtipi/releases/latest | grep tag_name | cut -d '"' -f4) LATEST_VERSION=$(curl -s https://api.github.com/repos/meienberger/runtipi/releases/latest | grep tag_name | cut -d '"' -f4)
### -------------------------------- LATEST_ASSET="runtipi-cli-linux-x64"
### CLI arguments if [ "$ARCHITECTURE" == "arm64" ] || [ "$ARCHITECTURE" == "aarch64" ]; then
### -------------------------------- LATEST_ASSET="runtipi-cli-linux-arm64"
# Parse CLI options: --update makes the installer run in the current
# directory (used by the update flow) instead of a fresh ./runtipi folder.
UPDATE="false"
while [ -n "${1-}" ]; do
  case "$1" in
  --update) UPDATE="true" ;;
  --)
    shift # The double dash makes them parameters
    break
    ;;
  *) echo "Option $1 not recognized" && exit 1 ;;
  esac
  shift
done
if [[ "${UPDATE}" == "false" ]]; then
mkdir -p runtipi
cd runtipi || exit
fi fi
curl --location https://api.github.com/repos/meienberger/runtipi/tarball/"${LATEST_VERSION}" -o runtipi.tar.gz URL="https://github.com/meienberger/runtipi/releases/download/$LATEST_VERSION/$LATEST_ASSET"
mkdir runtipi-"${LATEST_VERSION}"
tar -xzf runtipi.tar.gz -C runtipi-"${LATEST_VERSION}" --strip-components=1
rm runtipi.tar.gz
# copy from downloaded /scripts/* curl --location "$URL" -o runtipi-cli
if [ -d "scripts" ]; then
rm -rf scripts
fi
mkdir scripts
cp -r runtipi-"${LATEST_VERSION}"/scripts/* ./scripts
# copy from downloaded /templates/* sudo ./runtipi-cli start
if [ -d "templates" ]; then
rm -rf templates
fi
mkdir templates
cp -r runtipi-"${LATEST_VERSION}"/templates/* ./templates
# copy from downloaded /traefik/*
if [ -d "traefik" ]; then
mv traefik traefik_old
fi
mkdir traefik
cp -r runtipi-"${LATEST_VERSION}"/traefik/* ./traefik
if [ -d "traefik_old" ] && [ -d "traefik_old/tls" ]; then
## move old traefik TLS config to new traefik config
cp -r traefik_old/tls traefik
rm -rf traefik_old
fi
# copy from downloaded /docker-compose.yml
if [ -f "docker-compose.yml" ]; then
rm -f docker-compose.yml
fi
cp -r runtipi-"${LATEST_VERSION}"/docker-compose.yml .
# copy from downloaded /package.json
if [ -f "package.json" ]; then
rm -f package.json
fi
cp -r runtipi-"${LATEST_VERSION}"/package.json .
# Create the runtime directory layout; mkdir -p keeps this idempotent.
for dir in \
  apps app-data state repos \
  traefik/shared traefik/tls \
  media/torrents media/torrents/watch media/torrents/complete media/torrents/incomplete \
  media/usenet media/usenet/watch media/usenet/complete media/usenet/incomplete \
  media/downloads media/downloads/watch media/downloads/complete media/downloads/incomplete \
  media/data media/data/books media/data/comics media/data/movies media/data/music \
  media/data/tv media/data/podcasts media/data/images media/data/roms; do
  mkdir -p "${dir}"
done
## remove downloaded folder
rm -rf runtipi-"${LATEST_VERSION}"
sudo ./scripts/start.sh

View file

@ -1,12 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
source "${BASH_SOURCE%/*}/common.sh"
ensure_pwd
ROOT_FOLDER="$(pwd)"
STATE_FOLDER="${ROOT_FOLDER}/state"
# Create file request-password-change in state folder
touch "${STATE_FOLDER}/password-change-request"

View file

@ -1,75 +0,0 @@
#!/usr/bin/env bash
# Start Tipi in development mode: prepare the shared state files, launch
# the background event watcher, generate a .env with dev defaults and
# bring up the docker-compose.dev.yml stack with a live build.
set -o errexit
set -o nounset
set -o pipefail
# TRACE=1 enables shell tracing for debugging.
if [[ "${TRACE-0}" == "1" ]]; then
  set -o xtrace
fi
source "${BASH_SOURCE%/*}/common.sh"
clean_logs
### --------------------------------
### General variables
### --------------------------------
ROOT_FOLDER="${PWD}"
STATE_FOLDER="${ROOT_FOLDER}/state"
### --------------------------------
### Apps repository configuration
### --------------------------------
apps_repository="https://github.com/meienberger/runtipi-appstore"
# Development defaults consumed by generate_env_file (from common.sh).
# Secrets are fixed placeholder values — never use these in production.
env_variables_json=$(cat <<EOF
{
"dns_ip": "9.9.9.9",
"domain": "example.com",
"root_folder": "${ROOT_FOLDER}",
"nginx_port": 3000,
"nginx_port_ssl": 443,
"jwt_secret": "secret",
"postgres_password": "postgres",
"postgres_username": "tipi",
"postgres_dbname": "tipi",
"postgres_port": 5432,
"postgres_host": "tipi-db",
"redis_host": "tipi-redis",
"tipi_version": "$(get_json_field "${ROOT_FOLDER}/package.json" version)",
"internal_ip": "localhost",
"demo_mode": false,
"local_domain": "tipi.lan",
"apps_repository": "${apps_repository}",
"storage_path": "${ROOT_FOLDER}",
"repo_id": "$("${ROOT_FOLDER}"/scripts/git.sh get_hash ${apps_repository})"
}
EOF
)
### --------------------------------
### Watcher and system-info
### --------------------------------
# Make sure the files shared with the dashboard exist and are world-writable
# (the dashboard container appends events while running as another user).
mkdir -p "${ROOT_FOLDER}/state"
if [[ ! -f "${ROOT_FOLDER}/state/events" ]]; then
  touch "${ROOT_FOLDER}/state/events"
fi
if [[ ! -f "${ROOT_FOLDER}/state/system-info.json" ]]; then
  echo "{}" >"${ROOT_FOLDER}/state/system-info.json"
fi
chmod -R a+rwx "${ROOT_FOLDER}/state/events"
chmod -R a+rwx "${ROOT_FOLDER}/state/system-info.json"
# Replace any previously running watcher with a fresh background instance.
kill_watcher
"${ROOT_FOLDER}/scripts/watcher.sh" &
### --------------------------------
### env file generation
### --------------------------------
generate_env_file "${env_variables_json}"
### --------------------------------
### Start the project
### --------------------------------
docker compose -f docker-compose.dev.yml up --build

View file

@ -1,107 +0,0 @@
#!/usr/bin/env bash
# Start Tipi for end-to-end tests: run the configure script, seed the
# state folder, generate a .env with fixed test values plus the docker
# image tag under test, start the watcher and bring up docker-compose.e2e.yml.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${TRACE-0}" == "1" ]]; then
  set -o xtrace
fi
export DEBIAN_FRONTEND=noninteractive
source "${BASH_SOURCE%/*}/common.sh"
clean_logs
### --------------------------------
### General variables
### --------------------------------
ROOT_FOLDER="${PWD}"
STATE_FOLDER="${ROOT_FOLDER}/state"
## Comes from first argument
DOCKER_TAG="${1}"
echo "Starting e2e tests with tag meienberger/runtipi:${DOCKER_TAG}"
### --------------------------------
### Pre-configuration
### --------------------------------
sudo "${ROOT_FOLDER}/scripts/configure.sh"
mkdir -p "${ROOT_FOLDER}/state"
STATE_FOLDER="${ROOT_FOLDER}/state"
mkdir -p traefik
mkdir -p traefik/shared
mkdir -p traefik/tls
# Generate the 32-character random seed on first run only.
if [[ ! -f "${STATE_FOLDER}/seed" ]]; then
  echo "Generating seed..."
  mkdir -p "${STATE_FOLDER}"
  touch "${STATE_FOLDER}/seed"
  # head closes the pipe early, so the pipeline exits non-zero under
  # pipefail; the `!` guard keeps errexit from aborting the script here.
  if ! tr </dev/urandom -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1 >"${STATE_FOLDER}/seed"; then
    echo "Created seed file..."
  fi
fi
### --------------------------------
### Apps repository configuration
### --------------------------------
apps_repository="https://github.com/meienberger/runtipi-appstore"
# Fixed test credentials/values consumed by generate_env_file; docker_tag
# selects the image under test.
env_variables_json=$(cat <<EOF
{
"dns_ip": "9.9.9.9",
"domain": "example.com",
"root_folder": "${ROOT_FOLDER}",
"nginx_port": 80,
"nginx_port_ssl": 443,
"jwt_secret": "secret",
"postgres_password": "postgres",
"postgres_username": "tipi",
"postgres_dbname": "tipi",
"postgres_port": 5432,
"postgres_host": "tipi-db",
"redis_host": "tipi-redis",
"local_domain": "tipi.lan",
"tipi_version": "$(get_json_field "${ROOT_FOLDER}/package.json" version)",
"internal_ip": "localhost",
"demo_mode": false,
"apps_repository": "${apps_repository}",
"storage_path": "${ROOT_FOLDER}",
"repo_id": "$("${ROOT_FOLDER}"/scripts/git.sh get_hash "${apps_repository}")",
"docker_tag": "${DOCKER_TAG}"
}
EOF
)
### --------------------------------
### Watcher and system-info
### --------------------------------
# The events and system-info files are shared with the dashboard container,
# hence the world-writable permissions.
echo "creating events file"
if [[ ! -f "${ROOT_FOLDER}/state/events" ]]; then
  touch "${ROOT_FOLDER}/state/events"
fi
echo "creating system-info file"
if [[ ! -f "${ROOT_FOLDER}/state/system-info.json" ]]; then
  echo "{}" >"${ROOT_FOLDER}/state/system-info.json"
fi
chmod -R a+rwx "${ROOT_FOLDER}/state/events"
chmod -R a+rwx "${ROOT_FOLDER}/state/system-info.json"
echo "kill previous watcher"
kill_watcher
echo "starting watcher"
# Detach fully so the watcher survives this script's exit.
nohup "${ROOT_FOLDER}/scripts/watcher.sh" > /dev/null 2>&1 &
### --------------------------------
### env file generation
### --------------------------------
echo "Generating env file..."
generate_env_file "${env_variables_json}"
### --------------------------------
### Start the project
### --------------------------------
echo "Starting docker-compose..."
docker compose -f docker-compose.e2e.yml up -d --build

View file

@ -1,173 +0,0 @@
#!/usr/bin/env bash
# Production start script (part 1): validate the environment, create the
# secret-derivation seed, and determine the IP address the dashboard
# should listen on (settings.json override, or auto-detection with
# interactive disambiguation).
set -o errexit
set -o nounset
set -o pipefail
if [[ "${TRACE-0}" == "1" ]]; then
  set -o xtrace
fi
source "${BASH_SOURCE%/*}/common.sh"
ROOT_FOLDER="${PWD}"
# Cleanup and ensure environment
ensure_linux
ensure_pwd
ensure_root
clean_logs
### --------------------------------
### Pre-configuration
### --------------------------------
"${ROOT_FOLDER}/scripts/configure.sh"
STATE_FOLDER="${ROOT_FOLDER}/state"
# Create the 32-character random seed (used to derive secrets) on first run.
if [[ ! -f "${STATE_FOLDER}/seed" ]]; then
  echo "Generating seed..."
  # head closes the pipe early, so the pipeline exits non-zero under
  # pipefail; the `!` guard keeps errexit from aborting the script here.
  if ! tr </dev/urandom -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1 >"${STATE_FOLDER}/seed"; then
    echo "Created seed file..."
  fi
fi
### --------------------------------
### General variables
### --------------------------------
apps_repository="https://github.com/meienberger/runtipi-appstore"
INTERNAL_IP=
# An explicit listenIp in settings.json wins over auto-detection.
if [[ -f "${STATE_FOLDER}/settings.json" ]]; then
  # If listenIp is set in settings.json, use it
  if [[ "$(get_json_field "${STATE_FOLDER}/settings.json" listenIp)" != "null" ]]; then
    INTERNAL_IP=$(get_json_field "${STATE_FOLDER}/settings.json" listenIp)
  fi
fi
# Otherwise auto-detect: pick the default-route interface (prompting the
# user when several exist), then its IPv4 address (again prompting when
# the interface carries several addresses).
if [[ -z "${INTERNAL_IP:-}" ]]; then
  network_interface="$(ip route | grep default | awk '{print $5}' | uniq)"
  network_interface_count=$(echo "$network_interface" | wc -l)
  if [[ "$network_interface_count" -eq 0 ]]; then
    echo "No network interface found!"
    exit 1
  elif [[ "$network_interface_count" -gt 1 ]]; then
    echo "Found multiple network interfaces. Please select one of the following interfaces:"
    echo "$network_interface"
    while true; do
      read -rp "> " USER_NETWORK_INTERFACE
      # grep -x requires an exact whole-line match against the listed names.
      if echo "$network_interface" | grep -x "$USER_NETWORK_INTERFACE"; then
        network_interface="$USER_NETWORK_INTERFACE"
        break
      else
        echo "Please select one of the interfaces above. (CTRL+C to abort)"
      fi
    done
  fi
  INTERNAL_IP="$(ip addr show "${network_interface}" | grep "inet " | awk '{print $2}' | cut -d/ -f1)"
  internal_ip_count=$(echo "$INTERNAL_IP" | wc -l)
  if [[ "$internal_ip_count" -eq 0 ]]; then
    echo "No IP address found for network interface ${network_interface}! Set the IP address manually with --listen-ip or with the listenIp field in settings.json."
    exit 1
  elif [[ "$internal_ip_count" -gt 1 ]]; then
    echo "Found multiple IP addresses for network interface ${network_interface}. Please select one of the following IP addresses:"
    echo "$INTERNAL_IP"
    while true; do
      read -rp "> " USER_INTERNAL_IP
      if echo "$INTERNAL_IP" | grep -x "$USER_INTERNAL_IP"; then
        INTERNAL_IP="$USER_INTERNAL_IP"
        break
      else
        echo "Please select one of the IP addresses above. (CTRL+C to abort)"
      fi
    done
  fi
fi
# Values fed to generate_env_file; the jwt and postgres secrets are
# derived deterministically from the seed via derive_entropy (common.sh).
env_variables_json=$(cat <<EOF
{
"dns_ip": "9.9.9.9",
"internal_ip": "${INTERNAL_IP}",
"jwt_secret": "$(derive_entropy "jwt")",
"root_folder": "${ROOT_FOLDER}",
"tipi_version": "$(get_json_field "${ROOT_FOLDER}/package.json" version)",
"nginx_port": 80,
"nginx_port_ssl": 443,
"postgres_password": "$(derive_entropy "postgres")",
"postgres_username": "tipi",
"postgres_dbname": "tipi",
"postgres_port": 5432,
"postgres_host": "tipi-db",
"redis_host": "tipi-redis",
"local_domain": "tipi.lan",
"repo_id": "$("${ROOT_FOLDER}"/scripts/git.sh get_hash "${apps_repository}")",
"apps_repository": "${apps_repository}",
"domain": "example.com",
"storage_path": "${ROOT_FOLDER}",
"demo_mode": false
}
EOF
)
echo "Generating config files..."
write_log "Final values: \n${env_variables_json}"
generate_env_file "${env_variables_json}"
### --------------------------------
### Watcher and system-info
### --------------------------------
echo "Running system-info.sh..."
"${ROOT_FOLDER}/scripts/system-info.sh"
# Restart the background event watcher.
kill_watcher
"${ROOT_FOLDER}/scripts/watcher.sh" &
### --------------------------------
### Start the project
### --------------------------------
# rc=true (environment variable) switches to the release-candidate compose
# file; both paths pull images first and fail loudly if the stack does
# not come up.
if [[ "${rc-false}" == "true" ]]; then
  docker compose -f docker-compose.rc.yml --env-file "${ROOT_FOLDER}/.env" pull
  # Run docker compose
  docker compose -f docker-compose.rc.yml --env-file "${ROOT_FOLDER}/.env" up --detach --remove-orphans --build || {
    echo "Failed to start containers"
    exit 1
  }
else
  docker compose --env-file "${ROOT_FOLDER}/.env" pull
  # Run docker compose
  docker compose --env-file "${ROOT_FOLDER}/.env" up --detach --remove-orphans --build || {
    echo "Failed to start containers"
    exit 1
  }
fi
echo "Tipi is now running"
echo ""
# Friendly banner; quoted heredoc delimiter ("EOF") disables expansion.
cat <<"EOF"
_,.
,` -.)
'( _/'-\\-.
/,|`--._,-^| ,
\_| |`-._/|| ,'|
| `-, / | / /
| || | / /
`r-._||/ __ / /
__,-<_ )`-/ `./ /
' \ `---' \ / /
| |./ /
/ // /
\_/' \ |/ /
| | _,^-'/ /
| , `` (\/ /_
\,.->._ \X-=/^
( / `-._//^`
`Y-.____(__}
| {__)
()`
EOF
echo ""
echo "Visit http://${INTERNAL_IP}/ to view the dashboard"
echo ""

View file

@ -1,32 +0,0 @@
#!/usr/bin/env bash
# Stop Tipi: stop every installed app first, kill the event watcher, then
# take the main docker compose stack down.
set -euo pipefail
source "${BASH_SOURCE%/*}/common.sh"
ensure_pwd
ensure_root
ROOT_FOLDER="${PWD}"
ENV_FILE="${ROOT_FOLDER}/.env"
# Extract STORAGE_PATH from the .env file, skipping comment lines.
STORAGE_PATH=$(grep -v '^#' "${ENV_FILE}" | xargs -n 1 | grep STORAGE_PATH | cut -d '=' -f2)
# Stop all installed apps if there are any
apps_folder="${ROOT_FOLDER}/apps"
# find counts the apps folder itself, hence "-gt 1" means "at least one app".
if [ "$(find "${apps_folder}" -maxdepth 1 -type d | wc -l)" -gt 1 ]; then
  # NOTE(review): parsing ls output breaks on names with whitespace —
  # presumably acceptable because app ids are slug-like; confirm.
  apps_names=($(ls -d "${apps_folder}"/*/ | xargs -n 1 basename | sed 's/\///g'))
  for app_name in "${apps_names[@]}"; do
    # if folder ${ROOT_FOLDER}/app-data/app_name exists, then stop app
    if [[ -d "${STORAGE_PATH}/app-data/${app_name}" ]]; then
      echo "Stopping ${app_name}"
      "${ROOT_FOLDER}/scripts/app.sh" stop "$app_name"
    fi
  done
else
  echo "No app installed that can be stopped."
fi
kill_watcher
echo "Stopping tipi..."
echo
docker compose down --remove-orphans --rmi local

View file

@ -1,35 +0,0 @@
#!/usr/bin/env bash
# Collect CPU, memory and disk statistics and write them as JSON to
# state/system-info.json for the dashboard to read.
set -e # Exit immediately if a command exits with a non-zero status.
ROOT_FOLDER="${PWD}"
STATE_FOLDER="${ROOT_FOLDER}/state"
# Non-Linux hosts (e.g. macOS dev machines): emit a zeroed payload and stop,
# since /proc, `df -B` and `top -bn1` below are Linux-specific.
if [[ "$(uname)" != "Linux" ]]; then
  echo '{"cpu": { "load": 0 },"memory": { "available": 0, "total": 0, "used": 0 },"disk": { "available": 0, "total": 0, "used": 0 }}' >"${STATE_FOLDER}/system-info.json"
  exit 0
fi
# Disk usage of the root filesystem, in bytes (-P keeps df output one-line).
TOTAL_DISK_SPACE_BYTES=$(df -P -B 1 / | tail -n 1 | awk '{print $2}')
AVAILABLE_DISK_SPACE_BYTES=$(df -P -B 1 / | tail -n 1 | awk '{print $4}')
USED_DISK_SPACE_BYTES=$((TOTAL_DISK_SPACE_BYTES - AVAILABLE_DISK_SPACE_BYTES))
# CPU load: 100 minus the idle percentage reported by top.
CPU_LOAD_PERCENTAGE=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}')
# Memory, converted from /proc/meminfo kB to bytes.
MEM_TOTAL_BYTES=$(($(grep </proc/meminfo MemTotal | awk '{print $2}') * 1024))
MEM_AVAILABLE_BYTES=$(($(grep </proc/meminfo MemAvailable | awk '{print $2}') * 1024))
MEM_USED_BYTES=$((MEM_TOTAL_BYTES - MEM_AVAILABLE_BYTES))
# Stage the JSON in a temp file, then copy it into the state folder.
TEMP_JSON_FILE=$(mktemp)
echo '{ "cpu": { "load": '"${CPU_LOAD_PERCENTAGE}"' }, "memory": { "total": '"${MEM_TOTAL_BYTES}"' , "used": '"${MEM_USED_BYTES}"', "available": '"${MEM_AVAILABLE_BYTES}"' }, "disk": { "total": '"${TOTAL_DISK_SPACE_BYTES}"' , "used": '"${USED_DISK_SPACE_BYTES}"', "available": '"${AVAILABLE_DISK_SPACE_BYTES}"' } }' >"${TEMP_JSON_FILE}"
# Write to state file
cat "${TEMP_JSON_FILE}" >"${STATE_FOLDER}/system-info.json"
# Fix: delete the temp file — the original leaked one per invocation.
rm -f "${TEMP_JSON_FILE}"
chmod -R a+rwx "${ROOT_FOLDER}/state/system-info.json"

View file

@ -1,66 +0,0 @@
#!/usr/bin/env bash
source "${BASH_SOURCE%/*}/common.sh"
ensure_pwd
ROOT_FOLDER="${PWD}"
# Sub-command comes from the first positional argument; empty when absent
# (`${1-}` expands to "" if $1 is unset).
command="${1-}"
# Update Tipi to the latest GitHub release: stop the stack, back up the
# current scripts/templates/traefik/compose files under backups/, then
# download and run the latest install.sh with --update and clean up.
function update() {
  write_log "Updating Tipi..."
  local current_version=$(get_json_field "${ROOT_FOLDER}/package.json" version)
  # check latest version
  local latest=$(curl -s https://api.github.com/repos/meienberger/runtipi/releases/latest | grep tag_name | cut -d '"' -f4)
  scripts/stop.sh
  # backup current version to backups/${current_version}/
  local timestamp=$(date +%s)
  local backup_folder="${ROOT_FOLDER}/backups/${current_version}-${timestamp}"
  mkdir -p "${backup_folder}"
  cp -r "${ROOT_FOLDER}/scripts" "${backup_folder}"
  cp -r "${ROOT_FOLDER}/templates" "${backup_folder}"
  cp -r "${ROOT_FOLDER}/traefik" "${backup_folder}"
  cp -r "${ROOT_FOLDER}/package.json" "${backup_folder}"
  cp -r "${ROOT_FOLDER}/docker-compose.yml" "${backup_folder}"
  # download install.sh from latest release to install-${latest_version}.sh
  curl -L https://raw.githubusercontent.com/meienberger/runtipi/master/scripts/install.sh >install-"${latest}".sh
  chmod +x ./install-"${latest}".sh
  # run install-${latest_version}.sh — it restarts the stack itself
  ./install-"${latest}".sh --update
  # remove install-${latest_version}.sh and the installer's leftovers
  rm install-"${latest}".sh
  rm -rf runtipi-"${latest}"
  rm -rf runtipi.tar.gz
  exit 0
}
# Restart the whole stack by running the stop and start scripts in order.
# Paths are relative, so this relies on the caller's cwd being the Tipi
# root folder (guaranteed by ensure_pwd at script start).
function restart() {
  write_log "Restarting Tipi..."
  scripts/stop.sh
  scripts/start.sh
  exit
}
# Dispatch the requested sub-command; anything unrecognized (or an empty
# command) simply falls through and the script exits normally.
case "$command" in
restart)
  restart
  ;;
update)
  update
  ;;
esac

View file

@ -26,4 +26,4 @@ rm -rf "${ROOT_FOLDER}/data/postgres"
mkdir -p "${ROOT_FOLDER}/app-data" mkdir -p "${ROOT_FOLDER}/app-data"
cd "$ROOT_FOLDER" || echo "" cd "$ROOT_FOLDER" || echo ""
"${ROOT_FOLDER}/scripts/start.sh" sudo ./runtipi-cli start

View file

@ -1,120 +0,0 @@
#!/usr/bin/env bash
source "${BASH_SOURCE%/*}/common.sh"
ROOT_FOLDER="${PWD}"
WATCH_FILE="${ROOT_FOLDER}/state/events"
# Reset the events queue: make sure the watch file exists, truncate it to
# a single empty line, and keep it world-writable so the dashboard
# container (running as another user) can append to it.
function clean_events() {
  # Create the file if it doesn't exist
  [[ -f "${WATCH_FILE}" ]] || touch "${WATCH_FILE}"
  # Equivalent to `echo "" >`: leave exactly one newline in the file.
  printf '\n' >"$WATCH_FILE"
  chmod -R a+rwx "${ROOT_FOLDER}/state/events"
}
# set_status <id> <status>
# Rewrite the line for event <id> in the watch file so its status field
# becomes <status>. BSD sed (non-Linux, e.g. macOS) needs `-i ''` and uses
# a simpler pattern; on Linux the replacement text is slash-escaped first
# because the id/status may contain characters sed treats specially.
function set_status() {
  local id=$1
  local status=$2
  write_log "Setting status for ${id} to ${status}"
  # Update the status of the event
  if [[ "$(uname)" != "Linux" ]]; then
    sed -i '' "s/${id} [a-z]*/${id} ${status}/g" "${WATCH_FILE}"
  else
    sed -i "s/${id}.*$/$(echo "${id} ${status}" | sed 's/\//\\\//g')/" "$WATCH_FILE"
  fi
}
# run_command <script> <id> [args...]
# Mark event <id> as running, execute <script> with its combined output
# appended to logs/<id>.log, then record "success" or "error" depending
# on the script's exit status.
function run_command() {
  local command_path="${1}"
  local id=$2
  shift 2
  set_status "$id" "running"
  if $command_path "$@" >>"${ROOT_FOLDER}/logs/${id}.log" 2>&1; then
    set_status "$id" "success"
  else
    set_status "$id" "error"
  fi
}
# select_command <event-line>
# Parse one line of the events file — "<command> <id> <status> <args...>" —
# and dispatch it to the matching script. Only lines whose status field is
# "waiting" are executed; everything else is ignored.
function select_command() {
  # Example command:
  # clone_repo id waiting "args"
  local command=$(echo "$1" | cut -d ' ' -f 1)
  local id=$(echo "$1" | cut -d ' ' -f 2)
  local status=$(echo "$1" | cut -d ' ' -f 3)
  local args=$(echo "$1" | cut -d ' ' -f 4-)
  if [[ "$status" != "waiting" ]]; then
    return 0
  fi
  write_log "Executing command ${command}"
  if [ -z "$command" ]; then
    return 0
  fi
  case "$command" in
  clone_repo)
    run_command "${ROOT_FOLDER}/scripts/git.sh" "$id" "clone" "$args"
    ;;
  update_repo)
    run_command "${ROOT_FOLDER}/scripts/git.sh" "$id" "update" "$args"
    ;;
  app)
    # Args example: "start filebrowser" — split into action and app name.
    local arg1=$(echo "$args" | cut -d ' ' -f 1)
    local arg2=$(echo "$args" | cut -d ' ' -f 2)
    run_command "${ROOT_FOLDER}/scripts/app.sh" "$id" "$arg1" "$arg2"
    ;;
  system_info)
    run_command "${ROOT_FOLDER}/scripts/system-info.sh" "$id"
    ;;
  update)
    run_command "${ROOT_FOLDER}/scripts/system.sh" "$id" "update"
    ;;
  restart)
    run_command "${ROOT_FOLDER}/scripts/system.sh" "$id" "restart"
    ;;
  esac
  return 0
}
write_log "Listening for events in ${WATCH_FILE}..."
clean_events
# Listen in for changes in the WATCH_FILE
# fswatch -0 emits NUL-terminated change notifications; each notification
# triggers one read of the LAST line of the file (the newest queued event).
fswatch -0 "${WATCH_FILE}" | while read -d ""; do
  # Read the command from the last line of the file
  command=$(tail -n 1 "${WATCH_FILE}")
  status=$(echo "$command" | cut -d ' ' -f 3)
  # Only lines in the "waiting" state are dispatched; blank lines and
  # running/success/error states are ignored.
  if [ -z "$command" ] || [ "$status" != "waiting" ]; then
    continue
  else
    select_command "$command"
  fi
done

View file

@ -1,32 +0,0 @@
/**
 * Category identifiers an appstore app can be tagged with. Kept as a
 * `const` object so the runtime values and the derived `AppCategory`
 * union stay defined in a single place.
 */
export const APP_CATEGORIES = {
  NETWORK: 'network',
  MEDIA: 'media',
  DEVELOPMENT: 'development',
  AUTOMATION: 'automation',
  SOCIAL: 'social',
  UTILITIES: 'utilities',
  PHOTOGRAPHY: 'photography',
  SECURITY: 'security',
  FEATURED: 'featured',
  BOOKS: 'books',
  DATA: 'data',
  MUSIC: 'music',
  FINANCE: 'finance',
  GAMING: 'gaming',
  AI: 'ai',
} as const;
/** Union of every valid category value, e.g. 'network' | 'media' | …. */
export type AppCategory = (typeof APP_CATEGORIES)[keyof typeof APP_CATEGORIES];
/**
 * Input-field types supported by an app's config form (used when
 * rendering and validating per-app settings).
 */
export const FIELD_TYPES = {
  TEXT: 'text',
  PASSWORD: 'password',
  EMAIL: 'email',
  NUMBER: 'number',
  FQDN: 'fqdn',
  IP: 'ip',
  FQDNIP: 'fqdnip',
  URL: 'url',
  RANDOM: 'random',
  BOOLEAN: 'boolean',
} as const;

View file

@ -1,25 +0,0 @@
# Only edit this file if you know what you are doing!
# It will be overwritten on update.
APPS_REPO_ID=<apps_repo_id>
APPS_REPO_URL=<apps_repo_url>
TZ=<tz>
INTERNAL_IP=<internal_ip>
DNS_IP=<dns_ip>
ARCHITECTURE=<architecture>
TIPI_VERSION=<tipi_version>
JWT_SECRET=<jwt_secret>
ROOT_FOLDER_HOST=<root_folder>
NGINX_PORT=<nginx_port>
NGINX_PORT_SSL=<nginx_port_ssl>
DOMAIN=<domain>
STORAGE_PATH=<storage_path>
POSTGRES_HOST=<postgres_host>
POSTGRES_DBNAME=<postgres_dbname>
POSTGRES_USERNAME=<postgres_username>
POSTGRES_PASSWORD=<postgres_password>
POSTGRES_PORT=<postgres_port>
REDIS_HOST=<redis_host>
DEMO_MODE=<demo_mode>
LOCAL_DOMAIN=<local_domain>
DOCKER_TAG=<docker_tag>

View file

@ -1,3 +0,0 @@
mkcert -install
mkcert -cert-file ./local-cert.pem -key-file ./local-key.pem "docker.localhost" "*.docker.localhost" "tipi.local" "*.tipi.local"