Merge branch 'main' of https://github.com/immich-app/immich into feat/cli-albums

Jonathan Jogenfors 2023-11-08 16:30:30 +01:00
commit b2bbdf423a
703 changed files with 34486 additions and 10077 deletions

View file

@ -38,7 +38,7 @@ jobs:
-
name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.3.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.4.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@ -70,7 +70,7 @@ jobs:
-
name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.3.0
uses: stumpylog/image-cleaner-action/untagged@v0.4.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"

View file

@ -21,7 +21,7 @@ jobs:
submodules: "recursive"
- name: Run e2e tests
run: docker-compose -f ./docker/docker-compose.test.yml -p immich-test-e2e up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server-test --remove-orphans --build
run: docker compose -f ./docker/docker-compose.test.yml up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server --remove-orphans --build
doc-tests:
name: Run documentation checks
@ -166,7 +166,6 @@ jobs:
- name: Install dependencies
run: |
poetry install --with dev
poetry run pip install --no-deps -r requirements.txt
- name: Lint with ruff
run: |
poetry run ruff check --format=github app

.gitignore (vendored): 1 line changed
View file

@ -4,6 +4,7 @@
.idea
docker/upload
docker/library
uploads
coverage

View file

@ -2,7 +2,10 @@ dev:
docker-compose -f ./docker/docker-compose.dev.yml up --remove-orphans
dev-new:
docker compose -f ./docker/docker-compose.dev.yml up --remove-orphans
docker compose -f ./docker/docker-compose.dev.yml up --remove-orphans || make dev-down
dev-down:
docker compose -f ./docker/docker-compose.dev.yml down --remove-orphans
dev-new-update:
docker compose -f ./docker/docker-compose.dev.yml up --build -V --remove-orphans
@ -20,7 +23,7 @@ pull-stage:
docker-compose -f ./docker/docker-compose.staging.yml pull
test-e2e:
docker-compose -f ./docker/docker-compose.test.yml -p immich-test-e2e up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server-test --remove-orphans --build
docker compose -f ./docker/docker-compose.test.yml up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server --remove-orphans --build
prod:
docker-compose -f ./docker/docker-compose.prod.yml up --build -V --remove-orphans
@ -32,4 +35,4 @@ api:
cd ./server && npm run api:generate
attach-server:
docker exec -it docker_immich-server_1 sh
docker exec -it docker_immich-server_1 sh

View file

@ -2,7 +2,7 @@
<br/>
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/license-MIT-green.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: MIT"></a>
<a href="https://discord.gg/D8JsnBEuKb">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" atl="Discord"/>
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a>
<br/>
<br/>
@ -25,6 +25,7 @@
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## Disclaimer
@ -65,7 +66,7 @@ password: demo
Spec: Free-tier Oracle VM - Amsterdam - 2.4Ghz quad-core ARM64 CPU, 24GB RAM
```
# Features
## Features
| Features | Mobile | Web |
| -------------------------------------------- | ------ | --- |
@ -95,7 +96,7 @@ Spec: Free-tier Oracle VM - Amsterdam - 2.4Ghz quad-core ARM64 CPU, 24GB RAM
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
# Support the project
## Support the project
I've committed to this project, and I will not stop. I will keep updating the docs, adding new features, and fixing bugs. But I can't do it alone. So I need your help to give me additional motivation to keep going.
@ -103,10 +104,15 @@ As our hosts in the [selfhosted.show - In the episode 'The-organization-must-not
If you feel like this is the right cause and the app is something you are seeing yourself using for a long time, please consider supporting the project with the option below.
## Donation
### Donation
- [Monthly donation](https://github.com/sponsors/alextran1502) via GitHub Sponsors
- [One-time donation](https://github.com/sponsors/alextran1502?frequency=one-time&sponsor=alextran1502) via GitHub Sponsors
- [Librepay](https://liberapay.com/alex.tran1502/)
- [buymeacoffee](https://www.buymeacoffee.com/altran1502)
- Bitcoin: 1FvEp6P6NM8EZEkpGUFAN2LqJ1gxusNxZX
## Contributors
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>

View file

@ -25,6 +25,7 @@
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## Avís legal

View file

@ -24,6 +24,7 @@
<a href="README_ca_ES.md">Català</a>
<a href="README_fr_FR.md">Français</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## Descargo de responsabilidad

View file

@ -25,6 +25,7 @@
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## Clause de non-responsabilité

README_it_IT.md (new file): 113 lines
View file

@ -0,0 +1,113 @@
<p align="center">
<br/>
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/license-MIT-green.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: MIT"></a>
<a href="https://discord.gg/D8JsnBEuKb">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" atl="Discord"/>
</a>
<br/>
<br/>
</p>
<p align="center">
<img src="design/immich-logo.svg" width="150" title="Login With Custom URL">
</p>
<h3 align="center">Immich - Soluzione self-hosted ad alte prestazioni per backup di foto e video</h3>
<br/>
<a href="https://immich.app">
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="README.md">English</a>
<a href="README_zh_CN.md">中文</a>
<a href="README_tr_TR.md">Türkçe</a>
<a href="README_ca_ES.md">Català</a>
<a href="README_es_ES.md">Español</a>
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
</p>
## Declino di responsabilità
- ⚠️ Il progetto è in fase di sviluppo **molto avanzato**.
- ⚠️ Possibilità di bug e cambiamenti rilevanti.
- ⚠️ **Non utilizzare l'app come unico salvataggio delle tue foto e dei tuoi video.**
- ⚠️ Utilizza sempre una tecnica [3-2-1](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) di backup per le foto e i video a cui tieni!
## Contenuto
- [Documentazione Ufficiale](https://immich.app/docs)
- [Roadmap](https://github.com/orgs/immich-app/projects/1)
- [Demo](#demo)
- [Funzionalità](#features)
- [Introduzione](https://immich.app/docs/overview/introduction)
- [Installazione](https://immich.app/docs/install/requirements)
- [Linee Guida per Contribuire](https://immich.app/docs/overview/support-the-project)
- [Supporta il Progetto](#support-the-project)
## Documentazione
La documentazione ufficiale, inclusa la guida all'installazione, è disponibile qui: https://immich.app/.
## Demo
Prova la demo del progetto https://demo.immich.app
Sull'app mobile, imposta `https://demo.immich.app/api` come `Server Endpoint URL`
```bash title="Demo Credential"
Credenziali di accesso
email: demo@immich.app
password: demo
```
```
Spec: Free-tier Oracle VM - Amsterdam - 2.4Ghz quad-core ARM64 CPU, 24GB RAM
```
# Funzionalità
| Funzionalità | Mobile | Web |
| ---------------------------------------------- | ------ | --- |
| Caricamento e visualizzazione di foto e video | Sì | Sì |
| Backup automatico quando l'app è in esecuzione | Sì | N/A |
| Selezione degli album per backup | Sì | N/A |
| Download foto e video sul dispositivo | Sì | Sì |
| Supporto multi utente | Sì | Sì |
| Album e album condivisi | Sì | Sì |
| Barra di scorrimento con trascinamento | Sì | Sì |
| Supporto formati raw | Sì | Sì |
| Visualizzazione metadata (EXIF, map) | Sì | Sì |
| Ricerca per metadata, oggetti, volti e CLIP | Sì | Sì |
| Funzioni di amministrazione degli utenti | No | Sì |
| Backup in background | Sì | N/A |
| Scroll virtuale | Sì | Sì |
| Supporto OAuth | Sì | Sì |
| API Keys | N/A | Sì |
| Backup e riproduzione di LivePhoto | iOS | Sì |
| Archiviazione impostata dall'utente | Sì | Sì |
| Condivisione pubblica | No | Sì |
| Archivio e Preferiti | Sì | Sì |
| Mappa globale | Sì | Sì |
| Collaborazione con utenti | Sì | Sì |
| Riconoscimento facciale e categorizzazione | Sì | Sì |
| Ricordi (x anni fa) | Sì | Sì |
| Supporto offline | Sì | No |
| Galleria sola lettura | Sì | Sì |
# Supporta il progetto
Mi dedico al progetto e non smetterò di farlo. Manterrò aggiornata la documentazione, aggiungerò nuove funzioni e risolverò i bug, ma non posso farlo da solo. Ho bisogno del tuo aiuto che mi da motivazione per continuare.
Come detto dal nostro host [selfhosted.show - Nell'episodio 'The-organization-must-not-be-name is a Hostile Actor'](https://selfhosted.show/79?t=1418), quello che il team ed io stiamo facendo è un lavoro enorme. Mi piacerebbe dedicarmi al progetto full-time e chiedo il tuo aiuto affinchè sia possibile.
Se pensi che Immich sia una buona causa e che l'app sia qualcosa che useresti nel lungo termine, sappi che puoi supportare il progetto scegliendo tra le opzioni sotto elencate.
## Donazioni
- [Donazione mensile](https://github.com/sponsors/alextran1502) tramite GitHub Sponsors
- [Donazione una tantum](https://github.com/sponsors/alextran1502?frequency=one-time&sponsor=alextran1502) tramite GitHub Sponsors
- [Librepay](https://liberapay.com/alex.tran1502/)
- [buymeacoffee](https://www.buymeacoffee.com/altran1502)
- Bitcoin: 1FvEp6P6NM8EZEkpGUFAN2LqJ1gxusNxZX

View file

@ -24,6 +24,7 @@
<a href="README_es_ES.md">Español</a>
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## 免責事項

View file

@ -25,6 +25,7 @@
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## Disclaimer

View file

@ -25,6 +25,7 @@
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>
## Feragatname

View file

@ -29,6 +29,7 @@
<a href="README_fr_FR.md">Français</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_it_IT.md">Italiano</a>
</p>

View file

@ -18,6 +18,7 @@ module.exports = {
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'prettier/prettier': 0,
},
};

File diff suppressed because it is too large

View file

@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.81.1
* The version of the OpenAPI document: 1.85.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

View file

@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.81.1
* The version of the OpenAPI document: 1.85.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

View file

@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.81.1
* The version of the OpenAPI document: 1.85.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

View file

@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.81.1
* The version of the OpenAPI document: 1.85.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

View file

@ -21,9 +21,9 @@ program
)
.addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS'))
.argument('[paths...]', 'One or more paths to assets to be uploaded')
.action((paths, options) => {
.action(async (paths, options) => {
options.excludePatterns = options.ignore;
new Upload().run(paths, options);
await new Upload().run(paths, options);
});
program
@ -39,18 +39,18 @@ program
.addOption(new Option('-i, --ignore [paths...]', 'Paths to ignore').env('IMMICH_IGNORE_PATHS').default(false))
.addOption(new Option('--no-read-only', 'Import files without read-only protection, allowing Immich to manage them'))
.argument('[paths...]', 'One or more paths to assets to be imported')
.action((paths, options) => {
.action(async (paths, options) => {
options.import = true;
options.excludePatterns = options.ignore;
new Upload().run(paths, options);
await new Upload().run(paths, options);
});
program
.command('server-info')
.description('Display server information')
.action(() => {
new ServerInfo().run();
.action(async () => {
await new ServerInfo().run();
});
program
@ -58,8 +58,8 @@ program
.description('Login using an API key')
.argument('[instanceUrl]')
.argument('[apiKey]')
.action((paths, options) => {
new LoginKey().run(paths, options);
.action(async (paths, options) => {
await new LoginKey().run(paths, options);
});
program.parse(process.argv);

View file

@ -67,7 +67,7 @@ describe('SessionService', () => {
});
});
it('should create auth file when logged in', async () => {
it.skip('should create auth file when logged in', async () => {
mockfs();
await sessionService.keyLogin('https://test/api', 'pNussssKSYo5WasdgalvKJ1n9kdvaasdfbluPg');

View file

@ -53,7 +53,14 @@ export class SessionService {
if (!fs.existsSync(this.configDir)) {
// Create config folder if it doesn't exist
fs.mkdirSync(this.configDir, { recursive: true });
const created = await fs.promises.mkdir(this.configDir, { recursive: true });
if (!created) {
throw new Error(`Failed to create config folder ${this.configDir}`);
}
}
if (!fs.existsSync(this.configDir)) {
console.error('waah');
}
fs.writeFileSync(this.authPath, yaml.stringify({ instanceUrl, apiKey }));

View file

@ -1,35 +1,24 @@
import { UploadService } from './upload.service';
import mockfs from 'mock-fs';
import axios from 'axios';
import mockAxios from 'jest-mock-axios';
import FormData from 'form-data';
import { ApiConfiguration } from '../cores/api-configuration';
jest.mock('axios', () => jest.fn());
describe('UploadService', () => {
let uploadService: UploadService;
beforeAll(() => {
// Write a dummy output before mock-fs to prevent some annoying errors
console.log();
});
beforeEach(() => {
const apiConfiguration = new ApiConfiguration('https://example.com/api', 'key');
uploadService = new UploadService(apiConfiguration);
});
it('should upload a single file', async () => {
it('should call axios', async () => {
const data = new FormData();
uploadService.upload(data);
await uploadService.upload(data);
mockAxios.mockResponse();
expect(axios).toHaveBeenCalled();
});
afterEach(() => {
mockfs.restore();
mockAxios.reset();
});
});

View file

@ -42,21 +42,21 @@ export class UploadService {
};
}
public checkIfAssetAlreadyExists(path: string, checksum: string): Promise<any> {
public checkIfAssetAlreadyExists(path: string, checksum: string) {
this.checkAssetExistenceConfig.data = JSON.stringify({ assets: [{ id: path, checksum: checksum }] });
// TODO: retry on 500 errors?
return axios(this.checkAssetExistenceConfig);
}
public upload(data: FormData): Promise<any> {
public upload(data: FormData) {
this.uploadConfig.data = data;
// TODO: retry on 500 errors?
return axios(this.uploadConfig);
}
public import(data: any): Promise<any> {
public import(data: any) {
this.importConfig.data = data;
// TODO: retry on 500 errors?

View file

@ -1,3 +1,7 @@
# See:
# - https://immich.app/docs/developer/setup
# - https://immich.app/docs/developer/troubleshooting
version: "3.8"
services:
@ -21,6 +25,10 @@ services:
- .env
environment:
- NODE_ENV=development
ulimits:
nofile:
soft: 1048576
hard: 1048576
depends_on:
- redis
- database
@ -48,6 +56,10 @@ services:
- 9231:9230
environment:
- NODE_ENV=development
ulimits:
nofile:
soft: 1048576
hard: 1048576
depends_on:
- database
- immich-server
@ -63,16 +75,16 @@ services:
command: npm run dev --host
env_file:
- .env
environment:
# Rename these values for svelte public interface
- PUBLIC_IMMICH_SERVER_URL=${IMMICH_SERVER_URL}
- PUBLIC_IMMICH_API_URL_EXTERNAL=${IMMICH_API_URL_EXTERNAL}
ports:
- 3000:3000
- 24678:24678
volumes:
- ../web:/usr/src/app
- /usr/src/app/node_modules
ulimits:
nofile:
soft: 1048576
hard: 1048576
restart: unless-stopped
depends_on:
- immich-server

View file

@ -7,9 +7,9 @@ services:
build:
context: ../server
dockerfile: Dockerfile
command: ["./start-server.sh"]
command: [ "./start-server.sh" ]
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
@ -18,19 +18,6 @@ services:
- database
- typesense
immich-machine-learning:
container_name: immich_machine_learning
image: immich-machine-learning:latest
build:
context: ../machine-learning
dockerfile: Dockerfile
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- model-cache:/cache
env_file:
- .env
restart: always
immich-microservices:
container_name: immich_microservices
image: immich-microservices:latest
@ -40,9 +27,9 @@ services:
build:
context: ../server
dockerfile: Dockerfile
command: ["./start-microservices.sh"]
command: [ "./start-microservices.sh" ]
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
@ -64,6 +51,18 @@ services:
depends_on:
- immich-server
immich-machine-learning:
container_name: immich_machine_learning
image: immich-machine-learning:latest
build:
context: ../machine-learning
dockerfile: Dockerfile
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
typesense:
container_name: immich_typesense
image: typesense/typesense:0.24.1@sha256:9bcff2b829f12074426ca044b56160ca9d777a0c488303469143dd9f8259d4dd
@ -73,7 +72,7 @@ services:
# remove this to get debug messages
- GLOG_minloglevel=1
volumes:
- tsdata:/data
- ${UPLOAD_LOCATION}/typesense:/data
restart: always
redis:
@ -91,7 +90,7 @@ services:
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
volumes:
- pgdata:/var/lib/postgresql/data
- ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
restart: always
immich-proxy:
@ -113,6 +112,4 @@ services:
restart: always
volumes:
pgdata:
model-cache:
tsdata:

View file

@ -1,10 +1,10 @@
version: "3.8"
# Compose file for dockerized end-to-end testing of the backend
name: "immich-test-e2e"
services:
immich-server-test:
image: immich-server-test
immich-server:
image: immich-server-dev:latest
build:
context: ../server
dockerfile: Dockerfile
@ -14,27 +14,20 @@ services:
- ../server:/usr/src/app
- /usr/src/app/node_modules
environment:
- DB_HOSTNAME=immich-database-test
- DB_HOSTNAME=database
- DB_USERNAME=postgres
- DB_PASSWORD=postgres
- DB_DATABASE_NAME=e2e_test
- IMMICH_RUN_ALL_TESTS=true
depends_on:
- immich-database-test
networks:
- immich-test-network
- database
immich-database-test:
container_name: immich-database-test
database:
image: postgres:14-alpine@sha256:28407a9961e76f2d285dc6991e8e48893503cc3836a4755bbc2d40bcc272a441
command: -c fsync=off
environment:
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
POSTGRES_DB: e2e_test
networks:
- immich-test-network
logging:
driver: none
networks:
immich-test-network:

View file

@ -83,10 +83,6 @@ services:
immich-proxy:
container_name: immich_proxy
image: ghcr.io/immich-app/immich-proxy:${IMMICH_VERSION:-release}
environment:
# Make sure these values get passed through from the env file
- IMMICH_SERVER_URL
- IMMICH_WEB_URL
ports:
- 2283:8080
depends_on:

docker/hwaccel-rkmpp.yml (new file): 24 lines
View file

@ -0,0 +1,24 @@
version: "3.8"
# Hardware acceleration for transcoding using RKMPP for Rockchip SOCs
# This is only needed if you want to use hardware acceleration for transcoding.
# Supported host OS is Ubuntu Jammy 22.04 with custom ffmpeg from ppa:liujianfeng1994/rockchip-multimedia
services:
hwaccel:
security_opt: # enables full access to /sys and /proc, still far better than privileged: true
- systempaths=unconfined
- apparmor=unconfined
group_add:
- video
devices:
- /dev/rga:/dev/rga
- /dev/dri:/dev/dri
- /dev/dma_heap:/dev/dma_heap
- /dev/mpp_service:/dev/mpp_service
volumes:
- /usr/bin/ffmpeg:/usr/bin/ffmpeg_mpp:ro
- /lib/aarch64-linux-gnu:/lib/ffmpeg-mpp:ro
- /lib/aarch64-linux-gnu/libblas.so.3:/lib/ffmpeg-mpp/libblas.so.3:ro # symlink is resolved by mounting
- /lib/aarch64-linux-gnu/liblapack.so.3:/lib/ffmpeg-mpp/liblapack.so.3:ro # symlink is resolved by mounting
- /lib/aarch64-linux-gnu/pulseaudio/libpulsecommon-15.99.so:/lib/ffmpeg-mpp/libpulsecommon-15.99.so:ro
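For context, a rough sketch of preparing the host described in the comments above (Ubuntu Jammy 22.04 with the named PPA); the exact package selection is an assumption and may differ on your image:
```bash
# Assumed host preparation for RKMPP transcoding on Ubuntu Jammy 22.04
sudo add-apt-repository ppa:liujianfeng1994/rockchip-multimedia
sudo apt update
sudo apt install ffmpeg   # custom ffmpeg build provided by the PPA above
```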

View file

@ -33,8 +33,6 @@ To be concise, Immich can now read in the gallery files, register the path into
- Only new files that are added to the gallery will be detected.
- Deleted and moved files will not be detected.
You can find more information on how to use the feature by reading the documentation [here](/docs/features/read-only-gallery).
## Memory feature
This is considered a fun feature that the team and I wanted to build for so long, but we had to put it off because of the refactoring of the code base. The code base is now in a good enough form to circle back and add more exciting features.

View file

@ -16,10 +16,6 @@ sidebar_position: 7
Immich doesn't have two-way synchronization ([yet](https://github.com/immich-app/immich/discussions/1006)), but the [command line tool](/docs/features/bulk-upload.md) can bulk upload items from a directory to Immich.
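For illustration, a minimal sketch of such a bulk upload with the CLI (API key, server URL, and directory are placeholders; see the bulk upload guide for the full option list):
```bash
# Upload an existing directory tree to an Immich server (placeholder values)
immich upload --key <api-key> --server http://<server-ip>:2283/api --recursive /path/to/directory
```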
### Why doesn't Immich watch an existing photo gallery directory?
The initial approach of Immich is to become a backup tool, primarily for mobile device usage. Thus, all the assets must be uploaded from the mobile client. The app was architectured to perform that job well.
### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy or Cloudflare tunnel in front of Immich. Make sure your reverse proxy allows large POST requests; in `nginx`, set `client_max_body_size 50000M;` or similar. Cloudflare tunnels are limited to 100 MB file sizes. Also check the disk space of your reverse proxy: some proxies cache requests to disk before passing them on, and if disk space runs out the request fails.

View file

@ -17,13 +17,13 @@ docker exec -t immich_postgres pg_dumpall -c -U postgres | gzip > "/path/to/back
```
```bash title='Restore'
docker-compose down -v # CAUTION! Deletes all Immich data to start from scratch.
docker-compose pull # Update to latest version of Immich (if desired)
docker-compose create # Create Docker containers for Immich apps without running them.
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them.
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
gunzip < "/path/to/backup/dump.sql.gz" | docker exec -i immich_postgres psql -U postgres -d immich # Restore Backup
docker-compose up -d # Start remainder of Immich apps
docker compose up -d # Start remainder of Immich apps
```
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.).

Binary image file changed: 111 KiB → 501 KiB

View file

@ -4,7 +4,7 @@ Immich uses the [OpenAPI](https://swagger.io/specification/) standard to generat
## Generator
OpenAPI is used to generate the client (Typescript, Dart) SDK. `openapi-generator-cli` can be installed [here](https://openapi-generator.tech/docs/installation/). The generated SDK is based on the `immich-openapi-specs.json` file, which is autogenerated by the server when running in development mode. The `immich-openapi-specs.json` file can be modified with `@nestjs/swagger` decorators used or referenced by controller endpoints. See the [NestJS OpenAPI docs](https://docs.nestjs.com/openapi/types-and-parameters) for more info. When you add a new endpoint or modify an existing one, you must run the command below to update the client SDK.
OpenAPI is used to generate the client (Typescript, Dart) SDK. `openapi-generator-cli` can be installed [here](https://openapi-generator.tech/docs/installation/). The generated SDK is based on the `immich-openapi-specs.json` file, which is autogenerated by the server **when running in development mode**. The `immich-openapi-specs.json` file can be modified with `@nestjs/swagger` decorators used or referenced by controller endpoints. See the [NestJS OpenAPI docs](https://docs.nestjs.com/openapi/types-and-parameters) for more info. When you add a new endpoint or modify an existing one, you must run the server in development mode and run the command below to update the client SDK.
```bash
npm run api:generate # Run from the `server/` directory

View file

@ -12,6 +12,6 @@ The backend has an end-to-end test suite that can be called with `npm run test:e
Note that there is a bug in nodejs <20.8 that causes segmentation faults when running these tests. If you run into segfaults, ensure you are using at least version 20.8.
To perform a full e2e test, you need to run e2e tests inside docker. The easiest way to do that is to run `make test-e2e` in the root directory. This will build and start a docker-compose consisting of the server, microservices, and a postgres database. It will then perfom the tests and exit.
To perform a full e2e test, you need to run e2e tests inside docker. The easiest way to do that is to run `make test-e2e` in the root directory. This will build and start a docker-compose consisting of the server, microservices, and a postgres database. It will then perform the tests and exit.
If you manually install the dependencies (see the DOCKERFILE) on your development machine, you can also run the full e2e tests manually by setting the `IMMICH_RUN_ALL_TESTS` environment value to true, i.e. `IMMICH_RUN_ALL_TESTS=true npm run test:e2e`.
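Put together, a short sketch of the two invocations described above (the `server/` working directory for the npm variant is an assumption):
```bash
# Full e2e suite inside Docker: server, microservices and a Postgres database
make test-e2e

# Or, with the dependencies from the Dockerfile installed locally:
node --version                              # should be >= 20.8 to avoid the segfault noted above
IMMICH_RUN_ALL_TESTS=true npm run test:e2e  # run from the server/ directory
```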

View file

@ -0,0 +1,19 @@
# Troubleshooting
:::tip
A great option to get assistance with troubleshooting is to join our [Discord](https://discord.gg/D8JsnBEuKb) server, where we have a dedicated channel for `#contributing`.
:::
## Known Issues
### Running on Windows
Running Immich on Windows can be frustrating and there are lots of ways it can go wrong. Where possible we recommend using Docker on Linux. However, several people have had success running Immich on Windows using Docker via WSL2.
### NTFS Mounted Volumes
The docker-compose.dev.yml and docker-compose.prod.yml use volume mounts for the postgres database. On start-up, postgres will try to `chown` the data directory, but fail. See [this post](https://forums.docker.com/t/data-directory-var-lib-postgresql-data-pgdata-has-wrong-ownership/17963/24) for more information about this issue and possible solutions.
### `Cannot read properties of null (reading 'split')`
This error occurs when trying to access the app via port `3000` instead of `2283`. During development `immich-proxy` runs on port 2283, while `immich-web` runs on `3000`.

View file

@ -4,6 +4,10 @@ You can use the CLI to upload an existing gallery to the Immich server
[Immich CLI Repository](https://github.com/immich-app/CLI)
:::tip Google Photos Takeout
If you are looking to import your Google Photos Takeout, we recommend the community-maintained tool [immich-go](https://github.com/simulot/immich-go).
:::
## Requirements
- Node.js 16 or above
@ -32,7 +36,6 @@ immich
| --server / -s | Immich's server address |
| --threads / -t | Number of threads to use (Default 5) |
| --album/ -al | Create albums for assets based on the parent folder or a given name |
| --import/ -i | Import gallery (assets are not uploaded) |
## Quick Start
@ -108,70 +111,3 @@ npm run build
```bash title="Run the command"
node bin/index.js upload --key HFEJ38DNSDUEG --server http://192.168.1.216:2283/api --recursive your/asset/directory
```
---
## Importing existing libraries
If you do not wish to upload files into the server, existing files can be imported into the immich gallery through the use of the `--import` flag.
```
immich upload --key HFEJ38DNSDUEG --server http://192.168.1.216:2283/api --recursive directory/ --import
```
```
immich upload --key HFEJ38DNSDUEG --server http://192.168.1.216:2283/api file1.jpg file2.jpg --import
```
The `immich-server` and `immich-microservices` containers must be able to access the files, or directories at the path referenced in the command. The directories referenced must be set under a user's `External Path` setting. More detailed instructions can be found [here](/docs/features/read-only-gallery).
:::tip Matching volume references
The import command is most easily run on the machine running the Immich service, as the path to the files on the machine running the command and on the server must match identically.
If you are running Immich within Docker, the volume mount pointing to your existing library should be identical to the path on your host machine.
```diff title="docker-compose.yml"
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
command: [ "start.sh", "immich" ]
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /path/to/media:/path/to/media
env_file:
- .env
depends_on:
- redis
- database
- typesense
restart: always
immich-microservices:
container_name: immich_microservices
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
command: [ "start.sh", "microservices" ]
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /path/to/media:/path/to/media
env_file:
- .env
depends_on:
- redis
- database
- typesense
restart: always
```
The proper command for the above would be as shown below. You should have access to `/path/to/media`, exactly as written, on the environment where the CLI command is being run.
```
immich upload --key HFEJ38DNSDUEG --server http://192.168.1.216:2283/api --recursive /path/to/media --import
```
If you are running the import using the docker command, please note that the volume should point to `/path/to/media` exactly as it exists on the environment where the CLI command is being run.
```
docker run -it --rm -v "/path/to/media:/path/to/media" ghcr.io/immich-app/immich-cli:latest upload --key HFEJ38DNSDUEG --server http://192.168.1.216:2283/api --recursive /path/to/media --import
```
:::

View file

@ -1,5 +1,7 @@
# Facial Recognition
## Overview
Immich recognizes faces in your photos and videos and groups them together. You can then assign names to the faces and search for them.
The list of people is shown in the Explore page.
@ -13,3 +15,16 @@ Upon clicking on a person, a list of assets that contain their face will be show
The asset detail view will also show the faces that are recognized in the asset.
<img src={require('./img/facial-recognition-3.png').default} title='Facial Recognition 3' />
## Actions
Additional actions you can do with a detected person are:
- Change the feature face photo of the person
- Set date of birth
- Merge two or more detected faces into one person
- Hide face
These actions can be found in the app bar when you access the detail view of a person.
<img src={require('./img/facial-recognition-4.png').default} title='Facial Recognition 4' width="70%"/>

Binary image file added: 416 KiB

View file

@ -85,7 +85,7 @@ There is an automatic job that's run once a day and refreshes all modified files
Let's show a concrete example where we add an existing gallery to Immich. Here, we have the following folders we want to add:
- `/home/user/old-pics`: a folder contining childhood photos.
- `/home/user/old-pics`: a folder containing childhood photos.
- `/mnt/nas/christmas-trip`: photos from a christmas trip. The subfolder `/mnt/nas/christmas-trip/Raw` contains the raw files directly from the DSLR. We don't want to import the raw files to Immich
- `/mnt/media/videos`: Videos from the same christmas trip.

View file

@ -1,6 +1,6 @@
import MobileAppDownload from '../partials/_mobile-app-download.md';
import MobileAppLogin from '../partials/_mobile-app-login.md';
import MobileAppBackup from '../partials/_mobile-app-login.md';
import MobileAppBackup from '../partials/_mobile-app-backup.md';
# Mobile App

View file

@ -1,97 +0,0 @@
# Read-only Gallery [Deprecated]
:::caution
This feature is being deprecated in favor of [Libraries](/docs/features/libraries.md).
:::
## Overview
This feature enables users to use an existing gallery without uploading the assets to Immich.
Upon syncing the file information, it will be read by Immich to generate supported files.
## Usage
:::tip Example scenario
On the VM/system that Immich is running, I have 2 galleries that I want to use with Immich.
- My gallery is stored at `/mnt/media/precious-memory`
- My wife's gallery is stored at `/mnt/media/childhood-memory`
We will use those values in the steps below.
:::
### Mount the gallery to the containers.
`immich-server` and `immich-microservices` containers will need access to the gallery. Mount the directory path as in the example below
```diff title="docker-compose.yml"
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
command: [ "start.sh", "immich" ]
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /mnt/media/precious-memory:/mnt/media/precious-memory:ro
+ - /mnt/media/childhood-memory:/mnt/media/childhood-memory:ro
env_file:
- .env
depends_on:
- redis
- database
- typesense
restart: always
immich-microservices:
container_name: immich_microservices
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
command: [ "start.sh", "microservices" ]
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /mnt/media/precious-memory:/mnt/media/precious-memory:ro
+ - /mnt/media/childhood-memory:/mnt/media/childhood-memory:ro
env_file:
- .env
depends_on:
- redis
- database
- typesense
restart: always
```
:::tip
Internal and external path have to be identical.
:::
_Remember to bring the container down/up to register the changes. Make sure you can see the mounted path in the container._
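A minimal sketch of that restart-and-verify step, assuming the compose file and container name shown above:
```bash
# Recreate the containers so the new mounts are registered
docker compose down && docker compose up -d

# Verify the gallery path is visible inside the server container
docker exec -it immich_server ls /mnt/media/precious-memory
```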
### Register the path for the user.
This action is done by the admin of the instance.
- Navigate to `Administration > Users` page on the web.
- Click on the user edit button.
- Add the gallery path to the `External Path` field for the corresponding user and confirm the changes.
<img src={require('./img/me.png').default} width='33%' title='My Account Storage Path' />
<img src={require('./img/my-wife.png').default} width='33%' title='My Wifes Account Storage Path' />
### Sync with the CLI tool.
- Install or update the [CLI Tool](/docs/features/bulk-upload.md). The import feature is supported from version `v0.39.0` of the CLI
- Run the command below to sync the gallery with Immich.
```bash title="Import my gallery"
immich upload --key <my-api-key> --server http://my-server-ip:2283/api /mnt/media/precious-memory --recursive --import
```
```bash title="Import my wife gallery"
immich upload --key <my-wife-api-key> --server http://my-server-ip:2283/api /mnt/media/childhood-memory --recursive --import
```
The `--import` flag will tell Immich to import the files by path instead of uploading them.

View file

@ -0,0 +1,42 @@
# Python File Upload
```python
#!/usr/bin/python3

import requests
import os
from datetime import datetime

API_KEY = 'YOUR_API_KEY'  # replace with a valid api key
BASE_URL = 'http://127.0.0.1:2283/api'  # replace as needed


def upload(file):
    stats = os.stat(file)

    headers = {
        'Accept': 'application/json',
        'x-api-key': API_KEY
    }

    data = {
        'deviceAssetId': f'{file}-{stats.st_mtime}',
        'deviceId': 'python',
        'fileCreatedAt': datetime.fromtimestamp(stats.st_mtime),
        'fileModifiedAt': datetime.fromtimestamp(stats.st_mtime),
        'isFavorite': 'false',
    }

    files = {
        'assetData': open(file, 'rb')
    }

    response = requests.post(
        f'{BASE_URL}/asset/upload', headers=headers, data=data, files=files)

    print(response.json())
    # {'id': 'ef96f635-61c7-4639-9e60-61a11c4bbfba', 'duplicate': False}


upload('./test.jpg')
```
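Assuming the script above is saved as `upload.py` (the filename is arbitrary), it only needs the `requests` package and can be run directly:
```bash
pip install requests   # the only third-party dependency of the script above
python3 upload.py      # prints the JSON response shown in the comment
```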

View file

@ -17,6 +17,12 @@ The default configuration looks like this:
"targetAudioCodec": "aac",
"targetResolution": "720",
"maxBitrate": "0",
"bframes": -1,
"refs": 0,
"gopSize": 0,
"npl": 0,
"temporalAQ": false,
"cqMode": "auto",
"twoPass": false,
"transcode": "required",
"tonemap": "hable",
@ -44,9 +50,15 @@ The default configuration looks like this:
"sidecar": {
"concurrency": 5
},
"library": {
"concurrency": 5
},
"storageTemplateMigration": {
"concurrency": 5
},
"migration": {
"concurrency": 5
},
"thumbnailGeneration": {
"concurrency": 5
},
@ -55,16 +67,16 @@ The default configuration looks like this:
}
},
"machineLearning": {
"classification": {
"minScore": 0.7,
"enabled": true,
"modelName": "microsoft/resnet-50"
},
"enabled": true,
"url": "http://immich-machine-learning:3003",
"classification": {
"enabled": true,
"modelName": "microsoft/resnet-50",
"minScore": 0.9
},
"clip": {
"enabled": true,
"modelName": "ViT-B-32::openai"
"modelName": "ViT-B-32__openai"
},
"facialRecognition": {
"enabled": true,
@ -74,6 +86,14 @@ The default configuration looks like this:
"minFaces": 1
}
},
"map": {
"enabled": true,
"tileUrl": "https://tile.openstreetmap.org/{z}/{x}/{y}.png"
},
"reverseGeocoding": {
"enabled": true,
"citiesFileOverride": "cities500"
},
"oauth": {
"enabled": false,
"issuerUrl": "",
@ -96,8 +116,27 @@ The default configuration looks like this:
"thumbnail": {
"webpSize": 250,
"jpegSize": 1440,
"quality": 90,
"quality": 80,
"colorspace": "p3"
},
"newVersionCheck": {
"enabled": true
},
"trash": {
"enabled": true,
"days": 30
},
"theme": {
"customCss": ""
},
"library": {
"scan": {
"enabled": true,
"cronExpression": "0 0 * * *"
}
},
"stylesheets": {
"css": ""
}
}
```
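As a usage sketch: the server reads this file from the path given in the `IMMICH_CONFIG_FILE` environment variable (variable name taken from the environment-variables documentation, not shown in this excerpt; treat it as an assumption and verify against your version):
```bash
# Point the Immich server at a custom config file (path is a placeholder)
export IMMICH_CONFIG_FILE=/path/to/immich-config.json
```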

Nine binary image files added (sizes: 8.7 KiB, 86 KiB, 20 KiB, 79 KiB, 6 KiB, 4 KiB, 19 KiB, 6.2 KiB, 9.9 KiB)

View file

@ -1,5 +1,5 @@
---
sidebar_position: 80
sidebar_position: 90
---
import RegisterAdminUser from '../partials/_register-admin.md';

View file

@ -0,0 +1,192 @@
---
sidebar_position: 80
---
# TrueNAS SCALE [Community]
:::note
This is a community contribution and not officially supported by the Immich team, but included here for convenience.
**Please report issues to the corresponding [Github Repository](https://github.com/truenas/charts/tree/master/community/immich).**
:::
Immich can easily be installed on TrueNAS SCALE via the **Community** train application.
Consider reviewing the TrueNAS [Apps tutorial](https://www.truenas.com/docs/scale/scaletutorials/apps/) if you have not previously configured applications on your system.
TrueNAS SCALE makes installing and updating Immich easy, but you must use the Immich web portal and mobile app to configure accounts and access libraries.
## First Steps
The Immich app in TrueNAS SCALE installs, completes the initial configuration, then starts the Immich web portal.
When updates become available, SCALE alerts and provides easy updates.
Before installing the Immich app in SCALE, review the [Environment Variables](/docs/install/environment-variables.md) documentation to see if you want to configure any during installation.
You can configure environment variables at any time after deploying the application.
You can allow SCALE to create the datasets Immich requires automatically during app installation.
Or before beginning app installation, [create the datasets](https://www.truenas.com/docs/scale/scaletutorials/storage/datasets/datasetsscale/) to use in the **Storage Configuration** section during installation.
Immich requires seven datasets: **library**, **pgBackup**, **pgData**, **profile**, **thumbs**, **uploads**, and **video**.
You can organize these as one parent with seven child datasets, for example `mnt/tank/immich/library`, `mnt/tank/immich/pgBackup`, and so on.
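For those who prefer the shell to the SCALE UI, a minimal sketch of creating that layout with ZFS (the pool name `tank` is an assumption; the datasets tutorial linked above covers the UI route):
```bash
# Parent dataset plus the seven datasets Immich expects (pool name assumed)
zfs create tank/immich
for ds in library pgBackup pgData profile thumbs uploads video; do
  zfs create "tank/immich/${ds}"
done
```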
## Installing the Immich Application
To install the **Immich** application, go to **Apps**, click **Discover Apps**, either begin typing Immich into the search field or scroll down to locate the **Immich** application widget.
<img
src={require('./img/truenas01.png').default}
width="50%"
alt="Immich App Widget"
className="border rounded-xl"
/>
Click on the widget to open the **Immich** application details screen.
<img
src={require('./img/truenas02.png').default}
width="100%"
alt="Immich App Details Screen"
className="border rounded-xl"
/>
Click **Install** to open the Immich application configuration screen.
Application configuration settings are presented in several sections, each explained below.
To find specific fields click in the **Search Input Fields** search field, scroll down to a particular section or click on the section heading on the navigation area in the upper-right corner.
<img
src={require('./img/truenas03.png').default}
width="100%"
alt="Install Immich Screen"
className="border rounded-xl"
/>
Accept the default values in **Application Name** and **Version**.
Accept the default value in **Timezone** or change to match your local timezone.
**Timezone** is only used by the Immich `exiftool` microservice if it cannot be determined from the image metadata.
Accept the default port in **Web Port**.
Immich requires seven storage datasets.
You can allow SCALE to create them for you, or use the dataset(s) created in [First Steps](#first-steps).
Select the storage options you want to use for **Immich Uploads Storage**, **Immich Library Storage**, **Immich Thumbs Storage**, **Immich Profile Storage**, **Immich Video Storage**, **Immich Postgres Data Storage**, **Immich Postgres Backup Storage**.
Select **ixVolume (dataset created automatically by the system)** in **Type** to let SCALE create the dataset or select **Host Path** to use the existing datasets created on the system.
Accept the defaults in Resources or change the CPU and memory limits to suit your use case.
Click **Install**.
The system opens the **Installed Applications** screen with the Immich app in the **Deploying** state.
When the installation completes it changes to **Running**.
<img
src={require('./img/truenas04.png').default}
width="100%"
alt="Immich Installed"
className="border rounded-xl"
/>
Click **Web Portal** on the **Application Info** widget to open the Immich web interface to set up your account and begin uploading photos.
:::tip
For more information on how to use the application once installed, please refer to the [Post Install](/docs/install/post-install.mdx) guide.
:::
## Editing Environment Variables
Go to the **Installed Applications** screen and select Immich from the list of installed applications.
Click **Edit** on the **Application Info** widget to open the **Edit Immich** screen.
The settings on the edit screen are the same as on the install screen.
You cannot edit **Storage Configuration** paths after the initial app install.
Click **Update** to save changes.
TrueNAS automatically updates, recreates, and redeploys the Immich container with the updated environment variables.
## Updating the App
When updates become available, SCALE alerts and provides easy updates.
To update the app to the latest version, click **Update** on the **Application Info** widget from the **Installed Applications** screen.
Update opens an update window for the application that includes two selectable options, Images (to be updated) and Changelog. Click on the down arrow to see the options available for each.
Click **Upgrade** to begin the process and open a counter dialog that shows the upgrade progress. When complete, the update badge and buttons disappear and the application Update state on the Installed screen changes from Update Available to Up to date.
## Understanding Immich Settings in TrueNAS SCALE
Accept the default value or enter a name in **Application Name** field.
In most cases use the default name, but if adding a second deployment of the application you must change this name.
Accept the default version number in **Version**.
When a new version becomes available, the application has an update badge.
The **Installed Applications** screen shows the option to update applications.
### Immich Configuration Settings
You can accept the defaults in the **Immich Configuration** settings, or enter the settings you want to use.
<img
src={require('./img/truenas05.png').default}
width="100%"
alt="Configuration Settings"
className="border rounded-xl"
/>
Accept the default setting in **Timezone** or change to match your local timezone.
**Timezone** is only used by the Immich `exiftool` microservice if it cannot be determined from the image metadata.
You can enter a **Public Login Message** to display on the login page, or leave it blank.
### Networking Settings
Accept the default port numbers in **Web Port**.
The SCALE Immich app listens on port **30041**.
Refer to the TrueNAS [default port list](https://www.truenas.com/docs/references/defaultports/) for a list of assigned port numbers.
To change the port numbers, enter a number within the range 9000-65535.
<img
src={require('./img/truenas06.png').default}
width="100%"
alt="Networking Settings"
className="border rounded-xl"
/>
### Storage Settings
You can install Immich using the default setting **ixVolume (dataset created automatically by the system)** or use the host path option with datasets [created before installing the app](#first-steps).
<img
src={require('./img/truenas07.png').default}
width="100%"
alt="Configure Storage ixVolumes"
className="border rounded-xl"
/>
Select **Host Path (Path that already exists on the system)** to browse to and select the datasets.
<img
src={require('./img/truenas08.png').default}
width="100%"
alt="Configure Storage Host Paths"
className="border rounded-xl"
/>
### Resource Configuration Settings
Accept the default values in **Resources Configuration** or enter new CPU and memory values
By default, this application is limited to use no more than 4 CPU cores and 8 Gigabytes available memory. The application might use considerably less system resources.
<img
src={require('./img/truenas09.png').default}
width="100%"
alt="Resource Limits"
className="border rounded-xl"
/>
To customize the CPU and memory allocated to the container Immich uses, enter new CPU values as a plain integer value followed by the suffix m (milli).
Default is 4000m.
Accept the default value 8Gi allocated memory or enter a new limit in bytes.
Enter a plain integer followed by the measurement suffix, for example 129M or 123Mi.
Systems with compatible GPU(s) display devices in **GPU Configuration**.
See [Managing GPUs](https://www.truenas.com/docs/scale/scaletutorials/systemsettings/advanced/managegpuscale/) for more information about allocating isolated GPU devices in TrueNAS SCALE.

View file

@ -11,6 +11,6 @@ Running into an issue or have a question? Try the following:
3. Search through existing [GitHub Issues][github-issues].
4. Open a help ticket on [Discord][discord-link].
[github-issues]: https://github.com/immich-app/immich/releases
[github-issues]: https://github.com/immich-app/immich/issues
[github-releases]: https://github.com/immich-app/immich/releases
[discord-link]: https://discord.com/invite/D8JsnBEuKb

View file

@ -4,7 +4,7 @@ sidebar_position: 1
# Introduction
<img src={require('./img/feature-panel.png').default} alt="Immich" />
<img src={require('./img/feature-panel.png').default} alt="Immich - Self-hosted photos and videos backup tool" />
## Welcome!

Three binary image files changed: 35 KiB → 137 KiB, 321 KiB → 404 KiB, 335 KiB → 334 KiB

View file

@ -89,6 +89,7 @@ const config = {
},
},
navbar: {
title: 'IMMICH',
logo: {
alt: 'Immich University Logo',
src: 'img/color-logo.png',
@ -100,6 +101,11 @@ const config = {
position: 'right',
label: 'Docs',
},
{
to: '/milestones',
position: 'right',
label: 'Milestones',
},
{
to: '/docs/api',
position: 'right',

docs/package-lock.json (generated): 28 lines changed
View file

@ -10,6 +10,8 @@
"dependencies": {
"@docusaurus/core": "^2.4.3",
"@docusaurus/preset-classic": "^2.4.3",
"@mdi/js": "^7.3.67",
"@mdi/react": "^1.6.1",
"@mdx-js/react": "^1.6.22",
"autoprefixer": "^10.4.13",
"classnames": "^2.3.2",
@ -2862,6 +2864,19 @@
"resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz",
"integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A=="
},
"node_modules/@mdi/js": {
"version": "7.3.67",
"resolved": "https://registry.npmjs.org/@mdi/js/-/js-7.3.67.tgz",
"integrity": "sha512-MnRjknFqpTC6FifhGHjZ0+QYq2bAkZFQqIj8JA2AdPZbBxUvr8QSgB2yPAJ8/ob/XkR41xlg5majDR3c1JP1hw=="
},
"node_modules/@mdi/react": {
"version": "1.6.1",
"resolved": "https://registry.npmjs.org/@mdi/react/-/react-1.6.1.tgz",
"integrity": "sha512-4qZeDcluDFGFTWkHs86VOlHkm6gnKaMql13/gpIcUQ8kzxHgpj31NuCkD8abECVfbULJ3shc7Yt4HJ6Wu6SN4w==",
"dependencies": {
"prop-types": "^15.7.2"
}
},
"node_modules/@mdx-js/mdx": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz",
@ -16932,6 +16947,19 @@
"resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz",
"integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A=="
},
"@mdi/js": {
"version": "7.3.67",
"resolved": "https://registry.npmjs.org/@mdi/js/-/js-7.3.67.tgz",
"integrity": "sha512-MnRjknFqpTC6FifhGHjZ0+QYq2bAkZFQqIj8JA2AdPZbBxUvr8QSgB2yPAJ8/ob/XkR41xlg5majDR3c1JP1hw=="
},
"@mdi/react": {
"version": "1.6.1",
"resolved": "https://registry.npmjs.org/@mdi/react/-/react-1.6.1.tgz",
"integrity": "sha512-4qZeDcluDFGFTWkHs86VOlHkm6gnKaMql13/gpIcUQ8kzxHgpj31NuCkD8abECVfbULJ3shc7Yt4HJ6Wu6SN4w==",
"requires": {
"prop-types": "^15.7.2"
}
},
"@mdx-js/mdx": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz",

View file

@ -19,6 +19,8 @@
"dependencies": {
"@docusaurus/core": "^2.4.3",
"@docusaurus/preset-classic": "^2.4.3",
"@mdi/js": "^7.3.67",
"@mdi/react": "^1.6.1",
"@mdx-js/react": "^1.6.22",
"autoprefixer": "^10.4.13",
"classnames": "^2.3.2",

View file

@ -0,0 +1,90 @@
import React from 'react';
import Icon from '@mdi/react';
import { mdiCheckboxMarkedCircleOutline } from '@mdi/js';
import useIsBrowser from '@docusaurus/useIsBrowser';
export interface Item {
icon: string;
title: string;
description?: string;
release: string;
tag?: string;
date: Date;
dateType: DateType;
}
export enum DateType {
RELEASE = 'Release Date',
DATE = 'Date',
}
interface Props {
items: Item[];
}
export default function Timeline({ items }: Props): JSX.Element {
const isBrowser = useIsBrowser();
return (
<ul className="flex flex-col pl-4">
{items.map((item, index) => {
const isFirst = index === 0;
const isLast = index === items.length - 1;
const classNames: string[] = [];
if (isFirst) {
classNames.push('');
}
if (isLast) {
classNames.push('rounded rounded-b-full');
}
return (
<li key={index} className="flex min-h-24 w-[700px] max-w-[90vw]">
<div className="md:flex justify-start w-36 mr-8 items-center dark:text-immich-dark-primary text-immich-primary hidden">
{isBrowser ? item.date.toLocaleDateString(navigator.language) : ''}
</div>
<div className={`${isFirst && 'relative top-[50%]'} ${isLast && 'relative bottom-[50%]'}`}>
<div
className={`h-full border-solid border-4 border-immich-primary dark:border-immich-dark-primary ${
isFirst && 'rounded rounded-t-full'
} ${isLast && 'rounded rounded-b-full'}`}
></div>
</div>
<div className="z-10 flex items-center bg-immich-primary dark:bg-immich-dark-primary border-2 border-solid rounded-full dark:text-black text-white relative top-[50%] left-[-3px] translate-y-[-50%] translate-x-[-50%] w-8 h-8 shadow-lg ">
<Icon path={mdiCheckboxMarkedCircleOutline} size={1.25} />
</div>
<section className=" dark:bg-immich-dark-gray bg-immich-gray dark:border-0 border-gray-200 border border-solid rounded-2xl flex flex-col w-full gap-2 p-4 md:ml-4 my-2 hover:bg-immich-primary/10 dark:hover:bg-immich-dark-primary/10 transition-all">
<div className="m-0 text-lg flex w-full items-center justify-between gap-2">
<p className="m-0 items-start flex gap-2">
<Icon path={item.icon} size={1} />
<span>{item.title}</span>
</p>
<span className="dark:text-immich-dark-primary text-immich-primary">
{item.tag ? (
<a
href={`https://github.com/immich-app/immich/releases/tag/${item.tag}`}
target="_blank"
rel="noopener"
>
[{item.release}]{' '}
</a>
) : (
<span>[{item.release}]</span>
)}
</span>
</div>
<div className="md:hidden text-xs">
{`${item.dateType} - ${isBrowser ? item.date.toLocaleDateString(navigator.language) : ''}`}
</div>
<p className="m-0 text-sm text-gray-600 dark:text-gray-300">{item.description}</p>
</section>
</li>
);
})}
</ul>
);
}

View file

@ -40,3 +40,7 @@ button {
--ifm-background-color: #000000;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
.navbar__brand .navbar__title {
@apply font-immich-title text-2xl font-normal text-immich-primary dark:text-immich-dark-primary;
}

View file

@ -6,32 +6,35 @@ function HomepageHeader() {
return (
<header>
<section className="text-center m-6 p-12 border border-red-400 rounded-[50px] bg-gray-100 dark:bg-immich-dark-gray">
<h1 className="md:text-6xl font-bold mb-10 font-immich-title text-immich-primary dark:text-immich-dark-primary">
IMMICH
<img src="img/immich-logo.svg" className="md:h-24 h-12 mb-2" alt="Immich logo" />
<h1 className="md:text-6xl font-immich-title mb-10 text-immich-primary dark:text-immich-dark-primary uppercase">
Immich
</h1>
<div className="font-thin sm:text-base md:text-2xl my-12 sm:leading-tight">
<p>SELF-HOSTED BACKUP SOLUTION </p>
<p>FOR PHOTOS AND VIDEOS</p>
<p>ON MOBILE DEVICE</p>
<p className="mb-1 uppercase">
Self-hosted backup solution <span className="block"></span>
for photos and videos <span className="block"></span>
on mobile device
</p>
</div>
<div className="flex flex-col sm:flex-row place-items-center place-content-center mt-9 mb-16 gap-4 ">
<Link
className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary dark:bg-immich-dark-primary rounded-full no-underline hover:no-underline text-white hover:text-gray-50 dark:text-immich-dark-bg font-bold"
className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary dark:bg-immich-dark-primary rounded-full no-underline hover:no-underline text-white hover:text-gray-50 dark:text-immich-dark-bg font-bold uppercase"
to="docs/overview/introduction"
>
GET STARTED
Get started
</Link>
<Link
className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary/10 dark:bg-gray-300 rounded-full hover:no-underline text-immich-primary dark:text-immich-dark-bg font-bold"
className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary/10 dark:bg-gray-300 rounded-full hover:no-underline text-immich-primary dark:text-immich-dark-bg font-bold uppercase"
to="https://demo.immich.app/"
>
DEMO PORTAL
Demo portal
</Link>
</div>
<img src="/img/immich-screenshots.png" alt="logo" />
<img src="/img/immich-screenshots.png" alt="screenshots" width={'85%'} />
<div className="flex flex-col sm:flex-row place-items-center place-content-center mt-4 gap-1">
<div className="h-24">

View file

@ -1,7 +0,0 @@
---
title: Markdown page example
---
# Markdown page example
You don't need React to write simple standalone pages.

View file

@ -0,0 +1,592 @@
import {
mdiAccountGroup,
mdiAndroid,
mdiAppleIos,
mdiArchiveOutline,
mdiBookSearchOutline,
mdiCakeVariant,
mdiCheckAll,
mdiCheckboxMarked,
mdiCollage,
mdiContentCopy,
mdiDevices,
mdiFaceMan,
mdiFaceManOutline,
mdiFile,
mdiFileSearch,
mdiFolder,
mdiHeart,
mdiImage,
mdiImageAlbum,
mdiImageMultipleOutline,
mdiImageSearch,
mdiKeyboardSettingsOutline,
mdiMagnify,
mdiMap,
mdiMaterialDesign,
mdiMerge,
mdiMonitor,
mdiMotionPlayOutline,
mdiPalette,
mdiPanVertical,
mdiPartyPopper,
mdiRaw,
mdiRotate360,
mdiSecurity,
mdiServer,
mdiShareAll,
mdiShareCircle,
mdiStar,
mdiTag,
mdiText,
mdiThemeLightDark,
mdiTrashCanOutline,
mdiVideo,
mdiWeb,
} from '@mdi/js';
import Layout from '@theme/Layout';
import React from 'react';
import Timeline, { DateType, Item } from '../components/timeline';
const items: Item[] = [
{
icon: mdiStar,
description: 'Reach 20K Stars on GitHub!',
title: '20,000 Stars',
release: 'v1.83.0',
tag: 'v1.83.0',
date: new Date(2023, 9, 28),
dateType: DateType.RELEASE,
},
{
icon: mdiContentCopy,
title: 'Stack assets',
description: 'Manual asset stacking for grouping and hiding related assets in the main timeline.',
release: 'v1.83.0',
tag: 'v1.83.0',
date: new Date(2023, 9, 28),
dateType: DateType.RELEASE,
},
{
icon: mdiPalette,
title: 'Custom theme',
description: 'Apply your custom CSS for modifying fonts, colors, and styles in the web application.',
release: 'v1.83.0',
tag: 'v1.83.0',
date: new Date(2023, 9, 28),
dateType: DateType.RELEASE,
},
{
icon: mdiTrashCanOutline,
title: 'Trash Feature',
description: 'Trash, restore from trash, and automatically empty the recycle bin after 30 days.',
release: 'v1.82.0',
tag: 'v1.82.0',
date: new Date(2023, 9, 17),
dateType: DateType.RELEASE,
},
{
icon: mdiBookSearchOutline,
title: 'External Libraries',
description: 'Automatically import media into Immich based on import paths and ignore patterns.',
release: 'v1.79.0',
tag: 'v1.79.0',
date: new Date(2023, 8, 21),
dateType: DateType.RELEASE,
},
{
icon: mdiMap,
title: 'Map View (Mobile)',
description: 'Heat map implementation in the mobile app.',
release: 'v1.76.0',
tag: 'v1.76.0',
date: new Date(2023, 7, 29),
dateType: DateType.RELEASE,
},
{
icon: mdiFile,
title: 'Configuration File',
description: 'Auto-configure an Immich installation via a configuration file.',
release: 'v1.75.0',
tag: 'v1.75.0',
date: new Date(2023, 7, 26),
dateType: DateType.RELEASE,
},
{
icon: mdiMonitor,
title: 'Slideshow Mode (Web)',
description: 'Start a full-screen slideshow from an Album on the web.',
release: 'v1.75.0',
tag: 'v1.75.0',
date: new Date(2023, 7, 26),
dateType: DateType.RELEASE,
},
{
icon: mdiServer,
title: 'Hardware Transcoding',
description: 'Support hardware acceleration (QuickSync, VAAPI, and Nvidia) for video transcoding.',
release: 'v1.72.0',
tag: 'v1.72.0',
date: new Date(2023, 7, 6),
dateType: DateType.RELEASE,
},
{
icon: mdiImageAlbum,
title: 'View Albums via Time Buckets',
description: 'Upgrade albums to use time buckets, an optimized virtual viewport.',
release: 'v1.72.0',
tag: 'v1.72.0',
date: new Date(2023, 7, 6),
dateType: DateType.RELEASE,
},
{
icon: mdiImageAlbum,
title: 'Album Description',
description: 'Save an album description.',
release: 'v1.72.0',
tag: 'v1.72.0',
date: new Date(2023, 7, 6),
dateType: DateType.RELEASE,
},
{
icon: mdiRotate360,
title: '360° Photos (Web)',
description: 'View 360° Photos on the web.',
release: 'v1.71.0',
tag: 'v1.71.0',
date: new Date(2023, 6, 29),
dateType: DateType.RELEASE,
},
{
icon: mdiMotionPlayOutline,
title: 'Android Motion Photos',
description: 'Add support for Android Motion Photos.',
release: 'v1.69.0',
tag: 'v1.69.0',
date: new Date(2023, 6, 23),
dateType: DateType.RELEASE,
},
{
icon: mdiFaceManOutline,
title: 'Show/Hide Faces',
description: 'Add the options to show or hide faces.',
release: 'v1.68.0',
tag: 'v1.68.0',
date: new Date(2023, 6, 20),
dateType: DateType.RELEASE,
},
{
icon: mdiMerge,
title: 'Merge Faces',
description: 'Add the ability to merge multiple faces together.',
release: 'v1.67.0',
tag: 'v1.67.0',
date: new Date(2023, 6, 14),
dateType: DateType.RELEASE,
},
{
icon: mdiImage,
title: 'Feature Photo',
description: 'Add the option to change the feature photo for a person.',
release: 'v1.66.0',
tag: 'v1.66.0',
date: new Date(2023, 6, 4),
dateType: DateType.RELEASE,
},
{
icon: mdiKeyboardSettingsOutline,
title: 'Multi-Select via SHIFT',
description: 'Add the option to multi-select while holding SHIFT.',
release: 'v1.66.0',
tag: 'v1.66.0',
date: new Date(2023, 6, 4),
dateType: DateType.RELEASE,
},
{
icon: mdiImageMultipleOutline,
title: 'Memories (Mobile)',
description: 'View "On this day..." memories in the mobile app.',
release: 'v1.65.0',
tag: 'v1.65.0',
date: new Date(2023, 5, 30),
dateType: DateType.RELEASE,
},
{
icon: mdiFaceMan,
title: 'Facial Recognition (Mobile)',
description: 'View detected faces in the mobile app.',
release: 'v1.63.0',
tag: 'v1.63.0',
date: new Date(2023, 5, 24),
dateType: DateType.RELEASE,
},
{
icon: mdiImageMultipleOutline,
title: 'Memories (Web)',
description: 'View pictures taken on this day in past years, on the web.',
release: 'v1.61.0',
tag: 'v1.61.0',
date: new Date(2023, 5, 16),
dateType: DateType.RELEASE,
},
{
icon: mdiCollage,
title: 'Justified Layout (Web)',
description: 'Implement justified layout (collage) on the web.',
release: 'v1.61.0',
tag: 'v1.61.0',
date: new Date(2023, 5, 16),
dateType: DateType.RELEASE,
},
{
icon: mdiRaw,
title: 'RAW File Formats',
description: 'Support for RAW file formats.',
release: 'v1.61.0',
tag: 'v1.61.0',
date: new Date(2023, 5, 16),
dateType: DateType.RELEASE,
},
{
icon: mdiShareAll,
title: 'Partner Sharing (Mobile)',
description: 'View shared partner photos in the mobile app.',
release: 'v1.58.0',
tag: 'v1.58.0',
date: new Date(2023, 4, 28),
dateType: DateType.RELEASE,
},
{
icon: mdiFile,
title: 'XMP Sidecar',
description: 'Attach XMP Sidecar files to assets.',
release: 'v1.58.0',
tag: 'v1.58.0',
date: new Date(2023, 4, 28),
dateType: DateType.RELEASE,
},
{
icon: mdiFolder,
title: 'Custom Storage Label',
description: 'Replace the user UUID in the storage template with a custom label.',
release: 'v1.57.0',
tag: 'v1.57.0',
date: new Date(2023, 4, 23),
dateType: DateType.RELEASE,
},
{
icon: mdiShareCircle,
title: 'Partner Sharing',
description: 'Share your entire collection with another user.',
release: 'v1.56.0',
tag: 'v1.56.0',
date: new Date(2023, 4, 18),
dateType: DateType.RELEASE,
},
{
icon: mdiFaceMan,
title: 'Facial Recognition',
description: 'Detect faces in pictures and cluster them together as people, which can be named.',
release: 'v1.56.0',
tag: 'v1.56.0',
date: new Date(2023, 4, 18),
dateType: DateType.RELEASE,
},
{
icon: mdiMap,
title: 'Map View (Web)',
description: 'View a global map, with clusters of photos based on corresponding GPS data.',
release: 'v1.55.0',
tag: 'v1.55.0',
date: new Date(2023, 4, 9),
dateType: DateType.RELEASE,
},
{
icon: mdiDevices,
title: 'Manage Auth Devices',
description: 'Manage logged-in devices and revoke access from User Settings.',
release: 'v1.55.0',
tag: 'v1.55.0',
date: new Date(2023, 4, 9),
dateType: DateType.RELEASE,
},
{
icon: mdiStar,
description: 'Reach 10K Stars on GitHub!',
title: '10,000 Stars',
release: 'v1.54.0',
tag: 'v1.54.0',
date: new Date(2023, 3, 18),
dateType: DateType.RELEASE,
},
{
icon: mdiText,
title: 'Asset Descriptions',
description: 'Save an asset description.',
release: 'v1.54.0',
tag: 'v1.54.0',
date: new Date(2023, 3, 18),
dateType: DateType.RELEASE,
},
{
icon: mdiArchiveOutline,
title: 'Archiving',
description: 'Remove assets from the main timeline by archiving them.',
release: 'v1.54.0',
tag: 'v1.54.0',
date: new Date(2023, 3, 18),
dateType: DateType.RELEASE,
},
{
icon: mdiDevices,
title: 'Responsive Web App',
description: 'Optimize the web app for small screens.',
release: 'v1.54.0',
tag: 'v1.54.0',
date: new Date(2023, 3, 18),
dateType: DateType.RELEASE,
},
{
icon: mdiFileSearch,
title: 'Search By Metadata',
description: 'Search images by filename, description, tagged people, make, model, and other metadata.',
release: 'v1.52.0',
tag: 'v1.52.0',
date: new Date(2023, 2, 29),
dateType: DateType.RELEASE,
},
{
icon: mdiImageSearch,
title: 'CLIP Search',
description: 'Search images with free-form text like "Sunset at the beach".',
release: 'v1.51.0',
tag: 'v1.51.0',
date: new Date(2023, 2, 20),
dateType: DateType.RELEASE,
},
{
icon: mdiMagnify,
title: 'Explore Page',
description: 'View tagged places, objects, and people.',
release: 'v1.51.0',
tag: 'v1.51.0',
date: new Date(2023, 2, 20),
dateType: DateType.RELEASE,
},
{
icon: mdiAppleIos,
title: 'iOS Background Uploads',
description: 'Automatically back up pictures in the background on iOS.',
release: 'v1.48.0',
tag: 'v1.48.0',
date: new Date(2023, 1, 21),
dateType: DateType.RELEASE,
},
{
icon: mdiMotionPlayOutline,
title: 'Auto-Link Live Photos',
description: 'Automatically link live photos, even when uploaded as separate files.',
release: 'v1.48.0',
tag: 'v1.48.0',
date: new Date(2023, 2, 21),
dateType: DateType.RELEASE,
},
{
icon: mdiMaterialDesign,
title: 'Material Design 3 (Mobile)',
description: 'Upgrade the mobile app to Material Design 3.',
release: 'v1.47.0',
tag: 'v1.47.0',
date: new Date(2023, 1, 13),
dateType: DateType.RELEASE,
},
{
icon: mdiHeart,
title: 'Favorites (Mobile)',
description: 'Show favorites on the mobile app.',
release: 'v1.46.0',
tag: 'v1.46.0',
date: new Date(2023, 1, 9),
dateType: DateType.RELEASE,
},
{
icon: mdiCakeVariant,
title: 'Immich Turns 1',
description: 'Immich is officially one year old.',
release: 'v1.43.0',
tag: 'v1.43.0',
date: new Date(2023, 1, 3),
dateType: DateType.DATE,
},
{
icon: mdiHeart,
title: 'Favorites Page (Web)',
description: 'Favorite and view favorites on the web.',
release: 'v1.43.0',
tag: 'v1.43.0',
date: new Date(2023, 0, 27),
dateType: DateType.RELEASE,
},
{
icon: mdiShareCircle,
title: 'Public Share Links',
description: 'Share photos and albums publicly via a shared link.',
release: 'v1.41.0',
tag: 'v1.41.1_64-dev',
date: new Date(2023, 0, 10),
dateType: DateType.RELEASE,
},
{
icon: mdiFolder,
title: 'User-Defined Storage Structure',
description: 'Support custom storage structures.',
release: 'v1.39.0',
tag: 'v1.39.0_61-dev',
date: new Date(2022, 11, 19),
dateType: DateType.RELEASE,
},
{
icon: mdiMotionPlayOutline,
title: 'iOS Live Photos',
description: 'Back up and display iOS Live Photos.',
release: 'v1.36.0',
tag: 'v1.36.0_55-dev',
date: new Date(2022, 10, 20),
dateType: DateType.RELEASE,
},
{
icon: mdiSecurity,
title: 'OAuth Integration',
description: 'Support OAuth2 and OIDC capable identity providers.',
release: 'v1.36.0',
tag: 'v1.36.0_55-dev',
date: new Date(2022, 10, 20),
dateType: DateType.RELEASE,
},
{
icon: mdiWeb,
title: 'Documentation Site',
description: 'Release an official documentation website.',
release: 'v1.33.1',
tag: 'v1.33.0_52-dev',
date: new Date(2022, 9, 26),
dateType: DateType.RELEASE,
},
{
icon: mdiThemeLightDark,
title: 'Dark Mode (Web)',
description: 'Dark mode on the web.',
release: 'v1.32.0',
tag: 'v1.32.0_50-dev',
date: new Date(2022, 9, 14),
dateType: DateType.RELEASE,
},
{
icon: mdiPanVertical,
title: 'Virtual Scrollbar (Web)',
description: 'View the main timeline with a virtual scrollbar, allowing you to jump to any point in time instantly.',
release: 'v1.27.0',
tag: 'v1.27.0_37-dev',
date: new Date(2022, 8, 6),
dateType: DateType.RELEASE,
},
{
icon: mdiCheckAll,
title: 'Checksum Duplication Check',
description: 'Enforce per-user SHA-1 checksum uniqueness.',
release: 'v1.27.0',
tag: 'v1.27.0_37-dev',
date: new Date(2022, 8, 6),
dateType: DateType.RELEASE,
},
{
icon: mdiAndroid,
title: 'Android Background Backup',
description: 'Automatic backup in the background on Android.',
release: 'v1.24.0',
tag: 'v1.24.0_34-dev',
date: new Date(2022, 7, 19),
dateType: DateType.RELEASE,
},
{
icon: mdiAccountGroup,
title: 'Admin Portal',
description: 'Manage users and admin settings from the web.',
release: 'v1.10.0',
tag: 'v1.10.0_15-dev',
date: new Date(2022, 4, 29),
dateType: DateType.RELEASE,
},
{
icon: mdiShareCircle,
title: 'Album Sharing',
description: 'Share albums with other users.',
release: 'v1.7.0',
tag: 'v1.7.0_11-dev',
date: new Date(2022, 3, 24),
dateType: DateType.RELEASE,
},
{
icon: mdiTag,
title: 'Image Tagging',
description: 'Tag images with custom values.',
release: 'v1.7.0',
tag: 'v1.7.0_11-dev',
date: new Date(2022, 3, 24),
dateType: DateType.RELEASE,
},
{
icon: mdiImage,
title: 'View Exif',
description: 'View metadata about assets.',
release: 'v1.3.0',
tag: 'V1.3.0-dev',
date: new Date(2022, 2, 22),
dateType: DateType.RELEASE,
},
{
icon: mdiCheckboxMarked,
title: 'Multi Select',
description: 'Select and execute actions on multiple assets at the same time.',
release: 'v1.2.0',
tag: 'V0.2-dev',
date: new Date(2022, 1, 8),
dateType: DateType.RELEASE,
},
{
icon: mdiVideo,
title: 'Video Player',
description: 'Play videos on the web and on mobile.',
release: 'v1.2.0',
tag: 'v0.2-dev',
date: new Date(2022, 1, 8),
dateType: DateType.RELEASE,
},
{
icon: mdiPartyPopper,
title: 'First Commit',
description: 'First commit on GitHub, Immich is born.',
release: 'v1.0.0',
date: new Date(2022, 1, 3),
dateType: DateType.DATE,
},
];
export default function MilestonePage(): JSX.Element {
return (
<Layout title="Milestones" description="History of Immich">
<section className="my-8">
<h1 className="md:text-6xl text-center mb-10 text-immich-primary dark:text-immich-dark-primary px-2">
Major Milestones
</h1>
<p className="text-center text-xl px-2">
A list of project achievements and milestones, <br />
by release date.
</p>
<div className="flex justify-around mt-8 w-full max-w-full">
<Timeline items={items} />
</div>
</section>
</Layout>
);
}

98
docs/static/img/immich-logo.svg vendored Normal file
View file

@ -0,0 +1,98 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 26.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="svg2781" xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 564.2 553.5"
style="enable-background:new 0 0 564.2 553.5;" xml:space="preserve">
<style type="text/css">
.st0{fill:#4081EF;stroke:#512D8C;stroke-miterlimit:10;}
.st1{fill:#31A452;stroke:#512D8C;stroke-miterlimit:10;}
.st2{fill:#DE7FB3;stroke:#512D8C;stroke-miterlimit:10;}
.st3{fill:#FFB800;stroke:#512D8C;stroke-miterlimit:10;}
.st4{fill:#E64132;stroke:#512D8C;stroke-miterlimit:10;}
.st5{fill:#F2F5FB;stroke:#512D8C;stroke-miterlimit:10;}
</style>
<path class="st0" d="M210.5,549.6c-2.2-0.2-5.5-1-9.7-2.2c-52.4-15.7-99-46.5-133.8-88.5c-8.8-10.7-17.2-22.4-19.4-27.5
c-8.1-18.1-6.3-38.7,4.8-55.4c5-7.5,13.2-15,20.5-18.7c1.2-0.6,54.1-20,55.8-20.4c0.5-0.1,0.5,0.2-0.3,2.1c-0.7,1.7-1,3.1-1.1,5.5
l-0.1,3.2l2.8,5.8c8.7,17.9,19.2,32.7,33.2,46.4c6.3,6.2,7.8,7.6,13.8,12.3c22.7,18.1,52,30.7,79.9,34.3c2.5,0.3,5,0.8,5.7,1
c2.8,0.9,7.7-0.8,11-3.7l1.8-1.6l-0.2,4.8c-0.1,2.7-0.6,15.4-1,28.3c-0.6,20.3-0.8,24-1.5,27.5c-3.9,20.7-18.6,37.5-38.4,44.1
c-4.6,1.5-8,2.2-13.1,2.7C216.6,550.1,215.3,550,210.5,549.6z"/>
<path class="st1" d="M339.8,549.4c-4-0.4-9.4-1.6-13.2-2.9c-3.4-1.2-10-4.4-12.5-6.1c-10.9-7.4-19-17.9-23.1-30
c-2.2-6.7-2.3-7.5-3.3-36.9c-0.5-14.9-0.9-27.9-0.9-28.9l0-1.9l2.3,1.8c2.6,2,6.6,3.4,8.5,3.1c0.6-0.1,3-0.5,5.3-0.8
c37.7-5.3,71.2-22.2,97.4-49.1c12.2-12.5,21.4-25.5,29.9-42.4l3.5-7l0-3.6c0-3.1-0.1-3.8-1-5.7c-0.5-1.2-0.9-2.1-0.9-2.2
c0.2-0.2,55.3,20.1,56.9,20.9c2.6,1.3,6.6,4.1,9.9,7c9.2,7.7,16.1,19.4,18.8,31.8c0.7,3.1,0.8,4.8,0.8,11.3c0,8.6-0.5,11.7-2.9,18.7
c-1.7,5-2.9,7.2-7.1,13.1c-7.6,11-15.3,20.5-25.2,31.2c-32.8,35.4-76.5,62.5-123.4,76.3C351.6,549.6,347.2,550.1,339.8,549.4z"/>
<path class="st2" d="M255.6,438c-25.9-4.2-50.7-14.9-71.7-31c-5.2-4-8.7-7.1-14.1-12.4c-12.7-12.5-21.9-24.9-30.5-41.4
c-2.3-4.4-2.4-4.7-2.4-7.1c0-8.8,8.5-15.2,16.9-12.7c5.6,1.7,9.6,6.8,9.7,12.2c0,2.6-0.8,4.6-2.6,6.2c-1.2,1.1-3.2,1.9-4.6,1.9
c-1.2,0-3.3-0.8-4.3-1.6c-2.1-1.8-2-1,0.4,3.2c19.3,33.8,52.3,59.1,90,69.1c5.7,1.5,11.5,2.7,11.8,2.4c0.1-0.1-0.4-0.8-1.3-1.6
c-5.1-4.5-2.3-11.7,5-12.8c5.4-0.8,11.4,2.7,13.9,8c0.8,1.7,1,2.5,1,5.3s-0.1,3.5-1,5.3c-2,4.3-6.8,7.9-10.3,7.8
C260.6,438.7,257.9,438.3,255.6,438z"/>
<path class="st0" d="M297.6,438.2c-3.4-1.3-6.4-4.3-7.8-8.1c-1.1-2.9-0.9-7.3,0.5-10.2c2.6-5.3,8.7-8.5,14.4-7.5
c2.9,0.5,4.7,1.9,6,4.3c0.8,1.6,1,2.2,0.8,3.6c-0.3,2.2-0.9,3.3-2.7,4.8c-0.8,0.7-1.4,1.4-1.3,1.5c0.5,0.5,13.4-2.7,21.3-5.4
c33.6-11.3,62.5-35.1,80.4-66.1c2.5-4.4,2.6-5,0.5-3.2c-2.8,2.4-7,1.9-9.6-1c-4-4.6-0.7-13.8,6.1-16.9c2-0.9,2.7-1,5.5-1
c2.9,0,3.5,0.1,5.6,1.1c4.4,2.1,7.4,6.4,7.8,11c0.2,2.2,0.1,2.3-2.2,6.9c-23,45.9-67,78.1-117.2,85.9
C300.2,438.8,299.4,438.9,297.6,438.2z"/>
<path class="st1" d="M211.1,398.5c-4.7-0.9-8.7-2.7-12.9-5.9c-10.8-8.1-13.5-22.3-6.6-33.7c0.7-1.2,1.1-2.2,1-2.4
c-0.2-0.2-1.2-0.6-2.3-1.1c-7.6-3-13-10.6-13.5-19.1c-0.5-7.4,3.1-15,9-19.4c1-0.7,2.2-1.5,2.6-1.8c0.8-0.4,68.9-22.7,69.4-22.7
c0.2,0,0.7,0.7,1.2,1.5c0.5,0.8,1.6,2.3,2.4,3.3c1.2,1.4,1.5,1.9,1.2,2.3c-0.2,0.3-6.9,9.5-14.8,20.5
c-15.9,21.9-15.5,21.3-13.4,23.4c1.3,1.3,2.9,1.4,4.4,0.3c0.6-0.4,7.5-9.7,15.5-20.7c11.2-15.4,14.6-19.9,15-19.7
c0.9,0.4,5.5,1.9,6.6,2.1l1,0.2l0,35.3c0,39.7,0,38.8-2.5,44c-2.6,5.3-7.2,9.3-12.7,11.2c-3.7,1.3-6.8,1.6-10.2,1
c-5.5-0.9-9.8-3.2-13.7-7.4l-2.2-2.4l-0.6,0.9c-3,4.3-8.6,8.1-14,9.5C218.2,398.6,213.2,398.9,211.1,398.5z"/>
<path class="st3" d="M342.9,398.5c-5.5-0.9-9.9-3.2-14.3-7.6l-3.2-3.2l-0.7,1c-2.3,3.3-6.8,6.5-11.1,7.9c-3.7,1.2-9.2,1.4-12.6,0.3
c-7.1-2.1-12.7-7.4-15.2-14.3l-0.9-2.6v-37.1v-37.1l1.8-0.4c1-0.2,2.7-0.8,3.9-1.2c1.1-0.5,2.1-0.8,2.2-0.7c0.1,0.1,6.5,9,14.4,19.9
c7.8,10.9,14.7,20.1,15.2,20.5c2.2,1.9,5.4,0.4,5.4-2.6c0-1.4-1-2.9-13.8-20.5c-7.6-10.5-14.2-19.6-14.7-20.4l-0.9-1.3l1.4-1.7
c0.8-0.9,1.9-2.5,2.5-3.4l1-1.6l34.4,11.2c18.9,6.2,35.1,11.6,35.9,12.1c6.8,4,11.1,11.3,11.1,19.1c0,4.1-0.5,6.4-2.4,10.2
c-2,4.1-5.5,7.6-9.6,9.7c-1.6,0.8-3.2,1.5-3.4,1.5c-1,0-0.9,0.7,0.3,2.6c2.8,4.3,4,8.5,3.9,13.7c0,8.1-3.7,15.2-10.6,20.3
C356.4,397.6,349.5,399.5,342.9,398.5z"/>
<path class="st2" d="M53.9,341.9c-0.5-0.1-2.3-0.4-3.9-0.7c-15.6-2.6-30.4-12.6-38.8-26.2c-3.5-5.7-6.4-13.2-7.8-19.9
c-1.2-6.1-0.8-28.1,0.8-43.1c4.5-43,19-84.3,42.2-120.7c6.5-10.2,14.9-21.5,18.2-24.6c17.8-16.6,43.1-20.5,64.8-10
c4.3,2.1,8.8,5.1,12.7,8.6c2.8,2.4,5.8,6.1,20.9,25.5c9.7,12.5,17.8,22.8,17.9,23c0.2,0.2-0.9,0.4-3.2,0.4c-2.5,0-4.1,0.2-5.7,0.7
c-2.1,0.7-2.6,1.1-7.9,6.3c-8.2,8.1-14.4,15.3-20.3,23.9c-15.5,22.2-25.4,47.7-28.8,74.8c-2.2,16.9-1.6,37.5,1.6,52.3
c0.3,1.4,0.5,2.8,0.4,3c-0.1,0.2,0.2,1.3,0.8,2.4c1.1,2.4,4.3,5.7,6.5,6.8l1.5,0.8l-1.2,0.4c-0.7,0.2-13.1,3.8-27.6,8
c-16.4,4.7-27.7,7.8-29.8,8.1C64.1,342.1,56.1,342.3,53.9,341.9z"/>
<path class="st3" d="M494.7,341.7c-2.1-0.3-33.8-9.1-56.5-15.8l-2.5-0.7l1.6-0.8c3.4-1.7,7.2-6.6,7.3-9.6c0-0.7,0.4-3.3,0.8-5.8
c3.9-22.7,3.1-46.1-2.5-68.4c-6.4-25.5-18.6-49.2-35.8-69.1c-4.6-5.3-14.8-15.4-16.4-16.1c-2.4-1.1-5.1-1.6-8-1.4l-2.7,0.2l1.2-1.5
c0.7-0.8,8.5-10.8,17.5-22.3c8.9-11.5,17.2-21.8,18.5-23.1c2.6-2.7,7-6.2,10.3-8.2c19.3-11.6,43-11.1,61.6,1.2
c5.4,3.6,8.2,6.2,12.3,11.7c26.4,34.5,44,73.7,52.3,116.2c3.4,17.6,4.9,33.3,5,52.4c0,13-0.2,14.8-2.5,21.8
C547.8,328.6,521.7,345.2,494.7,341.7z"/>
<path class="st4" d="M133.9,318.5c-2-0.5-4.6-1.9-6-3.3c-2.5-2.4-3.1-3.5-3.7-7.3c-4.4-27.3-2.2-54,6.7-79.3
c5.3-15.1,13.5-30.5,23-43.1c5.8-7.8,16.6-19.5,19-20.7c4.7-2.4,11.3-1.2,15.2,2.7c5.4,5.4,5.2,13.9-0.3,19.1
c-4.3,4-9.4,4.4-12.6,0.9c-1.7-1.9-2.2-3.9-1.7-6.4c0.2-1.1,0.3-2,0.2-2.2c-0.3-0.3-3.6,3.3-8.3,9.1c-17.6,21.8-28.5,48-31.9,76.5
c-1.1,9.3-1,26.4,0.1,34.6c0.3,1.8,0.8,1.9,1.4,0.1c0.9-2.6,4-4.7,6.8-4.7c3,0,5.9,2.2,7.5,5.7c0.6,1.3,0.8,2.3,0.8,5.2
c0,3.3-0.1,3.8-1.1,5.7c-1.4,2.7-4.6,5.7-7.1,6.6C139.4,318.6,135.8,318.9,133.9,318.5z"/>
<path class="st1" d="M422.6,318.5c-3.7-0.6-7.7-3.6-9.4-7.1c-3.8-7.5,0.1-16.9,6.9-16.9c3.1,0,5.8,2,6.9,5.2
c0.4,1.2,0.5,1.3,0.7,0.7c1.3-3.7,1.7-26.4,0.6-35.7c-3.6-29.6-14.5-55.3-33-77.9c-5.5-6.7-8.4-9.4-7.1-6.6c0.7,1.4,0.5,4.3-0.3,5.9
c-0.9,1.7-3.2,3.5-5,3.8c-3.2,0.6-7.9-1.6-10.2-4.8c-6.5-8.8-0.5-21.2,10.4-21.4c4.6-0.1,5.2,0.3,11.2,6.4
c12.1,12.3,21.1,24.9,28.8,40.3c13.2,26.3,18.6,54.9,16.1,84.5c-0.5,5.6-2,15.7-2.6,17.1c-1.3,2.8-4.8,5.5-8.4,6.5
C425.9,318.9,425.1,318.9,422.6,318.5z"/>
<path class="st0" d="M178.2,307.2c-6-1.3-12.2-6.2-14.9-11.7c-3.4-7-3.1-15.1,0.9-21.6c0.7-1.2,1.2-2.3,1.1-2.4
c-0.1-0.1-1.1-0.6-2.1-1c-3.9-1.5-8.1-4.8-10.7-8.3c-4.6-6.2-6.1-14.6-3.9-22.1c2.9-10.3,9.4-16.8,19.1-19.3c2.8-0.7,9-0.8,11.7,0
c1.1,0.3,2.2,0.5,2.4,0.5c0.2,0,0.3-0.7,0.3-1.5c0-2.9,0.8-5.8,2.4-9.2c5.2-10.8,18.1-15.5,29-10.5c2.7,1.2,6.2,3.8,7.8,5.8
c0.7,0.8,10.3,14,21.5,29.4l20.3,27.9l-1.5,1.8c-0.8,1-1.9,2.6-2.5,3.5c-0.6,1-1.2,1.7-1.5,1.6c-4.5-1.7-46.7-15-47.7-15
c-1.9,0-3.1,1.3-3.1,3.2c0,1,0.2,1.7,0.8,2.3c0.6,0.6,7.8,3.1,24.5,8.5l23.7,7.7l-0.1,4.3l-0.1,4.3L223,295.9
c-18,5.9-33.9,10.9-35.2,11.2C184.7,307.8,181.2,307.8,178.2,307.2z"/>
<path class="st4" d="M372.5,306.8c-1.8-0.5-17.5-5.6-35-11.3l-31.8-10.4l1-4.3v-4.3l22.6-7.7c15-4.9,24-8,24.6-8.5
c0.7-0.6,0.9-1.1,0.9-2.2c0-2-1.2-3.3-3.1-3.3c-0.9,0-10.5,2.9-24.7,7.5c-12.8,4.1-23.4,7.5-23.6,7.5c-0.1,0-0.7-0.8-1.3-1.9
c-0.6-1-1.6-2.5-2.2-3.2c-0.7-0.7-1.2-1.5-1.2-1.6c0-0.2,9.6-13.5,21.4-29.6c18.9-26,21.6-29.6,23.6-31.1c5.7-4.4,13.1-5.8,19.7-3.9
c9,2.7,16.1,11.6,16.1,20.3c0,2.3-0.1,2.3,3.1,1.5c4.7-1.1,11.5-0.5,16,1.5c4.6,2,9,6,11.5,10.2c2.1,3.6,3.9,9.4,4.2,13.2
c0.3,5.2-1.1,10.7-4,15.3c-2.6,4.1-7.8,8.3-12.1,9.8c-0.9,0.3-1.7,0.8-1.7,1c0,0.2,0.4,1,0.9,1.7c2.4,3.6,3.6,7.7,3.5,12.7
c0,5.8-2.1,10.7-6.4,15.1c-4,4.1-8.9,6.3-14.9,6.5C376.3,307.7,375.3,307.6,372.5,306.8z"/>
<path class="st5" d="M276.2,298.9c-6.1-1.6-11.4-6.8-13.2-12.9c-0.7-2.4-0.7-7.5,0-9.9c1.7-5.8,6.6-10.8,12.3-12.5
c2.7-0.8,7.2-0.9,10-0.2c6.2,1.6,11.6,7.1,13.2,13.3c1.6,6-0.3,12.6-5,17.3C288.9,298.6,282.2,300.5,276.2,298.9z"/>
<path class="st2" d="M248.3,229.8c-13.3-18.3-21.2-29.6-22-31.1c-1.4-3-1.9-5.5-1.9-9.4c0-14.1,13.1-24.4,27.1-21.4
c1.4,0.3,2.6,0.5,2.7,0.5s0.3-1.3,0.4-2.8c0.8-10.7,8.4-19.6,18.9-22.4c3.9-1,10.6-1,14.5,0c8.9,2.3,15.9,9.3,18.2,18.2
c0.4,1.5,0.7,3.7,0.7,4.9c0,1.2,0.1,2.1,0.3,2.1s1.5-0.3,3-0.6c7.4-1.6,15.2,0.7,20.5,6c4.3,4.3,6.6,9.6,6.6,15.6
c0,4-0.6,6.5-2.4,10c-0.6,1.2-10.4,15-21.7,30.7c-17.8,24.5-20.8,28.5-21.4,28.3c-0.4-0.1-1.9-0.6-3.4-1.1c-1.5-0.5-2.9-0.9-3.3-0.9
c-0.7,0-0.7-0.8-0.3-25.5v-25.5l-1.4-0.9c-1-1.1-2.5-1.5-3.8-0.9c-2,0.8-2-0.5-1.8,27.2v25.8h-1.2c-0.5-0.2-2.4,0.3-4,0.9
s-3.1,1.1-3.2,1.1C269.2,258.5,259.8,245.6,248.3,229.8z"/>
<path class="st3" d="M210.9,164.8c-4.1-0.9-7.7-3.6-9.6-7.4c-1.4-2.8-1.7-7.3-0.5-10.3c1.7-4.5,3.9-6.1,15.6-11.2
c15.8-7,31.4-11.1,49.2-12.9c7.3-0.8,23.2-0.8,30.6,0c17.4,1.8,33.3,6,49.1,13c7.3,3.2,12.5,6.1,13.6,7.5c4.3,5.6,3.8,12.7-1.1,17.6
c-5.1,5.1-12.9,5.4-18.1,0.7c-2-1.8-3-3.5-3.4-5.6c-0.7-4,2.9-8.1,7.3-8.2c1.4,0,1.5-0.1,1.1-0.5c-0.3-0.3-2.2-1.2-4.3-2.1
c-33.2-14.5-70.5-16.4-105-5.4c-7.5,2.4-19,7.2-18.6,7.7c0.1,0.2,0.8,0.3,1.6,0.3c5.6,0,9.1,6.2,6.1,10.8
C221.6,163.3,215.9,165.9,210.9,164.8z"/>
<path class="st4" d="M174.7,123.4c-8.9-13.1-16.8-25.1-17.5-26.6c-1.6-3.3-3.6-9.2-4.4-13c-2.6-12.5-0.9-25.8,5-37.5
c4.2-8.3,11.2-16.3,18.6-21.3c5-3.4,6.1-3.9,12.8-6.3c23.1-8.2,47.2-13.1,73.4-15c7.5-0.6,28.5-0.6,36.3,0
c25.5,1.8,50.6,6.9,73,14.8c6.4,2.2,8.2,3.1,13.1,6.5c9.8,6.6,18.1,17.5,22,29.2c2.2,6.5,2.7,10,2.7,17.9c0,7.9-0.5,11.3-2.7,17.9
c-2.3,6.8-3.7,9.1-20.3,33.6l-16.1,23.8l-0.4-2.2c-0.2-1.2-0.9-3-1.4-4c-1-1.8-4.4-5.6-4.7-5.2c-0.1,0.1-1.2-0.4-2.4-1.1
c-9.1-5.2-21.9-10.5-33.2-13.9c-37-11-77.2-8.8-113,6.1c-4.9,2.1-17.7,8.4-19.2,9.5c-2.2,1.6-5.1,6.8-5.1,9c0,0.4-0.1,1-0.3,1.2
C191,147,184.7,138,174.7,123.4z"/>
</svg>

After: 9.7 KiB

Binary file not shown. Before: 1.6 MiB → After: 1.8 MiB

View file

@ -10,9 +10,8 @@ RUN poetry config installer.max-workers 10 && \
RUN python -m venv /opt/venv
ENV VIRTUAL_ENV="/opt/venv" PATH="/opt/venv/bin:${PATH}"
COPY poetry.lock pyproject.toml requirements.txt ./
COPY poetry.lock pyproject.toml ./
RUN poetry install --sync --no-interaction --no-ansi --no-root --only main
RUN pip install --no-deps -r requirements.txt
FROM python:3.11-slim-bookworm

View file

@ -1,5 +1,6 @@
import json
from typing import Any, Iterator, TypeAlias
from pathlib import Path
from typing import Any, Iterator
from unittest import mock
import numpy as np
@ -8,8 +9,7 @@ from fastapi.testclient import TestClient
from PIL import Image
from .main import app, init_state
ndarray: TypeAlias = np.ndarray[int, np.dtype[np.float32]]
from .schemas import ndarray_f32
@pytest.fixture
@ -18,13 +18,13 @@ def pil_image() -> Image.Image:
@pytest.fixture
def cv_image(pil_image: Image.Image) -> ndarray:
def cv_image(pil_image: Image.Image) -> ndarray_f32:
return np.asarray(pil_image)[:, :, ::-1] # PIL uses RGB while cv2 uses BGR
@pytest.fixture
def mock_get_model() -> Iterator[mock.Mock]:
with mock.patch("app.models.cache.InferenceModel.from_model_type", autospec=True) as mocked:
with mock.patch("app.models.cache.from_model_type", autospec=True) as mocked:
yield mocked
@ -37,3 +37,25 @@ def deployed_app() -> TestClient:
@pytest.fixture(scope="session")
def responses() -> dict[str, Any]:
return json.load(open("responses.json", "r"))
@pytest.fixture(scope="session")
def clip_model_cfg() -> dict[str, Any]:
return {
"embed_dim": 512,
"vision_cfg": {"image_size": 224, "layers": 12, "width": 768, "patch_size": 32},
"text_cfg": {"context_length": 77, "vocab_size": 49408, "width": 512, "heads": 8, "layers": 12},
}
@pytest.fixture(scope="session")
def clip_preprocess_cfg() -> dict[str, Any]:
return {
"size": [224, 224],
"mode": "RGB",
"mean": [0.48145466, 0.4578275, 0.40821073],
"std": [0.26862954, 0.26130258, 0.27577711],
"interpolation": "bicubic",
"resize_mode": "shortest",
"fill_color": 0,
}

View file

@ -1,3 +1,25 @@
from .clip import CLIPEncoder
from typing import Any
from app.schemas import ModelType
from .base import InferenceModel
from .clip import MCLIPEncoder, OpenCLIPEncoder, is_mclip, is_openclip
from .facial_recognition import FaceRecognizer
from .image_classification import ImageClassifier
def from_model_type(model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
match model_type:
case ModelType.CLIP:
if is_openclip(model_name):
return OpenCLIPEncoder(model_name, **model_kwargs)
elif is_mclip(model_name):
return MCLIPEncoder(model_name, **model_kwargs)
else:
raise ValueError(f"Unknown CLIP model {model_name}")
case ModelType.FACIAL_RECOGNITION:
return FaceRecognizer(model_name, **model_kwargs)
case ModelType.IMAGE_CLASSIFICATION:
return ImageClassifier(model_name, **model_kwargs)
case _:
raise ValueError(f"Unknown model type {model_type}")

View file

@ -25,7 +25,7 @@ class InferenceModel(ABC):
) -> None:
self.model_name = model_name
self.loaded = False
self._cache_dir = Path(cache_dir) if cache_dir is not None else get_cache_dir(model_name, self.model_type)
self._cache_dir = Path(cache_dir) if cache_dir is not None else None
self.providers = model_kwargs.pop("providers", ["CPUExecutionProvider"])
# don't pre-allocate more memory than needed
self.provider_options = model_kwargs.pop(
@ -92,7 +92,7 @@ class InferenceModel(ABC):
@property
def cache_dir(self) -> Path:
return self._cache_dir
return self._cache_dir if self._cache_dir is not None else get_cache_dir(self.model_name, self.model_type)
@cache_dir.setter
def cache_dir(self, cache_dir: Path) -> None:

View file

@ -4,6 +4,8 @@ from aiocache.backends.memory import SimpleMemoryCache
from aiocache.lock import OptimisticLock
from aiocache.plugins import BasePlugin, TimingPlugin
from app.models import from_model_type
from ..schemas import ModelType
from .base import InferenceModel
@ -50,7 +52,7 @@ class ModelCache:
async with OptimisticLock(self.cache, key) as lock:
model = await self.cache.get(key)
if model is None:
model = InferenceModel.from_model_type(model_type, model_name, **model_kwargs)
model = from_model_type(model_type, model_name, **model_kwargs)
await lock.cas(model, ttl=self.ttl)
return model

View file

@ -1,23 +1,24 @@
import os
import zipfile
import json
from abc import abstractmethod
from functools import cached_property
from io import BytesIO
from pathlib import Path
from typing import Any, Literal
import numpy as np
import onnxruntime as ort
import torch
from clip_server.model.clip import BICUBIC, _convert_image_to_rgb
from clip_server.model.clip_onnx import _MODELS, _S3_BUCKET_V2, CLIPOnnxModel, download_model
from clip_server.model.pretrained_models import _VISUAL_MODEL_IMAGE_SIZE
from clip_server.model.tokenization import Tokenizer
from huggingface_hub import snapshot_download
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import AutoTokenizer
from app.config import log
from app.models.transforms import crop, get_pil_resampling, normalize, resize, to_numpy
from app.schemas import ModelType, ndarray_f32, ndarray_i32, ndarray_i64
from ..config import log
from ..schemas import ModelType
from .base import InferenceModel
class CLIPEncoder(InferenceModel):
class BaseCLIPEncoder(InferenceModel):
_model_type = ModelType.CLIP
def __init__(
@ -27,48 +28,29 @@ class CLIPEncoder(InferenceModel):
mode: Literal["text", "vision"] | None = None,
**model_kwargs: Any,
) -> None:
if mode is not None and mode not in ("text", "vision"):
raise ValueError(f"Mode must be 'text', 'vision', or omitted; got '{mode}'")
if model_name not in _MODELS:
raise ValueError(f"Unknown model name {model_name}.")
self.mode = mode
super().__init__(model_name, cache_dir, **model_kwargs)
def _download(self) -> None:
models: tuple[tuple[str, str], tuple[str, str]] = _MODELS[self.model_name]
text_onnx_path = self.cache_dir / "textual.onnx"
vision_onnx_path = self.cache_dir / "visual.onnx"
if not text_onnx_path.is_file():
self._download_model(*models[0])
if not vision_onnx_path.is_file():
self._download_model(*models[1])
def _load(self) -> None:
if self.mode == "text" or self.mode is None:
log.debug(f"Loading clip text model '{self.model_name}'")
self.text_model = ort.InferenceSession(
self.cache_dir / "textual.onnx",
self.textual_path.as_posix(),
sess_options=self.sess_options,
providers=self.providers,
provider_options=self.provider_options,
)
self.text_outputs = [output.name for output in self.text_model.get_outputs()]
self.tokenizer = Tokenizer(self.model_name)
if self.mode == "vision" or self.mode is None:
log.debug(f"Loading clip vision model '{self.model_name}'")
self.vision_model = ort.InferenceSession(
self.cache_dir / "visual.onnx",
self.visual_path.as_posix(),
sess_options=self.sess_options,
providers=self.providers,
provider_options=self.provider_options,
)
self.vision_outputs = [output.name for output in self.vision_model.get_outputs()]
image_size = _VISUAL_MODEL_IMAGE_SIZE[CLIPOnnxModel.get_model_name(self.model_name)]
self.transform = _transform_pil_image(image_size)
def _predict(self, image_or_text: Image.Image | str) -> list[float]:
if isinstance(image_or_text, bytes):
@ -78,55 +60,163 @@ class CLIPEncoder(InferenceModel):
case Image.Image():
if self.mode == "text":
raise TypeError("Cannot encode image as text-only model")
pixel_values = self.transform(image_or_text)
assert isinstance(pixel_values, torch.Tensor)
pixel_values = torch.unsqueeze(pixel_values, 0).numpy()
outputs = self.vision_model.run(self.vision_outputs, {"pixel_values": pixel_values})
outputs = self.vision_model.run(None, self.transform(image_or_text))
case str():
if self.mode == "vision":
raise TypeError("Cannot encode text as vision-only model")
text_inputs: dict[str, torch.Tensor] = self.tokenizer(image_or_text)
inputs = {
"input_ids": text_inputs["input_ids"].int().numpy(),
"attention_mask": text_inputs["attention_mask"].int().numpy(),
}
outputs = self.text_model.run(self.text_outputs, inputs)
outputs = self.text_model.run(None, self.tokenize(image_or_text))
case _:
raise TypeError(f"Expected Image or str, but got: {type(image_or_text)}")
return outputs[0][0].tolist()
def _download_model(self, model_name: str, model_md5: str) -> bool:
# downloading logic is adapted from clip-server's CLIPOnnxModel class
download_model(
url=_S3_BUCKET_V2 + model_name,
target_folder=self.cache_dir.as_posix(),
md5sum=model_md5,
with_resume=True,
)
file = self.cache_dir / model_name.split("/")[1]
if file.suffix == ".zip":
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(self.cache_dir)
os.remove(file)
return True
@abstractmethod
def tokenize(self, text: str) -> dict[str, ndarray_i32]:
pass
@abstractmethod
def transform(self, image: Image.Image) -> dict[str, ndarray_f32]:
pass
@property
def textual_dir(self) -> Path:
return self.cache_dir / "textual"
@property
def visual_dir(self) -> Path:
return self.cache_dir / "visual"
@property
def model_cfg_path(self) -> Path:
return self.cache_dir / "config.json"
@property
def textual_path(self) -> Path:
return self.textual_dir / "model.onnx"
@property
def visual_path(self) -> Path:
return self.visual_dir / "model.onnx"
@property
def preprocess_cfg_path(self) -> Path:
return self.visual_dir / "preprocess_cfg.json"
@property
def cached(self) -> bool:
return (self.cache_dir / "textual.onnx").is_file() and (self.cache_dir / "visual.onnx").is_file()
return self.textual_path.is_file() and self.visual_path.is_file()
# same as `_transform_blob` without `_blob2image`
def _transform_pil_image(n_px: int) -> Compose:
return Compose(
[
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize(
(0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711),
),
]
)
class OpenCLIPEncoder(BaseCLIPEncoder):
def __init__(
self,
model_name: str,
cache_dir: str | None = None,
mode: Literal["text", "vision"] | None = None,
**model_kwargs: Any,
) -> None:
super().__init__(_clean_model_name(model_name), cache_dir, mode, **model_kwargs)
def _download(self) -> None:
snapshot_download(
f"immich-app/{self.model_name}",
cache_dir=self.cache_dir,
local_dir=self.cache_dir,
local_dir_use_symlinks=False,
)
def _load(self) -> None:
super()._load()
self.tokenizer = AutoTokenizer.from_pretrained(self.textual_dir)
self.sequence_length = self.model_cfg["text_cfg"]["context_length"]
self.size = (
self.preprocess_cfg["size"][0] if type(self.preprocess_cfg["size"]) == list else self.preprocess_cfg["size"]
)
self.resampling = get_pil_resampling(self.preprocess_cfg["interpolation"])
self.mean = np.array(self.preprocess_cfg["mean"], dtype=np.float32)
self.std = np.array(self.preprocess_cfg["std"], dtype=np.float32)
def tokenize(self, text: str) -> dict[str, ndarray_i32]:
input_ids: ndarray_i64 = self.tokenizer(
text,
max_length=self.sequence_length,
return_tensors="np",
return_attention_mask=False,
padding="max_length",
truncation=True,
).input_ids
return {"text": input_ids.astype(np.int32)}
def transform(self, image: Image.Image) -> dict[str, ndarray_f32]:
image = resize(image, self.size)
image = crop(image, self.size)
image_np = to_numpy(image)
image_np = normalize(image_np, self.mean, self.std)
return {"image": np.expand_dims(image_np.transpose(2, 0, 1), 0)}
@cached_property
def model_cfg(self) -> dict[str, Any]:
return json.load(self.model_cfg_path.open())
@cached_property
def preprocess_cfg(self) -> dict[str, Any]:
return json.load(self.preprocess_cfg_path.open())
class MCLIPEncoder(OpenCLIPEncoder):
def tokenize(self, text: str) -> dict[str, ndarray_i32]:
tokens: dict[str, ndarray_i64] = self.tokenizer(text, return_tensors="np")
return {k: v.astype(np.int32) for k, v in tokens.items()}
_OPENCLIP_MODELS = {
"RN50__openai",
"RN50__yfcc15m",
"RN50__cc12m",
"RN101__openai",
"RN101__yfcc15m",
"RN50x4__openai",
"RN50x16__openai",
"RN50x64__openai",
"ViT-B-32__openai",
"ViT-B-32__laion2b_e16",
"ViT-B-32__laion400m_e31",
"ViT-B-32__laion400m_e32",
"ViT-B-32__laion2b-s34b-b79k",
"ViT-B-16__openai",
"ViT-B-16__laion400m_e31",
"ViT-B-16__laion400m_e32",
"ViT-B-16-plus-240__laion400m_e31",
"ViT-B-16-plus-240__laion400m_e32",
"ViT-L-14__openai",
"ViT-L-14__laion400m_e31",
"ViT-L-14__laion400m_e32",
"ViT-L-14__laion2b-s32b-b82k",
"ViT-L-14-336__openai",
"ViT-H-14__laion2b-s32b-b79k",
"ViT-g-14__laion2b-s12b-b42k",
}
_MCLIP_MODELS = {
"LABSE-Vit-L-14",
"XLM-Roberta-Large-Vit-B-32",
"XLM-Roberta-Large-Vit-B-16Plus",
"XLM-Roberta-Large-Vit-L-14",
}
def _clean_model_name(model_name: str) -> str:
return model_name.split("/")[-1].replace("::", "__")
def is_openclip(model_name: str) -> bool:
return _clean_model_name(model_name) in _OPENCLIP_MODELS
def is_mclip(model_name: str) -> bool:
return _clean_model_name(model_name) in _MCLIP_MODELS
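A quick sanity-check sketch of the name helpers above, assuming the package imports resolve; the expected values follow directly from _clean_model_name and the two model sets:

from app.models.clip import _clean_model_name, is_mclip, is_openclip

assert _clean_model_name("ViT-B-32::openai") == "ViT-B-32__openai"
assert _clean_model_name("M-CLIP/XLM-Roberta-Large-Vit-B-32") == "XLM-Roberta-Large-Vit-B-32"
assert is_openclip("ViT-B-32::openai")
assert is_mclip("M-CLIP/XLM-Roberta-Large-Vit-B-32")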

View file

@ -9,7 +9,8 @@ from insightface.model_zoo import ArcFaceONNX, RetinaFace
from insightface.utils.face_align import norm_crop
from insightface.utils.storage import BASE_REPO_URL, download_file
from ..schemas import ModelType
from app.schemas import ModelType, ndarray_f32
from .base import InferenceModel
@ -68,7 +69,7 @@ class FaceRecognizer(InferenceModel):
)
self.rec_model.prepare(ctx_id=0)
def _predict(self, image: np.ndarray[int, np.dtype[Any]] | bytes) -> list[dict[str, Any]]:
def _predict(self, image: ndarray_f32 | bytes) -> list[dict[str, Any]]:
if isinstance(image, bytes):
image = cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)
bboxes, kpss = self.det_model.detect(image)

View file

@ -0,0 +1,35 @@
import numpy as np
from PIL import Image
from app.schemas import ndarray_f32
_PIL_RESAMPLING_METHODS = {resampling.name.lower(): resampling for resampling in Image.Resampling}
def resize(img: Image.Image, size: int) -> Image.Image:
if img.width < img.height:
return img.resize((size, int((img.height / img.width) * size)), resample=Image.BICUBIC)
else:
return img.resize((int((img.width / img.height) * size), size), resample=Image.BICUBIC)
# https://stackoverflow.com/a/60883103
def crop(img: Image.Image, size: int) -> Image.Image:
left = int((img.size[0] / 2) - (size / 2))
upper = int((img.size[1] / 2) - (size / 2))
right = left + size
lower = upper + size
return img.crop((left, upper, right, lower))
def to_numpy(img: Image.Image) -> ndarray_f32:
return np.asarray(img.convert("RGB")).astype(np.float32) / 255.0
def normalize(img: ndarray_f32, mean: float | ndarray_f32, std: float | ndarray_f32) -> ndarray_f32:
return (img - mean) / std
def get_pil_resampling(resample: str) -> Image.Resampling:
return _PIL_RESAMPLING_METHODS[resample.lower()]
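A sketch of how these helpers compose into the preprocessing done by OpenCLIPEncoder.transform above; the size, mean, and std values mirror the clip_preprocess_cfg fixture earlier in this changeset, and the input path is illustrative:

import numpy as np
from PIL import Image

from app.models.transforms import crop, normalize, resize, to_numpy

image = Image.open("example.jpg")  # hypothetical input
image = resize(image, 224)         # scale the shortest side to 224
image = crop(image, 224)           # center-crop to 224x224
pixels = to_numpy(image)           # HWC float32 in [0, 1]
mean = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
std = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)
pixels = normalize(pixels, mean, std)
batch = np.expand_dims(pixels.transpose(2, 0, 1), 0)  # NCHW batch for the visual ONNX model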

View file

@ -1,5 +1,7 @@
from enum import StrEnum
from typing import TypeAlias
import numpy as np
from pydantic import BaseModel
@ -31,3 +33,8 @@ class ModelType(StrEnum):
IMAGE_CLASSIFICATION = "image-classification"
CLIP = "clip"
FACIAL_RECOGNITION = "facial-recognition"
ndarray_f32: TypeAlias = np.ndarray[int, np.dtype[np.float32]]
ndarray_i64: TypeAlias = np.ndarray[int, np.dtype[np.int64]]
ndarray_i32: TypeAlias = np.ndarray[int, np.dtype[np.int32]]
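These aliases are used across the changeset to annotate numpy arrays; a small illustrative sketch (the helper itself is hypothetical, not part of the changeset):

import numpy as np

from app.schemas import ndarray_f32


def l2_normalize(embedding: ndarray_f32) -> ndarray_f32:
    # Hypothetical helper, shown only to illustrate annotating arrays with the aliases.
    return embedding / np.linalg.norm(embedding)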

View file

@ -1,7 +1,8 @@
import json
import pickle
from io import BytesIO
from typing import Any, TypeAlias
from pathlib import Path
from typing import Any, Callable
from unittest import mock
import cv2
@ -14,13 +15,11 @@ from pytest_mock import MockerFixture
from .config import settings
from .models.base import PicklableSessionOptions
from .models.cache import ModelCache
from .models.clip import CLIPEncoder
from .models.clip import OpenCLIPEncoder
from .models.facial_recognition import FaceRecognizer
from .models.image_classification import ImageClassifier
from .schemas import ModelType
ndarray: TypeAlias = np.ndarray[int, np.dtype[np.float32]]
class TestImageClassifier:
classifier_preds = [
@ -56,30 +55,50 @@ class TestImageClassifier:
class TestCLIP:
embedding = np.random.rand(512).astype(np.float32)
cache_dir = Path("test_cache")
def test_basic_image(self, pil_image: Image.Image, mocker: MockerFixture) -> None:
mocker.patch.object(CLIPEncoder, "download")
def test_basic_image(
self,
pil_image: Image.Image,
mocker: MockerFixture,
clip_model_cfg: dict[str, Any],
clip_preprocess_cfg: Callable[[Path], dict[str, Any]],
) -> None:
mocker.patch.object(OpenCLIPEncoder, "download")
mocker.patch.object(OpenCLIPEncoder, "model_cfg", clip_model_cfg)
mocker.patch.object(OpenCLIPEncoder, "preprocess_cfg", clip_preprocess_cfg)
mocker.patch("app.models.clip.AutoTokenizer.from_pretrained", autospec=True)
mocked = mocker.patch("app.models.clip.ort.InferenceSession", autospec=True)
mocked.return_value.run.return_value = [[self.embedding]]
clip_encoder = CLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="vision")
assert clip_encoder.mode == "vision"
clip_encoder = OpenCLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="vision")
embedding = clip_encoder.predict(pil_image)
assert clip_encoder.mode == "vision"
assert isinstance(embedding, list)
assert len(embedding) == 512
assert len(embedding) == clip_model_cfg["embed_dim"]
assert all([isinstance(num, float) for num in embedding])
clip_encoder.vision_model.run.assert_called_once()
def test_basic_text(self, mocker: MockerFixture) -> None:
mocker.patch.object(CLIPEncoder, "download")
def test_basic_text(
self,
mocker: MockerFixture,
clip_model_cfg: dict[str, Any],
clip_preprocess_cfg: Callable[[Path], dict[str, Any]],
) -> None:
mocker.patch.object(OpenCLIPEncoder, "download")
mocker.patch.object(OpenCLIPEncoder, "model_cfg", clip_model_cfg)
mocker.patch.object(OpenCLIPEncoder, "preprocess_cfg", clip_preprocess_cfg)
mocker.patch("app.models.clip.AutoTokenizer.from_pretrained", autospec=True)
mocked = mocker.patch("app.models.clip.ort.InferenceSession", autospec=True)
mocked.return_value.run.return_value = [[self.embedding]]
clip_encoder = CLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="text")
assert clip_encoder.mode == "text"
clip_encoder = OpenCLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="text")
embedding = clip_encoder.predict("test search query")
assert clip_encoder.mode == "text"
assert isinstance(embedding, list)
assert len(embedding) == 512
assert len(embedding) == clip_model_cfg["embed_dim"]
assert all([isinstance(num, float) for num in embedding])
clip_encoder.text_model.run.assert_called_once()

View file

@ -0,0 +1,21 @@
FROM mambaorg/micromamba:bookworm-slim as builder
ENV NODE_ENV=production \
TRANSFORMERS_CACHE=/cache \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PATH="/opt/venv/bin:$PATH" \
PYTHONPATH=/usr/src
COPY --chown=$MAMBA_USER:$MAMBA_USER conda-lock.yml /tmp/conda-lock.yml
RUN micromamba install -y -n base -f /tmp/conda-lock.yml && \
micromamba remove -y -n base cxx-compiler && \
micromamba clean --all --yes
WORKDIR /usr/src/app
COPY --chown=$MAMBA_USER:$MAMBA_USER start.sh .
COPY --chown=$MAMBA_USER:$MAMBA_USER app .
ENTRYPOINT ["/usr/local/bin/_entrypoint.sh"]
CMD ["./start.sh"]

File diff suppressed because it is too large.

View file

@ -0,0 +1,15 @@
name: base
channels:
- conda-forge
platforms:
- linux-64
- linux-aarch64
dependencies:
- black
- conda-lock
- mypy
- pytest
- pytest-cov
- pytest-mock
- ruff
category: dev

View file

@ -0,0 +1,25 @@
name: base
channels:
- conda-forge
- nvidia
- pytorch-nightly
platforms:
- linux-64
dependencies:
- cxx-compiler
- onnx==1.*
- onnxruntime==1.*
- open-clip-torch==2.*
- orjson==3.*
- pip
- python==3.11.*
- pytorch
- rich==13.*
- safetensors==0.*
- setuptools==68.*
- torchvision
- transformers==4.*
- pip:
- multilingual-clip
- onnx-simplifier
category: main

View file

@ -0,0 +1,67 @@
import tempfile
import warnings
from pathlib import Path
import torch
from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
from transformers import AutoTokenizer
from .openclip import OpenCLIPModelConfig
from .openclip import to_onnx as openclip_to_onnx
from .optimize import optimize
from .util import get_model_path
_MCLIP_TO_OPENCLIP = {
"M-CLIP/XLM-Roberta-Large-Vit-B-32": OpenCLIPModelConfig("ViT-B-32", "openai"),
"M-CLIP/XLM-Roberta-Large-Vit-B-16Plus": OpenCLIPModelConfig("ViT-B-16-plus-240", "laion400m_e32"),
"M-CLIP/LABSE-Vit-L-14": OpenCLIPModelConfig("ViT-L-14", "openai"),
"M-CLIP/XLM-Roberta-Large-Vit-L-14": OpenCLIPModelConfig("ViT-L-14", "openai"),
}
def to_onnx(
model_name: str,
output_dir_visual: Path | str,
output_dir_textual: Path | str,
) -> None:
textual_path = get_model_path(output_dir_textual)
with tempfile.TemporaryDirectory() as tmpdir:
model = MultilingualCLIP.from_pretrained(model_name, cache_dir=tmpdir)
AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)
for param in model.parameters():
param.requires_grad_(False)
export_text_encoder(model, textual_path)
openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
optimize(textual_path)
def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
output_path = Path(output_path)
def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
embs = self.transformer(input_ids, attention_mask)[0]
embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
embs = self.LinearTransformation(embs)
return torch.nn.functional.normalize(embs, dim=-1)
# unfortunately need to monkeypatch for tracing to work here
# otherwise it hits the 2GiB protobuf serialization limit
MultilingualCLIP.forward = forward
args = (torch.ones(1, 77, dtype=torch.int32), torch.ones(1, 77, dtype=torch.int32))
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
model,
args,
output_path.as_posix(),
input_names=["input_ids", "attention_mask"],
output_names=["text_embedding"],
opset_version=17,
dynamic_axes={
"input_ids": {0: "batch_size", 1: "sequence_length"},
"attention_mask": {0: "batch_size", 1: "sequence_length"},
},
)
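A minimal invocation sketch for this exporter; the output directories are illustrative:

from pathlib import Path

from models.mclip import to_onnx  # package layout as used by the export script later in this changeset

# Exports the multilingual text encoder and the matching OpenCLIP visual encoder.
to_onnx(
    "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    output_dir_visual=Path("out/XLM-Roberta-Large-Vit-B-32/visual"),
    output_dir_textual=Path("out/XLM-Roberta-Large-Vit-B-32/textual"),
)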

View file

@ -0,0 +1,109 @@
import tempfile
import warnings
from dataclasses import dataclass, field
from pathlib import Path
import open_clip
import torch
from transformers import AutoTokenizer
from .optimize import optimize
from .util import get_model_path, save_config
@dataclass
class OpenCLIPModelConfig:
name: str
pretrained: str
image_size: int = field(init=False)
sequence_length: int = field(init=False)
def __post_init__(self) -> None:
open_clip_cfg = open_clip.get_model_config(self.name)
if open_clip_cfg is None:
raise ValueError(f"Unknown model {self.name}")
self.image_size = open_clip_cfg["vision_cfg"]["image_size"]
self.sequence_length = open_clip_cfg["text_cfg"]["context_length"]
def to_onnx(
model_cfg: OpenCLIPModelConfig,
output_dir_visual: Path | str | None = None,
output_dir_textual: Path | str | None = None,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
model = open_clip.create_model(
model_cfg.name,
pretrained=model_cfg.pretrained,
jit=False,
cache_dir=tmpdir,
require_pretrained=True,
)
text_vision_cfg = open_clip.get_model_config(model_cfg.name)
for param in model.parameters():
param.requires_grad_(False)
if output_dir_visual is not None:
output_dir_visual = Path(output_dir_visual)
visual_path = get_model_path(output_dir_visual)
save_config(open_clip.get_model_preprocess_cfg(model), output_dir_visual / "preprocess_cfg.json")
save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
export_image_encoder(model, model_cfg, visual_path)
optimize(visual_path)
if output_dir_textual is not None:
output_dir_textual = Path(output_dir_textual)
textual_path = get_model_path(output_dir_textual)
tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
export_text_encoder(model, model_cfg, textual_path)
optimize(textual_path)
def export_image_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
output_path = Path(output_path)
def encode_image(image: torch.Tensor) -> torch.Tensor:
return model.encode_image(image, normalize=True)
args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
traced = torch.jit.trace(encode_image, args)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
traced,
args,
output_path.as_posix(),
input_names=["image"],
output_names=["image_embedding"],
opset_version=17,
dynamic_axes={"image": {0: "batch_size"}},
)
def export_text_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
output_path = Path(output_path)
def encode_text(text: torch.Tensor) -> torch.Tensor:
return model.encode_text(text, normalize=True)
args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
traced = torch.jit.trace(encode_text, args)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
torch.onnx.export(
traced,
args,
output_path.as_posix(),
input_names=["text"],
output_names=["text_embedding"],
opset_version=17,
dynamic_axes={"text": {0: "batch_size"}},
)
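And a sketch of exporting a single OpenCLIP model with this module; output paths are illustrative, and the pretrained weights are downloaded into a temporary directory by to_onnx:

from models.openclip import OpenCLIPModelConfig, to_onnx

cfg = OpenCLIPModelConfig("ViT-B-32", "openai")  # reads the open_clip model config to fill image_size and sequence_length
to_onnx(cfg, output_dir_visual="out/ViT-B-32__openai/visual", output_dir_textual="out/ViT-B-32__openai/textual")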

View file

@ -0,0 +1,38 @@
from pathlib import Path
import onnx
import onnxruntime as ort
import onnxsim
def optimize_onnxsim(model_path: Path | str, output_path: Path | str) -> None:
model_path = Path(model_path)
output_path = Path(output_path)
model = onnx.load(model_path.as_posix())
model, check = onnxsim.simplify(model, skip_shape_inference=True)
assert check, "Simplified ONNX model could not be validated"
onnx.save(model, output_path.as_posix())
def optimize_ort(
model_path: Path | str,
output_path: Path | str,
level: ort.GraphOptimizationLevel = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,
) -> None:
model_path = Path(model_path)
output_path = Path(output_path)
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = level
sess_options.optimized_model_filepath = output_path.as_posix()
ort.InferenceSession(model_path.as_posix(), providers=["CPUExecutionProvider"], sess_options=sess_options)
def optimize(model_path: Path | str) -> None:
model_path = Path(model_path)
optimize_ort(model_path, model_path)
# onnxsim serializes large models as a blob, which uses much more memory when loading the model at runtime
if not any(file.name.startswith("Constant") for file in model_path.parent.iterdir()):
optimize_onnxsim(model_path, model_path)
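A sketch of running this optimization pass on an exported model; the path is illustrative and assumes the package layout used by the export script:

from pathlib import Path

from models.optimize import optimize

# ORT basic graph optimization runs in place; onnx-simplifier is skipped when the
# model directory contains external "Constant" data files (see the comment above).
optimize(Path("out/ViT-B-32__openai/visual/model.onnx"))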

View file

@ -0,0 +1,15 @@
import json
from pathlib import Path
from typing import Any
def get_model_path(output_dir: Path | str) -> Path:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir / "model.onnx"
def save_config(config: Any, output_path: Path | str) -> None:
output_path = Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
json.dump(config, output_path.open("w"))

View file

@ -0,0 +1,76 @@
import gc
import os
from pathlib import Path
from tempfile import TemporaryDirectory
from huggingface_hub import create_repo, login, upload_folder
from models import mclip, openclip
from rich.progress import Progress
models = [
"RN50::openai",
"RN50::yfcc15m",
"RN50::cc12m",
"RN101::openai",
"RN101::yfcc15m",
"RN50x4::openai",
"RN50x16::openai",
"RN50x64::openai",
"ViT-B-32::openai",
"ViT-B-32::laion2b_e16",
"ViT-B-32::laion400m_e31",
"ViT-B-32::laion400m_e32",
"ViT-B-32::laion2b-s34b-b79k",
"ViT-B-16::openai",
"ViT-B-16::laion400m_e31",
"ViT-B-16::laion400m_e32",
"ViT-B-16-plus-240::laion400m_e31",
"ViT-B-16-plus-240::laion400m_e32",
"ViT-L-14::openai",
"ViT-L-14::laion400m_e31",
"ViT-L-14::laion400m_e32",
"ViT-L-14::laion2b-s32b-b82k",
"ViT-L-14-336::openai",
"ViT-H-14::laion2b-s32b-b79k",
"ViT-g-14::laion2b-s12b-b42k",
"M-CLIP/LABSE-Vit-L-14",
"M-CLIP/XLM-Roberta-Large-Vit-B-32",
"M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
"M-CLIP/XLM-Roberta-Large-Vit-L-14",
]
login(token=os.environ["HF_AUTH_TOKEN"])
with Progress() as progress:
task1 = progress.add_task("[green]Exporting models...", total=len(models))
task2 = progress.add_task("[yellow]Uploading models...", total=len(models))
with TemporaryDirectory() as tmp:
tmpdir = Path(tmp)
for model in models:
model_name = model.split("/")[-1].replace("::", "__")
config_path = tmpdir / model_name / "config.json"
def upload() -> None:
progress.update(task2, description=f"[yellow]Uploading {model_name}")
repo_id = f"immich-app/{model_name}"
create_repo(repo_id, exist_ok=True)
upload_folder(repo_id=repo_id, folder_path=tmpdir / model_name)
progress.update(task2, advance=1)
def export() -> None:
progress.update(task1, description=f"[green]Exporting {model_name}")
visual_dir = tmpdir / model_name / "visual"
textual_dir = tmpdir / model_name / "textual"
if model.startswith("M-CLIP"):
mclip.to_onnx(model, visual_dir, textual_dir)
else:
name, _, pretrained = model_name.partition("__")
openclip.to_onnx(openclip.OpenCLIPModelConfig(name, pretrained), visual_dir, textual_dir)
progress.update(task1, advance=1)
gc.collect()
export()
upload()

View file

@ -1,11 +1,12 @@
from io import BytesIO
import json
from argparse import ArgumentParser
from io import BytesIO
from typing import Any
from locust import HttpUser, events, task
from locust.env import Environment
from PIL import Image
from argparse import ArgumentParser
byte_image = BytesIO()
@ -14,11 +15,21 @@ def _(parser: ArgumentParser) -> None:
parser.add_argument("--tag-model", type=str, default="microsoft/resnet-50")
parser.add_argument("--clip-model", type=str, default="ViT-B-32::openai")
parser.add_argument("--face-model", type=str, default="buffalo_l")
parser.add_argument("--tag-min-score", type=int, default=0.0,
help="Returns all tags at or above this score. The default returns all tags.")
parser.add_argument("--face-min-score", type=int, default=0.034,
help=("Returns all faces at or above this score. The default returns 1 face per request; "
"setting this to 0 blows up the number of faces to the thousands."))
parser.add_argument(
"--tag-min-score",
type=int,
default=0.0,
help="Returns all tags at or above this score. The default returns all tags.",
)
parser.add_argument(
"--face-min-score",
type=int,
default=0.034,
help=(
"Returns all faces at or above this score. The default returns 1 face per request; "
"setting this to 0 blows up the number of faces to the thousands."
),
)
parser.add_argument("--image-size", type=int, default=1000)
@ -62,7 +73,7 @@ class CLIPTextFormDataLoadTest(InferenceLoadTest):
("modelName", self.environment.parsed_options.clip_model),
("modelType", "clip"),
("options", json.dumps({"mode": "text"})),
("text", "test search query")
("text", "test search query"),
]
self.client.post("/predict", data=data)
@ -88,5 +99,5 @@ class RecognitionFormDataLoadTest(InferenceLoadTest):
("options", json.dumps({"minScore": self.environment.parsed_options.face_min_score})),
]
files = {"image": self.data}
self.client.post("/predict", data=data, files=files)

File diff suppressed because it is too large.

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "machine-learning"
version = "1.81.1"
version = "1.85.0"
description = ""
authors = ["Hau Tran <alex.tran1502@gmail.com>"]
readme = "README.md"
@ -9,8 +9,8 @@ packages = [{include = "app"}]
[tool.poetry.dependencies]
python = "^3.11"
torch = [
{markers = "platform_machine == 'arm64' or platform_machine == 'aarch64'", version = "=2.0.1", source = "pypi"},
{markers = "platform_machine == 'amd64' or platform_machine == 'x86_64'", version = "=2.0.1", source = "pytorch-cpu"}
{markers = "platform_machine == 'arm64' or platform_machine == 'aarch64'", version = "=2.1.0", source = "pypi"},
{markers = "platform_machine == 'amd64' or platform_machine == 'x86_64'", version = "=2.1.0", source = "pytorch-cpu"}
]
transformers = "^4.29.2"
onnxruntime = "^1.15.0"
@ -22,14 +22,9 @@ uvicorn = {extras = ["standard"], version = "^0.22.0"}
pydantic = "^1.10.8"
aiocache = "^0.12.1"
optimum = "^1.9.1"
torchvision = [
{markers = "platform_machine == 'arm64' or platform_machine == 'aarch64'", version = "=0.15.2", source = "pypi"},
{markers = "platform_machine == 'amd64' or platform_machine == 'x86_64'", version = "=0.15.2", source = "pytorch-cpu"}
]
rich = "^13.4.2"
ftfy = "^6.1.1"
setuptools = "^68.0.0"
open-clip-torch = "^2.20.0"
python-multipart = "^0.0.6"
orjson = "^3.9.5"
safetensors = "0.3.2"
@ -63,6 +58,7 @@ warn_redundant_casts = true
disallow_any_generics = true
check_untyped_defs = true
disallow_untyped_defs = true
ignore_missing_imports = true
[tool.pydantic-mypy]
init_forbid_extra = true
@ -70,30 +66,6 @@ init_typed = true
warn_required_dynamic_aliases = true
warn_untyped_fields = true
[[tool.mypy.overrides]]
module = [
"huggingface_hub",
"transformers",
"gunicorn",
"cv2",
"insightface.model_zoo",
"insightface.utils.face_align",
"insightface.utils.storage",
"onnxruntime",
"optimum",
"optimum.pipelines",
"optimum.onnxruntime",
"clip_server.model.clip",
"clip_server.model.clip_onnx",
"clip_server.model.pretrained_models",
"clip_server.model.tokenization",
"torchvision.transforms",
"aiocache.backends.memory",
"aiocache.lock",
"aiocache.plugins"
]
ignore_missing_imports = true
[tool.ruff]
line-length = 120
target-version = "py311"

View file

@@ -1,2 +0,0 @@
# requirements to be installed with `--no-deps` flag
clip-server==0.8.*

View file

@@ -35,8 +35,8 @@ platform :android do
task: 'bundle',
build_type: 'Release',
properties: {
"android.injected.version.code" => 105,
"android.injected.version.name" => "1.81.1",
"android.injected.version.code" => 109,
"android.injected.version.name" => "1.85.0",
}
)
upload_to_play_store(skip_upload_apk: true, skip_upload_images: true, skip_upload_screenshots: true, aab: '../build/app/outputs/bundle/release/app-release.aab')

View file

@@ -1,2 +1,2 @@
* User can now download assets to local device
* Increased the font size for curated image thumbnail information on the seach page
* Increased the font size for curated image thumbnail information on the search page

View file

@@ -5,17 +5,17 @@
<testcase classname="fastlane.lanes" name="0: default_platform" time="0.000269">
<testcase classname="fastlane.lanes" name="0: default_platform" time="0.000625">
</testcase>
<testcase classname="fastlane.lanes" name="1: bundleRelease" time="81.160108">
<testcase classname="fastlane.lanes" name="1: bundleRelease" time="70.943413">
</testcase>
<testcase classname="fastlane.lanes" name="2: upload_to_play_store" time="39.176668">
<testcase classname="fastlane.lanes" name="2: upload_to_play_store" time="30.374484">
</testcase>

View file

@@ -3,6 +3,8 @@
"add_to_album_bottom_sheet_already_exists": "Already in {album}",
"advanced_settings_prefer_remote_subtitle": "Some devices are painfully slow to load thumbnails from assets on the device. Activate this setting to load remote images instead.",
"advanced_settings_prefer_remote_title": "Prefereix imatges remotes",
"advanced_settings_self_signed_ssl_subtitle": "Skips SSL certificate verification for the server endpoint. Required for self-signed certificates.",
"advanced_settings_self_signed_ssl_title": "Allow self-signed SSL certificates",
"advanced_settings_tile_subtitle": "Advanced user's settings",
"advanced_settings_tile_title": "Avançat",
"advanced_settings_troubleshooting_subtitle": "Enable additional features for troubleshooting",
@@ -128,7 +130,10 @@
"control_bottom_app_bar_delete": "Esborra",
"control_bottom_app_bar_favorite": "Preferit",
"control_bottom_app_bar_share": "Share",
"control_bottom_app_bar_share_to": "Share To",
"control_bottom_app_bar_stack": "Stack",
"control_bottom_app_bar_unarchive": "Desarxiva",
"control_bottom_app_bar_upload": "Upload",
"create_album_page_untitled": "Untitled",
"create_shared_album_page_create": "Create",
"create_shared_album_page_share": "Comparteix",
@@ -143,6 +148,8 @@
"delete_dialog_cancel": "Cancel·la",
"delete_dialog_ok": "Esborra",
"delete_dialog_title": "Esborra permanentment",
"delete_shared_link_dialog_content": "Are you sure you want to delete this shared link?",
"delete_shared_link_dialog_title": "Delete Shared Link",
"description_input_hint_text": "Afegeix descripció...",
"description_input_submit_error": "Error updating description, check the log for more details",
"exif_bottom_sheet_description": "Afegeix descripció",
@@ -164,6 +171,7 @@
"home_page_upload_err_limit": "Can only upload a maximum of 30 assets at a time, skipping",
"image_viewer_page_state_provider_download_error": "Download Error",
"image_viewer_page_state_provider_download_success": "Download Success",
"image_viewer_page_state_provider_share_error": "Share Error",
"library_page_albums": "Àlbums",
"library_page_archive": "Arxiu",
"library_page_device_albums": "Àlbums al Dispositiu",
@@ -171,6 +179,8 @@
"library_page_new_album": "New album",
"library_page_sharing": "Sharing",
"library_page_sort_created": "Most recently created",
"library_page_sort_last_modified": "Last modified",
"library_page_sort_most_recent_photo": "Most recent photo",
"library_page_sort_title": "Album title",
"login_disabled": "Login has been disabled",
"login_form_api_exception": "API exception. Please check the server URL and try again.",
@@ -186,6 +196,7 @@
"login_form_failed_get_oauth_server_config": "Error logging using OAuth, check server URL",
"login_form_failed_get_oauth_server_disable": "OAuth feature is not available on this server",
"login_form_failed_login": "Error logging you in, check server URL, email and password",
"login_form_handshake_exception": "There was an Handshake Exception with the server. Enable self-signed certificate support in the settings if you are using a self-signed certificate.",
"login_form_label_email": "Correu electrònic",
"login_form_label_password": "Contrasenya",
"login_form_next_button": "Següent",
@@ -193,6 +204,24 @@
"login_form_save_login": "Mantingues identificat",
"login_form_server_empty": "Enter a server URL.",
"login_form_server_error": "Could not connect to server.",
"login_password_changed_error": "There was an error updating your password",
"login_password_changed_success": "Password updated successfully",
"map_cannot_get_user_location": "Cannot get user's location",
"map_location_dialog_cancel": "Cancel",
"map_location_dialog_yes": "Yes",
"map_location_service_disabled_content": "Location service needs to be enabled to display assets from your current location. Do you want to enable it now?",
"map_location_service_disabled_title": "Location Service disabled",
"map_no_assets_in_bounds": "No photos in this area",
"map_no_location_permission_content": "Location permission is needed to display assets from your current location. Do you want to allow it now?",
"map_no_location_permission_title": "Location Permission denied",
"map_settings_dark_mode": "Dark mode",
"map_settings_dialog_cancel": "Cancel",
"map_settings_dialog_save": "Save",
"map_settings_dialog_title": "Map Settings",
"map_settings_include_show_archived": "Include Archived",
"map_settings_only_relative_range": "Date range",
"map_settings_only_show_favorites": "Show Favorite Only",
"map_zoom_to_see_photos": "Zoom out to see photos",
"monthly_title_text_date_format": "MMMM y",
"motion_photos_page_title": "Motion Photos",
"notification_permission_dialog_cancel": "Cancel·la",
@@ -223,6 +252,7 @@
"profile_drawer_client_server_up_to_date": "Client and Server are up-to-date",
"profile_drawer_settings": "Settings",
"profile_drawer_sign_out": "Tanca la sessió",
"profile_drawer_trash": "Trash",
"recently_added_page_title": "Recently Added",
"search_bar_hint": "Search your photos",
"search_page_categories": "Categories",
@@ -271,11 +301,27 @@
"share_add_title": "Afegeix un títol",
"share_create_album": "Crea un àlbum",
"share_dialog_preparing": "Preparing...",
"shared_link_app_bar_title": "Shared Links",
"shared_link_create_app_bar_title": "Create link to share",
"shared_link_create_info": "Let anyone with the link see the selected photo(s)",
"shared_link_create_submit_button": "Create link",
"shared_link_edit_allow_download": "Allow public user to download",
"shared_link_edit_allow_upload": "Allow public user to upload",
"shared_link_edit_app_bar_title": "Edit link",
"shared_link_edit_change_expiry": "Change expiration time",
"shared_link_edit_description": "Description",
"shared_link_edit_description_hint": "Enter the share description",
"shared_link_edit_show_meta": "Show metadata",
"shared_link_edit_submit_button": "Update link",
"shared_link_empty": "You don't have any shared links",
"shared_link_manage_links": "Manage Shared links",
"share_done": "Done",
"share_invite": "Convida a l'àlbum",
"sharing_page_album": "Shared albums",
"sharing_page_description": "Create shared albums to share photos and videos with people in your network.",
"sharing_page_empty_list": "EMPTY LIST",
"sharing_silver_appbar_create_shared_album": "Crea àlbum compartit",
"sharing_silver_appbar_shared_links": "Shared links",
"sharing_silver_appbar_share_partner": "Comparteix amb un company",
"tab_controller_nav_library": "Library",
"tab_controller_nav_photos": "Fotografies",
@@ -291,6 +337,19 @@
"theme_setting_theme_title": "Theme",
"theme_setting_three_stage_loading_subtitle": "Three-stage loading might increase the loading performance but causes significantly higher network load",
"theme_setting_three_stage_loading_title": "Enable three-stage loading",
"translated_text_options": "Options",
"trash_page_delete": "Delete",
"trash_page_delete_all": "Delete All",
"trash_page_empty_trash_btn": "Empty trash",
"trash_page_empty_trash_dialog_content": "Do you want to empty your trashed assets? These items will be permanently removed from Immich",
"trash_page_empty_trash_dialog_ok": "Ok",
"trash_page_info": "Trashed items will be permanently deleted after {} days",
"trash_page_no_assets": "No trashed assets",
"trash_page_restore": "Restore",
"trash_page_restore_all": "Restore All",
"trash_page_select_assets_btn": "Select assets",
"trash_page_select_btn": "Select",
"trash_page_title": "Trash ({})",
"upload_dialog_cancel": "Cancel",
"upload_dialog_info": "Do you want to backup the selected Asset(s) to the server?",
"upload_dialog_ok": "Upload",
@@ -300,5 +359,8 @@
"version_announcement_overlay_text_1": "Hi friend, there is a new release of",
"version_announcement_overlay_text_2": "please take your time to visit the ",
"version_announcement_overlay_text_3": " and ensure your docker-compose and .env setup is up-to-date to prevent any misconfigurations, especially if you use WatchTower or any mechanism that handles updating your server application automatically.",
"version_announcement_overlay_title": "New Server Version Available \uD83C\uDF89"
"version_announcement_overlay_title": "New Server Version Available \uD83C\uDF89",
"viewer_remove_from_stack": "Remove from Stack",
"viewer_stack_use_as_main_asset": "Use as Main Asset",
"viewer_unstack": "Un-Stack"
}

View file

@@ -3,6 +3,8 @@
"add_to_album_bottom_sheet_already_exists": "Je již v {album}",
"advanced_settings_prefer_remote_subtitle": "U některých zařízení je načítání miniatur z prostředků v zařízení velmi pomalé. Aktivujte toto nastavení, aby se místo toho načítaly vzdálené obrázky.",
"advanced_settings_prefer_remote_title": "Preferovat vzdálené obrázky",
"advanced_settings_self_signed_ssl_subtitle": "Vynechá ověření SSL certifikátu serveru. Vyžadováno pro self-signed certifikáty.",
"advanced_settings_self_signed_ssl_title": "Povolit self-signed SSL certifikáty",
"advanced_settings_tile_subtitle": "Pokročilé uživatelské nastavení",
"advanced_settings_tile_title": "Pokročilé",
"advanced_settings_troubleshooting_subtitle": "Zobrazit dodatečné vlastnosti pro řešení problémů",
@@ -12,8 +14,8 @@
"album_thumbnail_card_item": "1 položka",
"album_thumbnail_card_items": "{} položek",
"album_thumbnail_card_shared": "Sdíleno",
"album_thumbnail_owned": "Vlastněno",
"album_thumbnail_shared_by": "Sdílené od {}",
"album_thumbnail_owned": "Vlastní",
"album_thumbnail_shared_by": "Sdílel(a) {}",
"album_viewer_appbar_share_delete": "Odstranit album",
"album_viewer_appbar_share_err_delete": "Nepodařilo se odstranit album",
"album_viewer_appbar_share_err_leave": "Nepodařilo se opustit album",
@@ -128,7 +130,10 @@
"control_bottom_app_bar_delete": "Vymazat",
"control_bottom_app_bar_favorite": "Oblíbené",
"control_bottom_app_bar_share": "Sdílet",
"control_bottom_app_bar_share_to": "Share To",
"control_bottom_app_bar_stack": "Stack",
"control_bottom_app_bar_unarchive": "Odarchivovat",
"control_bottom_app_bar_upload": "Upload",
"create_album_page_untitled": "Bez názvu",
"create_shared_album_page_create": "Vytvořit",
"create_shared_album_page_share": "Sdílet",
@@ -143,6 +148,8 @@
"delete_dialog_cancel": "Zrušit",
"delete_dialog_ok": "Vymazat",
"delete_dialog_title": "Vymazat trvale",
"delete_shared_link_dialog_content": "Are you sure you want to delete this shared link?",
"delete_shared_link_dialog_title": "Delete Shared Link",
"description_input_hint_text": "Přidat popis...",
"description_input_submit_error": "Chyba aktualizace popisu, další podrobnosti najdete v logu",
"exif_bottom_sheet_description": "Přidat popis...",
@@ -164,6 +171,7 @@
"home_page_upload_err_limit": "Lze zálohovat nejvýše 30 položek najednou, přeskakuji",
"image_viewer_page_state_provider_download_error": "Chyba stahování",
"image_viewer_page_state_provider_download_success": "Stahování bylo úspěšné",
"image_viewer_page_state_provider_share_error": "Share Error",
"library_page_albums": "Alba",
"library_page_archive": "Archív",
"library_page_device_albums": "Alba v zařízení",
@@ -171,6 +179,8 @@
"library_page_new_album": "Nové album",
"library_page_sharing": "Sdílení",
"library_page_sort_created": "Naposledy vytvořené",
"library_page_sort_last_modified": "Last modified",
"library_page_sort_most_recent_photo": "Most recent photo",
"library_page_sort_title": "Podle názvu alba",
"login_disabled": "Přihlášení bylo zakázáno",
"login_form_api_exception": "Výjimka API. Zkontrolujte URL serveru a zkuste to znovu.",
@@ -186,6 +196,7 @@
"login_form_failed_get_oauth_server_config": "Chyba přihlášení pomocí OAuth, zkontrolujte adresu URL serveru",
"login_form_failed_get_oauth_server_disable": "Funkce OAuth není na tomto serveru dostupná",
"login_form_failed_login": "Chyba přihlášení, zkontrolujte URL adresu serveru, e-mail a heslo.",
"login_form_handshake_exception": "Došlo k výjimce Handshake se serverem. Pokud používáte self-signed certifikát, povolte v nastavení podporu self-signed certifikátu.",
"login_form_label_email": "E-mail",
"login_form_label_password": "Heslo",
"login_form_next_button": "Další",
@@ -193,6 +204,24 @@
"login_form_save_login": "Zůstat přihlášen",
"login_form_server_empty": "Zadejte URL serveru.",
"login_form_server_error": "Není možné se připojit k serveru.",
"login_password_changed_error": "Při aktualizaci vašeho hesla došlo k chybě",
"login_password_changed_success": "Heslo bylo úspěšně aktualizováno",
"map_cannot_get_user_location": "Nelze zjistit polohu uživatele",
"map_location_dialog_cancel": "Zrušit",
"map_location_dialog_yes": "Ano",
"map_location_service_disabled_content": "Pro zobrazení fotek z vaší aktuální polohy musí být povolena služba určování polohy. Chcete ji nyní povolit?",
"map_location_service_disabled_title": "Služba určování polohy je zakázána",
"map_no_assets_in_bounds": "Žádné fotografie v této oblasti",
"map_no_location_permission_content": "Oprávnění polohy je nutné pro zobrazení fotek z vaší aktuální polohy. Chcete oprávnění nyní povolit?",
"map_no_location_permission_title": "Oprávnění polohy zamítnuto",
"map_settings_dark_mode": "Tmavý režim",
"map_settings_dialog_cancel": "Zrušit",
"map_settings_dialog_save": "Uložit",
"map_settings_dialog_title": "Nastavení map",
"map_settings_include_show_archived": "Include Archived",
"map_settings_only_relative_range": "Rozsah data",
"map_settings_only_show_favorites": "Zobrazit pouze oblíbené",
"map_zoom_to_see_photos": "Oddálit pro zobrazení fotografií",
"monthly_title_text_date_format": "LLLL y",
"motion_photos_page_title": "Pohyblivé fotky",
"notification_permission_dialog_cancel": "Zrušit",
@@ -223,6 +252,7 @@
"profile_drawer_client_server_up_to_date": "Klient a server jsou aktuální",
"profile_drawer_settings": "Nastavení",
"profile_drawer_sign_out": "Odhlásit se",
"profile_drawer_trash": "Trash",
"recently_added_page_title": "Nedávno přidané",
"search_bar_hint": "Prohledejte své fotky",
"search_page_categories": "Kategorie",
@@ -271,11 +301,27 @@
"share_add_title": "Přidat název",
"share_create_album": "Vytvořit album",
"share_dialog_preparing": "Připravuji...",
"shared_link_app_bar_title": "Shared Links",
"shared_link_create_app_bar_title": "Create link to share",
"shared_link_create_info": "Let anyone with the link see the selected photo(s)",
"shared_link_create_submit_button": "Create link",
"shared_link_edit_allow_download": "Allow public user to download",
"shared_link_edit_allow_upload": "Allow public user to upload",
"shared_link_edit_app_bar_title": "Edit link",
"shared_link_edit_change_expiry": "Change expiration time",
"shared_link_edit_description": "Description",
"shared_link_edit_description_hint": "Enter the share description",
"shared_link_edit_show_meta": "Show metadata",
"shared_link_edit_submit_button": "Update link",
"shared_link_empty": "You don't have any shared links",
"shared_link_manage_links": "Manage Shared links",
"share_done": "Done",
"share_invite": "Pozvat do alba",
"sharing_page_album": "Sdílená alba",
"sharing_page_description": "Vytvářejte sdílená alba a sdílejte fotografie a videa s lidmi ve vaší síti.",
"sharing_page_empty_list": "PRÁZDNÝ SEZNAM",
"sharing_silver_appbar_create_shared_album": "Vytvořit sdílené album",
"sharing_silver_appbar_shared_links": "Shared links",
"sharing_silver_appbar_share_partner": "Sdílet s partnerem",
"tab_controller_nav_library": "Knihovna",
"tab_controller_nav_photos": "Fotografie",
@@ -291,6 +337,19 @@
"theme_setting_theme_title": "Téma",
"theme_setting_three_stage_loading_subtitle": "Třístupňové načítání může zvýšit výkonnost načítání, ale vede k výrazně vyššímu zatížení sítě.",
"theme_setting_three_stage_loading_title": "Povolení třístupňového načítání",
"translated_text_options": "Možnosti",
"trash_page_delete": "Delete",
"trash_page_delete_all": "Delete All",
"trash_page_empty_trash_btn": "Empty trash",
"trash_page_empty_trash_dialog_content": "Do you want to empty your trashed assets? These items will be permanently removed from Immich",
"trash_page_empty_trash_dialog_ok": "Ok",
"trash_page_info": "Trashed items will be permanently deleted after {} days",
"trash_page_no_assets": "No trashed assets",
"trash_page_restore": "Restore",
"trash_page_restore_all": "Restore All",
"trash_page_select_assets_btn": "Select assets",
"trash_page_select_btn": "Select",
"trash_page_title": "Trash ({})",
"upload_dialog_cancel": "Zrušit",
"upload_dialog_info": "Chcete zálohovat vybrané položky na server?",
"upload_dialog_ok": "Zálohovat",
@@ -300,5 +359,8 @@
"version_announcement_overlay_text_1": "Ahoj, k dispozici je nová verze",
"version_announcement_overlay_text_2": "najděte si čas na návštěvu ",
"version_announcement_overlay_text_3": " a ujistěte se, že vaše konfigurace docker-compose a .env je aktuální, abyste předešli nesprávné konfiguraci, zvláště pokud používáte WatchTower nebo jakýkoli mechanismus, který podporuje automatické aktualizace serverových aplikací.",
"version_announcement_overlay_title": "K dispozici je nová verze serveru \uD83C\uDF89"
"version_announcement_overlay_title": "K dispozici je nová verze serveru \uD83C\uDF89",
"viewer_remove_from_stack": "Remove from Stack",
"viewer_stack_use_as_main_asset": "Use as Main Asset",
"viewer_unstack": "Un-Stack"
}

View file

@@ -3,6 +3,8 @@
"add_to_album_bottom_sheet_already_exists": "Allerede i {album}",
"advanced_settings_prefer_remote_subtitle": "Nogle enheder tager meget lang tid om at indlæse miniaturebilleder af elementer på enheden. Aktiver denne indstilling for i stedetat indlæse elementer fra serveren.",
"advanced_settings_prefer_remote_title": "Foretræk elementer på serveren",
"advanced_settings_self_signed_ssl_subtitle": "Spring verificering af SSL-certifikat over for serverens endelokation. Kræves for selvsignerede certifikater.",
"advanced_settings_self_signed_ssl_title": "Tillad selvsignerede certifikater",
"advanced_settings_tile_subtitle": "Avancerede brugerindstillinger",
"advanced_settings_tile_title": "Arkivér",
"advanced_settings_troubleshooting_subtitle": "Slå ekstra funktioner for fejlsøgning til",
@@ -128,7 +130,10 @@
"control_bottom_app_bar_delete": "Slet",
"control_bottom_app_bar_favorite": "Favorit",
"control_bottom_app_bar_share": "Del",
"control_bottom_app_bar_share_to": "Share To",
"control_bottom_app_bar_stack": "Stack",
"control_bottom_app_bar_unarchive": "Afakivér",
"control_bottom_app_bar_upload": "Upload",
"create_album_page_untitled": "Uden titel",
"create_shared_album_page_create": "Opret",
"create_shared_album_page_share": "Del",
@@ -143,6 +148,8 @@
"delete_dialog_cancel": "Annuller",
"delete_dialog_ok": "Slet",
"delete_dialog_title": "Slet permanent",
"delete_shared_link_dialog_content": "Are you sure you want to delete this shared link?",
"delete_shared_link_dialog_title": "Delete Shared Link",
"description_input_hint_text": "Tilføj en beskrivelse...",
"description_input_submit_error": "Fejl med at opdatere beskrivelsen. Tjek loggen for flere detaljer",
"exif_bottom_sheet_description": "Tilføj beskrivelse...",
@@ -164,6 +171,7 @@
"home_page_upload_err_limit": "Det er kun muligt at lave sikkerhedskopi af 30 elementer ad gangen. Springer over",
"image_viewer_page_state_provider_download_error": "Fejl ved download",
"image_viewer_page_state_provider_download_success": "Download succesfuld",
"image_viewer_page_state_provider_share_error": "Share Error",
"library_page_albums": "Albummer",
"library_page_archive": "Arkiv",
"library_page_device_albums": "Albummer på enhed",
@@ -171,6 +179,8 @@
"library_page_new_album": "Nyt album",
"library_page_sharing": "Delte",
"library_page_sort_created": "Senest oprettet",
"library_page_sort_last_modified": "Last modified",
"library_page_sort_most_recent_photo": "Most recent photo",
"library_page_sort_title": "Albumtitel",
"login_disabled": "Login er blevet deaktiveret",
"login_form_api_exception": "API-undtagelse. Tjek serverens URL og prøv igen. ",
@@ -186,6 +196,7 @@
"login_form_failed_get_oauth_server_config": "Fejl med at logge på med OAuth. Tjek serveres webadresse",
"login_form_failed_get_oauth_server_disable": "OAuth er ikke tilgængelig på denne server",
"login_form_failed_login": "Der opstod en vejl ved at logge ind. Tjek server webadressen, e-mailen og kodeordet",
"login_form_handshake_exception": "Der opstod en fejl med at oprette forbindelse til serveren. Aktiver selvsignerede certifikater i indstillingerne, hvis du bruger et selv signeret certifikat.",
"login_form_label_email": "E-mail",
"login_form_label_password": "Kodeord",
"login_form_next_button": "Næste",
@@ -193,6 +204,24 @@
"login_form_save_login": "Forbliv logget ind",
"login_form_server_empty": "Indtast server-URL.",
"login_form_server_error": "Kunne ikke forbinde til serveren.",
"login_password_changed_error": "Der opstod en fejl i opdateringen af dit kodeord",
"login_password_changed_success": "Kodeordet blev opdateret",
"map_cannot_get_user_location": "Kan ikke finde brugerens placering",
"map_location_dialog_cancel": "Annuller",
"map_location_dialog_yes": "Ja",
"map_location_service_disabled_content": "Placeringstjenesten skal aktiveres for at vise elementer fra din nuværende placering. Vil du aktivere den nu?",
"map_location_service_disabled_title": "Placeringstjenesten er deaktiveret",
"map_no_assets_in_bounds": "Der er ingen billeder i dette område",
"map_no_location_permission_content": "Der kræves tilladelse til placeringen for at vise elementer fra din nuværende placering. Vil du give tilladelse?",
"map_no_location_permission_title": "Placeringstilladelse blev afvist",
"map_settings_dark_mode": "Mørk tilstand",
"map_settings_dialog_cancel": "Annuller",
"map_settings_dialog_save": "Gem",
"map_settings_dialog_title": "Kortindstillinger",
"map_settings_include_show_archived": "Include Archived",
"map_settings_only_relative_range": "Datointerval",
"map_settings_only_show_favorites": "Vis kun favoritter",
"map_zoom_to_see_photos": "Zoom ud for at vise billeder",
"monthly_title_text_date_format": "MMMM y",
"motion_photos_page_title": "Bevægelsesbilleder",
"notification_permission_dialog_cancel": "Annuller",
@@ -223,6 +252,7 @@
"profile_drawer_client_server_up_to_date": "Klient og server er ajour",
"profile_drawer_settings": "Indstillinger",
"profile_drawer_sign_out": "Log ud",
"profile_drawer_trash": "Trash",
"recently_added_page_title": "Nyligt tilføjet",
"search_bar_hint": "Søg i dine billeder",
"search_page_categories": "Kategorier",
@@ -271,11 +301,27 @@
"share_add_title": "Tilføj en titel",
"share_create_album": "Opret album",
"share_dialog_preparing": "Forbereder...",
"shared_link_app_bar_title": "Shared Links",
"shared_link_create_app_bar_title": "Create link to share",
"shared_link_create_info": "Let anyone with the link see the selected photo(s)",
"shared_link_create_submit_button": "Create link",
"shared_link_edit_allow_download": "Allow public user to download",
"shared_link_edit_allow_upload": "Allow public user to upload",
"shared_link_edit_app_bar_title": "Edit link",
"shared_link_edit_change_expiry": "Change expiration time",
"shared_link_edit_description": "Description",
"shared_link_edit_description_hint": "Enter the share description",
"shared_link_edit_show_meta": "Show metadata",
"shared_link_edit_submit_button": "Update link",
"shared_link_empty": "You don't have any shared links",
"shared_link_manage_links": "Manage Shared links",
"share_done": "Done",
"share_invite": "Inviter til album",
"sharing_page_album": "Delt albums",
"sharing_page_description": "Opret delte albummer for at dele billeder og video med personer på dit netværk.",
"sharing_page_empty_list": "TOM LISTE",
"sharing_silver_appbar_create_shared_album": "Opret delt album",
"sharing_silver_appbar_shared_links": "Shared links",
"sharing_silver_appbar_share_partner": "Del med partner",
"tab_controller_nav_library": "Bibliotek",
"tab_controller_nav_photos": "Billeder",
@@ -291,6 +337,19 @@
"theme_setting_theme_title": "Tema",
"theme_setting_three_stage_loading_subtitle": "Tre-trins indlæsning kan øge ydeevnen, men kan ligeledes føre til højere netværksbelastning",
"theme_setting_three_stage_loading_title": "Slå tre-trins indlæsning til",
"translated_text_options": "Handlinger",
"trash_page_delete": "Delete",
"trash_page_delete_all": "Delete All",
"trash_page_empty_trash_btn": "Empty trash",
"trash_page_empty_trash_dialog_content": "Do you want to empty your trashed assets? These items will be permanently removed from Immich",
"trash_page_empty_trash_dialog_ok": "Ok",
"trash_page_info": "Trashed items will be permanently deleted after {} days",
"trash_page_no_assets": "No trashed assets",
"trash_page_restore": "Restore",
"trash_page_restore_all": "Restore All",
"trash_page_select_assets_btn": "Select assets",
"trash_page_select_btn": "Select",
"trash_page_title": "Trash ({})",
"upload_dialog_cancel": "Annuller",
"upload_dialog_info": "Vil du sikkerhedskopiere de(t) valgte element(er) til serveren?",
"upload_dialog_ok": "Upload",
@@ -300,5 +359,8 @@
"version_announcement_overlay_text_1": "Hej ven, der er en ny version af",
"version_announcement_overlay_text_2": ". Besøg venligst ",
"version_announcement_overlay_text_3": " for at sikre dig, at din dockercompose- og .env-fil er opdateret, så der undgås fejlkonfiguration, specielt hvis du bruger WatchTower eller lignede.",
"version_announcement_overlay_title": "Ny serverversion er tilgængelig \uD83C\uDF89"
"version_announcement_overlay_title": "Ny serverversion er tilgængelig \uD83C\uDF89",
"viewer_remove_from_stack": "Remove from Stack",
"viewer_stack_use_as_main_asset": "Use as Main Asset",
"viewer_unstack": "Un-Stack"
}

Some files were not shown because too many files have changed in this diff