First-init

This commit is contained in:
Tyleo Dv. Delaware 2025-01-25 18:01:48 +01:00
parent c4db076ae2
commit a28ae15c25
85 changed files with 3006 additions and 0 deletions

LICENSE Normal file → Executable file

README.md Normal file → Executable file

@ -0,0 +1,9 @@
services:
static-service:
image: fusengine/apaxy:latest
restart: always
container_name: ${INSTANCE}-${DIFF_IP}-${DIFF_PORT}
volumes:
- ${WEB_EXPOSITION_DIR}:/app/web:ro
ports:
- ${DIFF_IP}:${DIFF_PORT}:80
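A minimal .env sketch for this composition; the variable names come from the compose above, the values below are purely illustrative placeholders:
INSTANCE=static01
DIFF_IP=192.168.1.10
DIFF_PORT=8081
WEB_EXPOSITION_DIR=/srv/static/web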

@ -0,0 +1,21 @@
services:
app:
image: sanjeet990/astroluma:latest
container_name: astroluma-${IP}-${PORT}
ports:
- ${IP}:${PORT}:8000
environment:
PORT: 8000
NODE_ENV: production
SECRET_KEY: a2c5f9a8b2d7e1a7f2c9c8d9b5f7a3d5
MONGODB_URI: mongodb://mongodb:27017/astroluma
volumes:
- ${DATA_DIR}/uploads:/app/storage/uploads
depends_on:
- mongodb
restart: always
mongodb:
image: mongo:6.0
volumes:
- ${DATA_DIR}/database:/data/db
restart: always
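A possible .env for the Astroluma composition above (illustrative placeholder values). Note that SECRET_KEY is hard-coded in the compose and should be replaced with your own random value:
IP=192.168.1.10
PORT=8000
DATA_DIR=/srv/astroluma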

automcs.compose.yaml.txt Executable file

@ -0,0 +1,36 @@
version: "3"
services:
app:
command: [
"auto-mcs-ttyd",
"-W",
"-t", "disableLeaveAlert=true",
"-t", "titleFixed=auto-mcs (docker)",
"-t", "fontSize=20",
"-t", "theme={\"background\": \"#1A1A1A\"}",
"-p", "8080",
"-c", "root:auto-mcs",
"tmux", "-u", "-2", "new", "-A", "-s", "-c",
"./auto-mcs"
]
image: macarooniman/auto-mcs:latest
container_name: auto-mcs
stdin_open: true
tty: true
restart: always
ports:
# ttyd web UI
- "8080:8080"
# Telepath API (auto-mcs)
- "7001:7001"
# Add more ports based on the servers you create
- "25565:25565"
volumes:
- auto-mcs-data:/root/.auto-mcs
volumes:
auto-mcs-data:

backrest.compose.yaml.txt Executable file

@ -0,0 +1,16 @@
services:
backrest:
image: garethgeorge/backrest
container_name: backrest
ports:
- ${PORT}:9898
volumes:
- ${DATA_DIR}/data:/data
- ${DATA_DIR}/config:/config
- ${DATA_DIR}/cache:/cache
- ${TARGET_DATA_DIR}:/targets${TARGET_DATA_DIR} # mount the directories you want to back up somewhere inside the container filesystem
environment:
- BACKREST_DATA=/data # path for backrest data. restic binary and the database are placed here.
- BACKREST_CONFIG=/config/config.json # path for the backrest config file.
- XDG_CACHE_HOME=/cache # path for the restic cache which greatly improves performance.
restart: unless-stopped
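An illustrative .env sketch for the Backrest composition (placeholder values). With TARGET_DATA_DIR=/srv/important-data, the backup sources appear inside the container under /targets/srv/important-data:
PORT=9898
DATA_DIR=/srv/backrest
TARGET_DATA_DIR=/srv/important-data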

blinko-note.compose.yaml.txt Executable file

@ -0,0 +1,45 @@
services:
blinko-website:
image: blinkospace/blinko:latest
container_name: blinko-${IP}-${PORT}
environment:
NODE_ENV: production
# NEXTAUTH_URL: http://localhost:1111
# NEXT_PUBLIC_BASE_URL: http://localhost:1111
NEXTAUTH_SECRET: ${KEY}
DATABASE_URL: postgresql://postgres:${DB_PASS}@postgres:5432/postgres
depends_on:
postgres:
condition: service_healthy
volumes:
- ${DATA_DIR}/app:/app/.blinko
restart: always
logging:
options:
max-size: "10m"
max-file: "3"
ports:
- ${IP}:${PORT}:1111
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:1111/"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
postgres:
image: postgres:14
restart: always
volumes:
- ${DATA_DIR}/bdd:/var/lib/postgresql/data:rw
environment:
POSTGRES_DB: postgres
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${DB_PASS}
TZ: ${TIMEZONE}
healthcheck:
test:
["CMD", "pg_isready", "-U", "postgres", "-d", "postgres"]
interval: 5s
timeout: 10s
retries: 5
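An illustrative .env sketch for the Blinko composition (placeholder values; generate your own KEY, e.g. with openssl rand -hex 32, and choose a strong DB_PASS):
IP=192.168.1.10
PORT=1111
KEY=change-me-nextauth-secret
DB_PASS=change-me-db-password
DATA_DIR=/srv/blinko
TIMEZONE=Europe/Paris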

@ -0,0 +1,39 @@
services:
bytestash:
image: "ghcr.io/jordan-dalby/bytestash:latest"
container_name: bytestash-${IP}-${PORT}
volumes:
- ${DATA_DIR}:/data/snippets
ports:
- ${IP}:${PORT}:5000
environment:
- BASE_PATH=${BASEPATH}
# if auth username or password are left blank then authorisation is disabled
# the username used for logging in
- AUTH_USERNAME=${USER}
# the password used for logging in
- AUTH_PASSWORD=${PASS}
# the jwt secret used by the server, make sure to generate your own secret token to replace this one
- JWT_SECRET=${JWT_KEY}
# how long the token lasts, examples: "2 days", "10h", "7d", "1m", "60s"
- TOKEN_EXPIRY=${TOKEN_EXP}
restart: unless-stopped
bytestash-display:
image: "ghcr.io/jordan-dalby/bytestash:latest"
container_name: bytestash-display-${IP}-${PORT_PUBLIC}
volumes:
- ${DATA_DIR}:/data/snippets:ro
ports:
- ${IP}:${PORT_PUBLIC}:5000
environment:
- BASE_PATH=${BASEPATH_PUBLIC}
# if auth username or password are left blank then authorisation is disabled
# the username used for logging in
- AUTH_USERNAME=
# the password used for logging in
- AUTH_PASSWORD=
# the jwt secret used by the server, make sure to generate your own secret token to replace this one
- JWT_SECRET=${JWT_KEY_PUBLIC}
# how long the token lasts, examples: "2 days", "10h", "7d", "1m", "60s"
- TOKEN_EXPIRY=${TOKEN_EXP_PUBLIC}
restart: unless-stopped

bytestash.compose.yaml.txt Executable file

@ -0,0 +1,20 @@
services:
bytestash:
image: "ghcr.io/jordan-dalby/bytestash:latest"
container_name: bytestash-${IP}-${PORT}
volumes:
- ${DATA_DIR}:/data/snippets
ports:
- ${IP}:${PORT}:5000
environment:
- BASE_PATH=${BASEPATH}
# if auth username or password are left blank then authorisation is disabled
# the username used for logging in
- AUTH_USERNAME=${USER}
# the password used for logging in
- AUTH_PASSWORD=${PASS}
# the jwt secret used by the server, make sure to generate your own secret token to replace this one
- JWT_SECRET=${JWT_KEY}
# how long the token lasts, examples: "2 days", "10h", "7d", "1m", "60s"
- TOKEN_EXPIRY=${TOKEN_EXP}
restart: unless-stopped
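An illustrative .env sketch for the single-instance ByteStash composition above (placeholder values; TOKEN_EXP follows the examples in the comment, BASEPATH may stay empty to serve from the root):
IP=192.168.1.10
PORT=5000
DATA_DIR=/srv/bytestash
BASEPATH=
USER=admin
PASS=change-me
JWT_KEY=change-me-jwt-secret
TOKEN_EXP=7d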

checkmk.compose.yaml.txt Normal file

@ -0,0 +1,15 @@
services:
checkmk:
container_name: checkmk-${IP}-${PORT}
image: checkmk/check-mk-raw:latest
tmpfs:
- /opt/omd/sites/cmk/tmp:uid=1000,gid=1000
ulimits:
nofile: 1024
volumes:
- ${DATA_DIR}/monitoring:/omd/sites
- /etc/localtime:/etc/localtime:ro
ports:
- ${IP}:${PORT}:5000
restart: unless-stopped
networks: {}

dagu.compose.yaml.txt Normal file

@ -0,0 +1,9 @@
services:
dagu:
ports:
- ${IP}:${PORT}:8080
volumes:
- ${DATA_DIR}/home:/home/dagu/.config/dagu
- ${DATA_DIR}/share:/home/dagu/.local/share
image: ghcr.io/dagu-org/dagu:latest
networks: {}

dbgate.compose.yaml.txt Executable file

@ -0,0 +1,37 @@
version: '3'
services:
dbgate:
image: dbgate/dbgate
restart: always
ports:
- 80:3000
volumes:
- dbgate-data:/root/.dbgate
environment:
CONNECTIONS: con1,con2,con3,con4
LABEL_con1: MySql
SERVER_con1: mysql
USER_con1: root
PASSWORD_con1: TEST
PORT_con1: 3306
ENGINE_con1: mysql@dbgate-plugin-mysql
LABEL_con2: Postgres
SERVER_con2: postgres
USER_con2: postgres
PASSWORD_con2: TEST
PORT_con2: 5432
ENGINE_con2: postgres@dbgate-plugin-postgres
LABEL_con3: MongoDB
URL_con3: mongodb://mongo:27017
ENGINE_con3: mongo@dbgate-plugin-mongo
LABEL_con4: SQLite
FILE_con4: /home/jan/feeds.sqlite
ENGINE_con4: sqlite@dbgate-plugin-sqlite
volumes:
dbgate-data:
driver: local

@ -0,0 +1,28 @@
version: "3.8"
services:
puter:
container_name: puter-${IP}-${PORT}
image: ghcr.io/heyputer/puter:2.4.2
user: ${RUN_USER}
pull_policy: always
# build: ./
restart: unless-stopped
ports:
- ${IP}:${PORT}:4100
environment:
# TZ: Europe/Paris
# CONFIG_PATH: /etc/puter
PUID: ${RUN_ID}
PGID: ${RUN_ID}
volumes:
- ${CONFIG_DIR}:/etc/puter
- ${DATA_DIR}:/var/puter
- ${SHARED_DIR}:/var/puter/Shared
healthcheck:
test: wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test ||
exit 1
interval: 30s
timeout: 3s
retries: 3
start_period: 30s
networks: {}

@ -0,0 +1,36 @@
services:
directus-prod:
container_name: directus-prod-cmdb-${IP}-${PORT}
image: directus/directus:11.3.2
user: ${PARAM_RUNLEVEL} # root (insecure) or UID/GID
ports:
- ${IP}:${PORT}:8055
volumes:
- ${DATA_DIR}/database:/directus/database
- ${DATA_DIR}/uploads:/directus/uploads
- ${DATA_DIR}/extensions:/directus/extensions
environment:
SECRET: ${PARAM_KEY} # pwgen 64 1 --secure
ADMIN_EMAIL: ${PARAM_USER} # Mail Format
ADMIN_PASSWORD: ${PARAM_PASS}
DB_CLIENT: sqlite3
DB_FILENAME: /directus/database/data.db
WEBSOCKETS_ENABLED: ${PARAM_WS} # true or false to enable WebSockets. Can be insecure if exposed !
directus-sandbox:
container_name: directus-sandbox-cmdb-${S_IP}-${S_PORT}
image: directus/directus:11.3.2
user: ${S_PARAM_RUNLEVEL} # root (insecure) or UID/GID
ports:
- ${S_IP}:${S_PORT}:8055
volumes:
- ${S_DATA_DIR}/database:/directus/database
- ${S_DATA_DIR}/uploads:/directus/uploads
- ${S_DATA_DIR}/extensions:/directus/extensions
environment:
SECRET: ${S_PARAM_KEY} # pwgen 64 1 --secure
ADMIN_EMAIL: ${S_PARAM_USER} # Mail Format
ADMIN_PASSWORD: ${S_PARAM_PASS}
DB_CLIENT: sqlite3
DB_FILENAME: /directus/database/data.db
WEBSOCKETS_ENABLED: ${S_PARAM_WS} # true or false to enable WebSockets. Can be insecure if exposed !
networks: {}

@ -0,0 +1,19 @@
version: "3"
services:
directus:
image: directus/directus:11.3.2
user: ${PARAM_RUNLEVEL} # root (insecure) or UID/GID
ports:
- ${IP}:${PORT}:8055
volumes:
- ${DATA_DIR}/database:/directus/database
- ${DATA_DIR}/uploads:/directus/uploads
- ${DATA_DIR}/extensions:/directus/extensions
environment:
SECRET: ${PARAM_KEY} # pwgen 64 1 --secure
ADMIN_EMAIL: ${PARAM_USER} # Mail Format
ADMIN_PASSWORD: ${PARAM_PASS}
DB_CLIENT: sqlite3
DB_FILENAME: /directus/database/data.db
WEBSOCKETS_ENABLED: ${PARAM_WS} # true or false to enable WebSockets. Can be insecure if exposed !
networks: {}
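An illustrative .env sketch for the single-instance Directus composition above (placeholder values; the compose comments already suggest pwgen 64 1 --secure for the secret):
PARAM_RUNLEVEL=1000:1000
IP=192.168.1.10
PORT=8055
DATA_DIR=/srv/directus
PARAM_KEY=change-me-secret
PARAM_USER=admin@example.com
PARAM_PASS=change-me
PARAM_WS=false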

dockge.compose.yaml.txt Executable file

@ -0,0 +1,14 @@
services:
dockge:
image: louislam/dockge:${VERSION} # latest (stable), beta (unstable) or nightly (you are courageous !)
container_name: dockge-${MODE}-${IP}-${PORT}
restart: always
ports:
- ${IP}:${PORT}:5001
volumes:
- /var/run/docker.sock:/var/run/docker.sock:${MODE} # Use ro for Controller mode, rw for Agent mode.
- ${DATA_DIR}:/app/data
- ${STACK_DIR}:${STACK_DIR}
environment:
# Tell Dockge where to find the stacks
- DOCKGE_STACKS_DIR=${STACK_DIR}
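An illustrative .env sketch for the Dockge composition (placeholder values; per the comment above, MODE is ro for Controller mode and rw for Agent mode):
VERSION=latest
MODE=rw
IP=192.168.1.10
PORT=5001
DATA_DIR=/srv/dockge/data
STACK_DIR=/opt/stacks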

@ -0,0 +1,26 @@
services:
# DOKEMON Library Edition - a library-centric version for exposing your compositions to the world.
#
# /!\ NEVER PUT CREDENTIALS DIRECTLY IN THE COMPOSITIONS HERE - THEY WILL BE VISIBLE TO THE WORLD !! /!\
# Use variables instead. Only compositions in the "composelibrary" part of Dokemon are exposed.
# We do not recommend this version for production use; run it alongside a real production instance.
#
# This version is not intended for production use.
# It is tweaked for library use only and is exposed through the Apaxy v2 static server.
dokemon-library-manager:
ports:
- ${DIFFUSION_IP}:${DIFFUSION_ADMIN}:9090
volumes:
- ${DOKEMON_DATA_DIR}/dokemon:/data
- ${COMPOSE_DATA_DIR}:/data/compose
restart: unless-stopped
container_name: ${INSTANCE_NAME}-dokemon-library
image: productiveops/dokemon:latest
dokemon-library-exposer-v2:
image: fusengine/apaxy:latest
container_name: ${INSTANCE_NAME}-dokemon-exposer
ports:
- ${DIFFUSION_IP}:${DIFFUSION_EXPOSED}:80
volumes:
- ${COMPOSE_DATA_DIR}:/app/web:ro

@ -0,0 +1,14 @@
services:
donetick:
image: donetick/donetick
container_name: donetick-${IP}-${PORT}
restart: unless-stopped
ports:
- ${IP}:${PORT}:2021
volumes:
- ${DATA_DIR}/data:/donetick-data
- ${DATA_DIR}/config:/config
environment:
- DT_ENV=selfhosted
- DT_SQLITE_PATH=/donetick-data/donetick.db
networks: {}

donetick.compose.yaml.txt Executable file

@ -0,0 +1,13 @@
services:
donetick:
image: donetick/donetick
container_name: donetick-${IP}-${PORT}
restart: unless-stopped
ports:
- ${IP}:${PORT}:2021 # needed for serving backend and frontend
volumes:
- ${DATA_DIR}/data:/donetick-data # database file stored (sqlite database)
- ${DATA_DIR}/config:/config # configuration file, e.g. selfhosted.yaml
environment:
- DT_ENV=selfhosted # this tells Donetick to load ./config/selfhosted.yaml as its configuration file
- DT_SQLITE_PATH=/donetick-data/donetick.db

@ -0,0 +1,34 @@
services:
db:
image: postgres
environment:
- POSTGRES_HOST_AUTH_METHOD=trust
volumes:
- ${REPOMAKER_PATH}/pgdata:/var/lib/postgresql/data
restart: unless-stopped
web:
image: registry.gitlab.com/fdroid/repomaker:latest
hostname: ${REPOMAKER_HOSTNAME}
domainname: ${REPOMAKER_HOSTNAME}
command: bash -c './wait-for db:5432 -- python3 manage.py migrate &&
./httpd-foreground'
volumes:
- ${REPOMAKER_PATH}/data:/repomaker/data
ports:
- ${REPOMAKER_PORT}:80
depends_on:
- db
restart: unless-stopped
tasks:
image: registry.gitlab.com/fdroid/repomaker:latest
command: bash -c './wait-for web:80 -- su www-data -p -s /bin/bash -c "cd
/repomaker && python3 manage.py process_tasks"'
volumes:
- ${REPOMAKER_PATH}/data:/repomaker/data
depends_on:
- db
- web
restart: unless-stopped
networks: {}
# vim: set tabstop=2:softtabstop=2:shiftwidth=2

fenrus.compose.yaml.txt Executable file

@ -0,0 +1,11 @@
services:
fenrus:
image: revenz/fenrus
container_name: fenrus-${IP}-${PORT}
environment:
- TZ=${TIMEZONE}
volumes:
- ${DATA_DIR}:/app/data
ports:
- ${IP}:${PORT}:3000
restart: unless-stopped

@ -0,0 +1,18 @@
services:
filebrowser:
image: hurlenko/filebrowser
container_name: filebrowser-${RUN_USER}-${IP}-${PORT}-${INSTANCE}
user: "${RUN_USER}:${RUN_USER}" # Can be ROOT but not recommended
ports:
- ${IP}:${PORT}:8080 # Use 0.0.0.0 for IP if you are within a LAN
volumes:
- ${DATA_DIR}/data:/data:rw
- ${DATA_DIR}/config:/config:rw
- ${DATA_DIR}/style:/data/style:rw
# Add your data links below
# - SOURCE:DESTINATION:MODE[RO/RW]
############################################
environment:
- FB_BASEURL=${BASEURL} # Set it to "/" for sub-domain use
restart: always

formbricks.compose.yaml.txt Executable file

@ -0,0 +1,188 @@
version: "3.3"
x-environment: &environment
environment:
######################################################## REQUIRED ########################################################
# The url of your Formbricks instance used in the admin panel
# Set this to your public-facing URL, e.g., https://example.com
WEBAPP_URL:
# Required for next-auth. Should be the same as WEBAPP_URL
NEXTAUTH_URL:
# PostgreSQL DB for Formbricks to connect to
DATABASE_URL: "postgresql://postgres:postgres@postgres:5432/formbricks?schema=public"
# NextJS Auth
# @see: https://next-auth.js.org/configuration/options#nextauth_secret
# You can use: `openssl rand -hex 32` to generate one
NEXTAUTH_SECRET:
# Encryption Key is used for 2FA & Single use URLs for Link Surveys
# You can use: $(openssl rand -hex 32) to generate one
ENCRYPTION_KEY:
# API Secret for running cron jobs.
# You can use: $(openssl rand -hex 32) to generate a secure one
CRON_SECRET:
############################################# OPTIONAL (ENTERPRISE EDITION) #############################################
# Enterprise License Key (More info at: https://formbricks.com/docs/self-hosting/license)
# Required to access Enterprise-only features
# ENTERPRISE_LICENSE_KEY:
############################################# OPTIONAL (EMAIL CONFIGURATION) #############################################
# Email Configuration
# MAIL_FROM:
# SMTP_HOST:
# SMTP_PORT:
# SMTP_USER:
# SMTP_PASSWORD:
# (Additional option for TLS (port 465) only)
# SMTP_SECURE_ENABLED: 1
# If set to 0, the server will accept connections without requiring authorization from the list of supplied CAs (default is 1).
# SMTP_REJECT_UNAUTHORIZED_TLS: 0
############################################## OPTIONAL (APP CONFIGURATION) ##############################################
# Set the below if you want to use a custom URL for the links created by the Link Shortener
# SHORT_URL_BASE:
# Set the below to 0 to enable Email Verification for new signups (requires Email Configuration)
EMAIL_VERIFICATION_DISABLED: 1
# Set the below to 0 to enable Password Reset (requires Email Configuration)
PASSWORD_RESET_DISABLED: 1
# Set the below to 1 to disable logins with email
# EMAIL_AUTH_DISABLED:
# Set the below to 1 to disable invites
# INVITE_DISABLED:
# Set the below if you want to ship JS & CSS files from a complete URL instead of the current domain
# ASSET_PREFIX_URL:
# Set the below to your Unsplash API Key for their Survey Backgrounds
# UNSPLASH_ACCESS_KEY:
################################################### OPTIONAL (STORAGE) ###################################################
# Set the below to set a custom Upload Directory
# UPLOADS_DIR:
# Set S3 Storage configuration (required for the file upload in serverless environments like Vercel)
# S3_ACCESS_KEY:
# S3_SECRET_KEY:
# S3_REGION:
# S3_BUCKET_NAME:
# Set a third-party S3-compatible storage endpoint (e.g. StorJ); leave empty if you use Amazon S3
# S3_ENDPOINT_URL=
# Force path style for S3 compatible storage (0 for disabled, 1 for enabled)
S3_FORCE_PATH_STYLE: 0
############################################# OPTIONAL (OAUTH CONFIGURATION) #############################################
# Set the below from GitHub if you want to enable GitHub OAuth
# GITHUB_ID:
# GITHUB_SECRET:
# Set the below from Google if you want to enable Google OAuth
# GOOGLE_CLIENT_ID:
# GOOGLE_CLIENT_SECRET:
# Set the below from Azure Active Directory Login if you want to enable Azure AD OAuth
# AZUREAD_CLIENT_ID:
# AZUREAD_CLIENT_SECRET:
# AZUREAD_TENANT_ID:
# Set the below to OpenID Connect Provider if you want to enable OIDC
# OIDC_CLIENT_ID:
# OIDC_CLIENT_SECRET:
# OIDC_ISSUER:
# OIDC_DISPLAY_NAME:
# OIDC_SIGNING_ALGORITHM:
########################################## OPTIONAL (THIRD PARTY INTEGRATIONS) ###########################################
# Oauth credentials for Notion Integration
# NOTION_OAUTH_CLIENT_ID:
# NOTION_OAUTH_CLIENT_SECRET:
# Oauth credentials for Google Sheet Integration
# GOOGLE_SHEETS_CLIENT_ID:
# GOOGLE_SHEETS_CLIENT_SECRET:
# GOOGLE_SHEETS_REDIRECT_URL:
# Oauth credentials for Airtable Integration
# AIRTABLE_CLIENT_ID:
# Oauth credentials for Slack Integration
# SLACK_CLIENT_ID:
# SLACK_CLIENT_SECRET:
############################################### OPTIONAL (LEGAL INFORMATION) ################################################
# Set the below to have your own Privacy Page URL on auth & link survey page
# PRIVACY_URL:
# Set the below to have your own Terms Page URL on auth & link survey page
# TERMS_URL:
# Set the below to have your own Imprint Page URL on auth & link survey page
# IMPRINT_URL:
########################################## OPTIONAL (SERVER CONFIGURATION) ###########################################
# Set the below to 1 to disable Rate Limiting across Formbricks
# RATE_LIMITING_DISABLED: 1
# Set the below to send OpenTelemetry data for tracing
# OPENTELEMETRY_LISTENER_URL: http://localhost:4318/v1/traces
# Set the below to use Redis for Next Caching (default is In-Memory from Next Cache)
# REDIS_URL:
# Set the below to use for Rate Limiting (default is an in-memory LRU cache)
# REDIS_HTTP_URL:
############################################# OPTIONAL (OTHER) #############################################
# Set the below to automatically assign new users to a specific organization and role within that organization
# Insert an existing organization id or generate a valid CUID for a new one at https://www.getuniqueid.com/cuid (e.g. cjld2cjxh0000qzrmn831i7rn)
# (Role Management is an Enterprise feature)
# DEFAULT_ORGANIZATION_ID:
# DEFAULT_ORGANIZATION_ROLE: owner
services:
postgres:
restart: always
image: pgvector/pgvector:pg17
volumes:
- postgres:/var/lib/postgresql/data
environment:
# Postgres DB Super User Password
# Replace the below with your own secure password & Make sure the password matches the password field in DATABASE_URL above
- POSTGRES_PASSWORD=postgres
formbricks:
restart: always
image: ghcr.io/formbricks/formbricks:latest
depends_on:
- postgres
ports:
- 3000:3000
volumes:
- uploads:/home/nextjs/apps/web/uploads/
<<: *environment
volumes:
postgres:
driver: local
uploads:

@ -0,0 +1,33 @@
services:
# MySQL 8 Embedded Version
# Container name is built from the variables you set
ghost-website:
container_name: ghost-website-${DOMAIN}-${DIFFUSION}
image: ghost:latest
restart: always
ports:
- ${DIFFUSION}:2368
volumes:
- ${DATA_DIR}/${DOMAIN}/ghost-website:/var/lib/ghost/content
environment:
database__client: mysql
database__connection__host: ${DBIP}
database__connection__port: ${DBPORT}
database__connection__user: root
database__connection__password: ${DBPASS}
database__connection__database: ${DBNAME}
url: https://${DOMAIN}
ghost-database:
# MySQL 8 Locked Version (stable/compatible)
image: mysql:8.3.0
command: --default-authentication-plugin=mysql_native_password
restart: always
ports:
- ${DBIP}:${DBPORT}:3306
volumes:
- ${DATA_DIR}/${DOMAIN}/ghost-database:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: ${DBPASS}
MYSQL_DATABASE: ${DBNAME}
MYSQL_ROOT_HOST: "%"

@ -0,0 +1,19 @@
services:
# MySQL/MariaDB only - designed to connect to an external MySQL/MariaDB server
# Container name is built from the variables you set
ghost-website:
container_name: ghost-website-${DOMAIN}-${DIFFUSION}
image: ghost:latest
restart: always
ports:
- ${DIFFUSION}:2368
volumes:
- ${DATA_DIR}/${DOMAIN}/ghost-website:/var/lib/ghost/content
environment:
database__client: mysql
database__connection__host: ${DBIP}
database__connection__port: ${DBPORT}
database__connection__user: ${DBUSER}
database__connection__password: ${DBPASS}
database__connection__database: ${DBNAME}
url: https://${DOMAIN}

glpi.compose.yaml.txt Executable file

@ -0,0 +1,29 @@
services:
#MariaDB Container
mariadb:
image: mariadb:10.7
container_name: mariadb
hostname: mariadb
environment:
- MARIADB_ROOT_PASSWORD=${DBROOTPASS}
- MARIADB_DATABASE=glpidb
- MARIADB_USER=glpi_user
- MARIADB_PASSWORD=${DBPASS}
volumes:
- ${DATA_DIR}/mysql:/var/lib/mysql
restart: always
#GLPI Container
glpi:
image: diouxx/glpi
container_name : glpi
links:
- mariadb
hostname: glpi
ports:
- ${IP}:${PORT}:80
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- ${DATA_DIR}/glpi:/var/www/html/glpi
restart: always

gravity-dhcp.compose.yaml.txt Executable file

@ -0,0 +1,25 @@
# GRAVITY DHCP-only node (port mode)
# For the IP variable, prefer the real host IP; it determines the hostname of the DHCP node.
services:
gravity:
# Important for this to be static and unique
container_name: gravity-DHCP-node-${IP}-${PORT}
hostname: gravity-DHCP-node-${IP}-${PORT}
image: ghcr.io/beryju/gravity:stable
restart: unless-stopped
ports:
- ${IP}:${PORT}:8008 # Web Port
- ${IP}:67:67/tcp # DHCP Service Port TCP
- ${IP}:67:67/udp # DHCP Service Port UDP
volumes:
- ${DATA_DIR}:/data
environment:
ADMIN_PASSWORD: ${PASSWORD}
BOOTSTRAP_ROLES: dhcp;api;etcd;discovery;monitoring;tsdb
# The default log level (info) logs all DHCP queries, so make sure
# the logs aren't filling up the disk
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"
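An illustrative .env sketch for this DHCP node (placeholder values; the gravity-dns and gravity-hybrid variants further below use the same variable names):
IP=192.168.1.10
PORT=8008
PASSWORD=change-me
DATA_DIR=/srv/gravity-dhcp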

@ -0,0 +1,15 @@
services:
gravity:
container_name: ${GRVHOSTNAME}
hostname: ${GRVHOSTNAME}
image: ghcr.io/beryju/gravity:stable
restart: unless-stopped
network_mode: host
volumes:
- ${DATA_DIR}:/data
logging:
driver: json-file
options:
max-size: 10m
max-file: "3"
networks: {}

@ -0,0 +1,17 @@
services:
gravity:
container_name: ${GRVHOSTNAME}
hostname: ${GRVHOSTNAME}
image: ghcr.io/beryju/gravity:stable
restart: unless-stopped
network_mode: host
volumes:
- ${DATA_DIR}:/data
environment:
BOOTSTRAP_ROLES: dns
logging:
driver: json-file
options:
max-size: 10m
max-file: "3"
networks: {}

gravity-dns.compose.yaml.txt Executable file

@ -0,0 +1,25 @@
# GRAVITY DNS-only node (port mode)
# For the IP variable, prefer the real host IP; it determines the hostname of the DNS node.
services:
gravity:
# Important for this to be static and unique
container_name: gravity-dns-node-${IP}-${PORT}
hostname: gravity-dns-node-${IP}-${PORT}
image: ghcr.io/beryju/gravity:stable
restart: unless-stopped
ports:
- ${IP}:${PORT}:8008 # Web Port
- ${IP}:53:53/tcp # DNS Service Port TCP
- ${IP}:53:53/udp # DNS Service Port UDP
volumes:
- ${DATA_DIR}:/data
environment:
ADMIN_PASSWORD: ${PASSWORD}
BOOTSTRAP_ROLES: dns;api;etcd;discovery;monitoring;tsdb
# The default log level of info logs DHCP and DNS queries, so ensure
# the logs aren't filling up the disk
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"

gravity-host.compose.yaml.txt Executable file

@ -0,0 +1,22 @@
# GRAVITY node (host network mode)
# For the IP variable, prefer the real host IP; it determines the hostname of the node.
services:
gravity:
# Important for this to be static and unique
container_name: gravity-dns-node-${IP}-${PORT}
hostname: gravity-dns-node-${IP}-${PORT}
image: ghcr.io/beryju/gravity:stable
restart: unless-stopped
network_mode: host
volumes:
- ${DATA_DIR}:/data
environment:
ADMIN_PASSWORD: ${PASSWORD}
# LOG_LEVEL: info
# The default log level of info logs DHCP and DNS queries, so ensure
# the logs aren't filling up the disk
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"

gravity-hybrid.compose.yaml.txt Executable file

@ -0,0 +1,27 @@
# GRAVITY hybrid DNS + DHCP node (port mode)
# For the IP variable, prefer the real host IP; it determines the hostname of the node.
services:
gravity:
# Important for this to be static and unique
container_name: gravity-dns-node-${IP}-${PORT}
hostname: gravity-dns-node-${IP}-${PORT}
image: ghcr.io/beryju/gravity:stable
restart: unless-stopped
ports:
- ${IP}:${PORT}:8008 # Web Port
- ${IP}:53:53/tcp # DNS Service Port TCP
- ${IP}:53:53/udp # DNS Service Port UDP
- ${IP}:67:67/tcp # DHCP Service Port TCP
- ${IP}:67:67/udp # DHCP Service Port UDP
volumes:
- ${DATA_DIR}:/data
environment:
ADMIN_PASSWORD: ${PASSWORD}
# LOG_LEVEL: info
# The default log level of info logs DHCP and DNS queries, so ensure
# the logs aren't filling up the disk
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"

@ -0,0 +1,8 @@
services:
guacamole:
image: flcontainers/guacamole
volumes:
- ${DATA_DIR}:/config
- ${BRANDING_JARFILE}:/config/guacamole/extensions/branding.jar
ports:
- ${IP}:${PORT}:8080

guacamole-fl.compose.yaml.txt Executable file

@ -0,0 +1,7 @@
services:
guacamole:
image: flcontainers/guacamole
volumes:
- ${DATA_DIR}:/config
ports:
- ${IP}:${PORT}:8080

headscale-vpn.compose.yaml.txt Executable file

@ -0,0 +1,11 @@
services:
headscale:
image: headscale/headscale:latest
restart: unless-stopped
container_name: headscale
ports:
- "${IP}:${WEBPORT}:8080"
- "${IP}:${VPNPORT}:9090"
volumes:
- ${DATA_DIR}:/etc/headscale
command: serve

hedgedoc.compose.yaml.txt Executable file

@ -0,0 +1,26 @@
services:
database:
image: postgres:13.4-alpine
environment:
- POSTGRES_USER=${DB_USER}
- POSTGRES_PASSWORD=${DB_PASS}
- POSTGRES_DB=${DB_NAME}
volumes:
- ${DATA_DIR}/database:/var/lib/postgresql/data
restart: always
app:
# Make sure to use the latest release from https://hedgedoc.org/latest-release
image: quay.io/hedgedoc/hedgedoc:1.10.0
environment:
- CMD_DB_URL=postgres://${DB_USER}:${DB_PASS}@database:5432/${DB_NAME}
- CMD_DOMAIN=${DOMAIN_NAME}
- CMD_URL_ADDPORT=${DOMAIN_ADDPORT}
- CMD_ALLOW_ORIGIN=['${DOMAIN_NAME}']
- CMD_ALLOW_EMAIL_REGISTER=false
volumes:
- ${DATA_DIR}/uploads:/hedgedoc/public/uploads
ports:
- "${IP}:${PORT}:3000"
restart: always
depends_on:
- database
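An illustrative .env sketch for the HedgeDoc composition (placeholder values; CMD_URL_ADDPORT expects true or false):
DB_USER=hedgedoc
DB_PASS=change-me
DB_NAME=hedgedoc
DATA_DIR=/srv/hedgedoc
DOMAIN_NAME=notes.example.com
DOMAIN_ADDPORT=false
IP=192.168.1.10
PORT=3000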

hoodik-gmail.compose.yaml.txt Executable file

@ -0,0 +1,21 @@
services:
hoodik:
container_name: hoodik-${IP}-${PORT}-${URL}
stdin_open: true
tty: true
environment:
- DATA_DIR=/data
- APP_URL=https://${URL}
- SSL_CERT_FILE=/data/my-cert-file.crt.pem
- SSL_KEY_FILE=/data/my-key-file.key.pem
- MAILER_TYPE=smtp
- SMTP_ADDRESS=smtp.gmail.com
- SMTP_USERNAME=${MAIL_RECEPT}
- SMTP_PASSWORD=${MAIL_PWD}
- SMTP_PORT=465
- SMTP_DEFAULT_FROM=Hoodik Drive <${MAIL_RECEPT}>
volumes:
- ${DATA_DIR}:/data
ports:
- ${IP}:${PORT}:5443
image: hudik/hoodik:latest

immich-base.compose.yaml.txt Executable file

@ -0,0 +1,59 @@
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
volumes:
- ${DATA_DIR}/data:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
ports:
- ${PORT}:2283
environment:
DB_PASSWORD: ${DB_PASSWORD}
DB_USERNAME: ${DB_USERNAME}
DB_DATABASE_NAME: ${DB_DATABASE_NAME}
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
- ${DATA_DIR}/bdd:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
restart: always
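An illustrative .env sketch for the Immich base composition (placeholder values; IMMICH_VERSION falls back to "release" when unset):
IMMICH_VERSION=release
PORT=2283
DATA_DIR=/srv/immich
DB_USERNAME=postgres
DB_PASSWORD=change-me
DB_DATABASE_NAME=immich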

immich-powered.compose.yaml.txt Executable file

@ -0,0 +1,74 @@
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
volumes:
- ${DATA_DIR}/data:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
ports:
- ${IP}:${PORT}:2283
environment:
DB_PASSWORD: ${DB_PASSWORD}
DB_USERNAME: ${DB_USERNAME}
DB_DATABASE_NAME: ${DB_DATABASE_NAME}
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-public-proxy:
image: alangrainger/immich-public-proxy:latest
container_name: immich-public-proxy
restart: always
ports:
- ${IP}:${PORT_GALLERY}:3000
environment:
- IMMICH_URL=http://${IP}:${PORT}
healthcheck:
test: wget -q http://localhost:3000/healthcheck || exit 1
start_period: 10s
timeout: 5s
# Removed PowerTool : Not really "power", lol.
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
- ${DATA_DIR}/bdd:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
restart: always

it-tools.compose.yaml.txt Executable file

@ -0,0 +1,7 @@
services:
it-tools:
container_name: it-tools
restart: always
ports:
- ${IP}:${PORT}:80
image: corentinth/it-tools:${VER}

jellyfin.compose.yaml.txt Executable file

@ -0,0 +1,15 @@
services:
jellyfin:
image: jellyfin/jellyfin
container_name: jellyfin-${USER_ID}-${IP}-${PORT}-${DOMAIN}
user: ${USER_ID}:${USER_ID}
ports:
- ${IP}:${PORT}:8096
volumes:
- ${DATA_DIR}/config:/config
- ${DATA_DIR}/cache:/cache
- ${DATA_DIR}/media:/media
restart: 'unless-stopped'
# Optional - alternative address used for autodiscovery
environment:
- JELLYFIN_PublishedServerUrl=${SCHEME}://${DOMAIN}
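An illustrative .env sketch for the Jellyfin composition (placeholder values):
USER_ID=1000
IP=192.168.1.10
PORT=8096
SCHEME=https
DOMAIN=jellyfin.example.com
DATA_DIR=/srv/jellyfin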

koillection.compose.yaml.txt Executable file

@ -0,0 +1,36 @@
version: '3'
services:
# Koillection
koillection:
image: koillection/koillection
container_name: koillection
restart: unless-stopped
ports:
- 80:80
depends_on:
- db
volumes:
- ./volumes/koillection/uploads:/uploads
# Database : choose one of the following
db:
image: postgres:16
container_name: db
restart: unless-stopped
environment:
- POSTGRES_DB=${DB_NAME}
- POSTGRES_USER=${DB_USER}
- POSTGRES_PASSWORD=${DB_PASSWORD}
volumes:
- "./volumes/postgresql:/var/lib/postgresql/data"
#db:
# image: mysql:latest
# container_name: db
# restart: unless-stopped
# environment:
# - MYSQL_DATABASE=${DB_NAME}
# - MYSQL_ROOT_PASSWORD=${DB_PASSWORD}
# volumes:
# - "./docker/volumes/mysql:/var/lib/mysql"

leantime.compose.yaml.txt Normal file

@ -0,0 +1,27 @@
services:
leantime_db:
image: mysql:8.4
container_name: mysql_leantime
volumes:
- db_data:/var/lib/mysql
restart: unless-stopped
env_file: ./.env # Environment file with settings
networks:
- leantime-net
command: --character-set-server=UTF8MB4 --collation-server=UTF8MB4_unicode_ci
leantime:
image: leantime/leantime:latest
container_name: leantime
restart: unless-stopped
env_file: ./.env # Environment file with settings
networks:
- leantime-net
volumes:
- public_userfiles:/var/www/html/public/userfiles # Volume to store public files, logo etc
- userfiles:/var/www/html/userfiles # Volume to store private user uploaded files
- plugins:/var/www/html/app/Plugins # needed if you plan to use plugins from the marketplace
ports:
- ${LEAN_PORT}:80 # The port to expose and access Leantime
depends_on:
- leantime_db # Don't start Leantime unless leantime_db is running
volumes:
  db_data:
  public_userfiles:
  userfiles:
  plugins:
networks:
  leantime-net:

librephoto.compose.yaml.txt Executable file

@ -0,0 +1,75 @@
# DO NOT EDIT
# The .env file has everything you need to edit.
# Run options:
# 1. Use prebuilt images (preferred method):
# run cmd: docker compose up -d
# 2. Build images on your own machine:
# build cmd: docker compose build
# run cmd: docker compose up -d
services:
proxy:
image: reallibrephotos/librephotos-proxy:${tag}
container_name: proxy
restart: unless-stopped
volumes:
- ${scanDirectory}:/data
- ${data}/protected_media:/protected_media
ports:
- ${IP}:${PORT}:80
depends_on:
- backend
- frontend
db:
image: postgres:13
container_name: db
restart: unless-stopped
environment:
- POSTGRES_USER=${dbUser}
- POSTGRES_PASSWORD=${dbPass}
- POSTGRES_DB=${dbName}
volumes:
- ${data}/db8:/var/lib/postgresql/data
command: postgres -c fsync=off -c synchronous_commit=off -c full_page_writes=off -c random_page_cost=1.0
healthcheck:
test: psql -U ${dbUser} -d ${dbName} -c "SELECT 1;"
interval: 5s
timeout: 5s
retries: 5
frontend:
image: reallibrephotos/librephotos-frontend:${tag}
container_name: frontend
restart: unless-stopped
backend:
image: reallibrephotos/librephotos:${tag}
container_name: backend
restart: unless-stopped
volumes:
- ${scanDirectory}:/data
- ${data}/protected_media:/protected_media
- ${data}/logs:/logs
- ${data}/cache:/root/.cache
environment:
- SECRET_KEY=${shhhhKey:-}
- BACKEND_HOST=backend
- ADMIN_EMAIL=${adminEmail:-}
- ADMIN_USERNAME=${userName:-}
- ADMIN_PASSWORD=${userPass:-}
- DB_BACKEND=postgresql
- DB_NAME=${dbName}
- DB_USER=${dbUser}
- DB_PASS=${dbPass}
- DB_HOST=${dbHost}
- DB_PORT=5432
- MAPBOX_API_KEY=${mapApiKey:-}
- WEB_CONCURRENCY=${gunniWorkers:-1}
- SKIP_PATTERNS=${skipPatterns:-}
- ALLOW_UPLOAD=${allowUpload:-false}
- CSRF_TRUSTED_ORIGINS=${csrfTrustedOrigins:-}
- DEBUG=0
depends_on:
db:
condition: service_healthy

lldap-sqlite.compose.yaml.txt Executable file

@ -0,0 +1,35 @@
services:
lldap:
image: lldap/lldap:stable
ports:
# For LDAP, not recommended to expose, see Usage section.
#- "3890:3890"
# For LDAPS (LDAP over SSL), enable this port if LLDAP_LDAPS_OPTIONS__ENABLED is set to true; see the environment variables below
#- "6360:6360"
# For the web front-end
- "17170:17170"
volumes:
- ${DATA_DIR}:/data
environment:
- UID=####
- GID=####
- TZ=####/####
- LLDAP_JWT_SECRET=REPLACE_WITH_RANDOM
- LLDAP_KEY_SEED=REPLACE_WITH_RANDOM
- LLDAP_LDAP_BASE_DN=dc=example,dc=com
# If using LDAPS, set enabled true and configure cert and key path
# - LLDAP_LDAPS_OPTIONS__ENABLED=true
# - LLDAP_LDAPS_OPTIONS__CERT_FILE=/path/to/certfile.crt
# - LLDAP_LDAPS_OPTIONS__KEY_FILE=/path/to/keyfile.key
# You can also set a different database:
# - LLDAP_DATABASE_URL=mysql://mysql-user:password@mysql-server/my-database
# - LLDAP_DATABASE_URL=postgres://postgres-user:password@postgres-server/my-database
# If using SMTP, set the following variables
# - LLDAP_SMTP_OPTIONS__ENABLE_PASSWORD_RESET=true
# - LLDAP_SMTP_OPTIONS__SERVER=smtp.example.com
# - LLDAP_SMTP_OPTIONS__PORT=465 # Check your SMTP provider's documentation for this setting
# - LLDAP_SMTP_OPTIONS__SMTP_ENCRYPTION=TLS # How the connection is encrypted, either "NONE" (no encryption, port 25), "TLS" (sometimes called SSL, port 465) or "STARTTLS" (sometimes called TLS, port 587).
# - [email protected] # The SMTP user, usually your email address
# - LLDAP_SMTP_OPTIONS__PASSWORD=PasswordGoesHere # The SMTP password
# - LLDAP_SMTP_OPTIONS__FROM=no-reply <[email protected]> # The header field, optional: how the sender appears in the email. The first is a free-form name, followed by an email between <>.
# - LLDAP_SMTP_OPTIONS__TO=admin <[email protected]> # Same for reply-to, optional.

@ -0,0 +1,21 @@
services:
macos:
image: dockurr/macos
container_name: macos-${IP}-${PORT_WEB}-${PORT_VNC}
environment:
VERSION: ${MACOS_VERSION}
devices:
- /dev/kvm
cap_add:
- NET_ADMIN
ports:
- ${IP}:${PORT_WEB}:8006
- ${IP}:${PORT_VNC}:5900/tcp
- ${IP}:${PORT_VNC}:5900/udp
stop_grace_period: 2m
# 15 macOS 15 Sequoia
# 14 macOS 14 Sonoma
# 13 macOS 13 Ventura
# 12 macOS 12 Monterey
# 11 macOS 11 Big Sur

@ -0,0 +1,9 @@
services:
mafl:
container_name: mafl-${IP}-${PORT}
image: hywax/mafl
restart: unless-stopped
ports:
- '${IP}:${PORT}:3000'
volumes:
- ${DATA_DIR}:/app/data

@ -0,0 +1,62 @@
# NOTE: This docker-compose file was constructed to create a base for
# use by the End-to-end tests. It has not been fully tested for use in
# constructing a true, stand-alone sync server.
# If you're interested in doing that, please join our community in the
# github issues and comments.
#
# Application runs off of port 8000.
# you can test if it's available with
# curl "http://localhost:8000/__heartbeat__"
version: "3"
services:
sync-db:
image: mysql:5.7
volumes:
- sync_db_data:/var/lib/mysql
restart: always
ports:
- "3306"
command: --explicit_defaults_for_timestamp
environment:
#MYSQL_RANDOM_ROOT_PASSWORD: yes
MYSQL_ROOT_PASSWORD: random
MYSQL_DATABASE: syncstorage
MYSQL_USER: test
MYSQL_PASSWORD: test
tokenserver-db:
image: mysql:5.7
volumes:
- tokenserver_db_data:/var/lib/mysql
restart: always
ports:
- "3306"
command: --explicit_defaults_for_timestamp
environment:
#MYSQL_RANDOM_ROOT_PASSWORD: yes
MYSQL_ROOT_PASSWORD: random
MYSQL_DATABASE: tokenserver
MYSQL_USER: test
MYSQL_PASSWORD: test
syncserver:
# NOTE: The naming in the rest of this repository has been updated to reflect the fact
# that Syncstorage and Tokenserver are now part of one repository/server called
# "Syncserver" (updated from "syncstorage-rs"). We keep the legacy naming below for
# backwards compatibility with previous Docker images.
image: mozilla/syncstorage-rs:latest
restart: always
ports:
- 8000:8000
depends_on:
- sync-db
- tokenserver-db
environment:
SYNC_HOST: 0.0.0.0
SYNC_MASTER_SECRET: secret0
SYNC_SYNCSTORAGE__DATABASE_URL: mysql://test:test@sync-db:3306/syncstorage
SYNC_TOKENSERVER__DATABASE_URL: mysql://test:test@tokenserver-db:3306/tokenserver
SYNC_TOKENSERVER__RUN_MIGRATIONS: "true"
volumes:
sync_db_data: null
tokenserver_db_data: null
networks: {}

nextcloud-lsio.compose.yaml.txt Executable file

@ -0,0 +1,49 @@
services:
nextcloud:
image: lscr.io/linuxserver/nextcloud:latest
container_name: nextcloud-${INSTANCE}-${IP}-${PORT}
environment:
- PUID=${U_ID}
- PGID=${U_ID}
- TZ=${TZ-Continent}/${TZ-City}
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_HOST=mariadb
depends_on:
- mariadb
- collabora
volumes:
- ${DATA_DIR}/config:/config
- ${DATA_DIR}/data:/data
ports:
- ${IP}:${PORT}:443
restart: unless-stopped
mariadb:
image: mariadb:latest
container_name: mariadb
restart: unless-stopped
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
volumes:
- ${DATA_DIR}/bdd:/var/lib/mysql
collabora:
container_name: example-com--collabora
image: collabora/code:latest
cap_add:
- MKNOD
environment:
- domain=${DOMAIN}
- username=${DOMAIN}
- password=${CSEC}
ports:
- ${IP}:${PORT_COLLABORA}:9980
restart: always
volumes:
- "/etc/localtime:/etc/localtime:ro"

nextcloud.compose.yaml.txt Executable file

@ -0,0 +1,44 @@
services:
# Service Nextcloud
nextcloud:
image: nextcloud:latest-alpine
container_name: nextcloud-${IP}-${PORT}
restart: unless-stopped
environment:
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_HOST=mariadb
- REDIS_HOST=redis
- REDIS_HOST_PASSWORD=${REDIS_PASSWORD}
- NEXTCLOUD_TRUSTED_DOMAINS=${DOMAIN}
volumes:
- ${DATA_DIR}/data:/var/www/html
depends_on:
- mariadb
- redis
ports:
- ${IP}:${PORT}:80
# Service MariaDB
mariadb:
image: mariadb:latest
container_name: mariadb
restart: unless-stopped
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
volumes:
- ${DATA_DIR}/bdd:/var/lib/mysql
# Service Redis
redis:
image: redis:latest
container_name: redis
restart: unless-stopped
environment:
- REDIS_PASSWORD=${REDIS_PASSWORD}
volumes:
- ${TEMP_DIR}:/data

nexterm.compose.yaml.txt Executable file

@ -0,0 +1,10 @@
services:
nexterm:
container_name: nexterm-${VER}-${IP}-${PORT}
ports:
- ${IP}:${PORT}:6989
volumes:
- ${DATA_DIR}:/app/data
image: germannewsmaker/nexterm:${VER} # "1.0.2-OPEN-PREVIEW" for example, check https://docs.nexterm.dev/preview. Use "development" for dev streamed version.
restart: always
networks: {}

opendcim.compose.yaml.txt Executable file

@ -0,0 +1,29 @@
version: '3.7'
services:
db:
image: mariadb
container_name: db
restart: always
ports:
- ${IP}:${PORT_DB}:3306
environment:
MARIADB_USER: opendcim
MARIADB_DATABASE: opendcim
MARIADB_ROOT_PASSWORD: ${DB_PASS}
MARIADB_PASSWORD: ${DB_PASS}
MARIADB_ROOT_HOST: "%"
volumes:
- ${DATA_DIR}/bdd:/var/lib/mysql
web:
image: opendcim/opendcim:23.04
ports:
- ${IP}:${PORT}:80
environment:
OPENDCIM_DB_HOST: ${IP}:${PORT_DB}
OPENDCIM_DB_USER: opendcim
OPENDCIM_DB_PASSWORD: ${DB_PASS}
OPENDCIM_DB_NAME: opendcim
links:
- db

opnform.compose.yaml.txt Normal file

@ -0,0 +1,56 @@
---
services:
api: &api
image: jhumanj/opnform-api:latest
environment: &api-environment # Add this anchor
DB_HOST: db
REDIS_HOST: redis
DB_DATABASE: ${DB_DATABASE:-forge}
DB_USERNAME: ${DB_USERNAME:-forge}
DB_PASSWORD: ${DB_PASSWORD:-forge}
DB_CONNECTION: ${DB_CONNECTION:-pgsql}
FILESYSTEM_DISK: local
LOCAL_FILESYSTEM_VISIBILITY: public
env_file:
- ./api/.env
volumes:
- opnform_storage:/usr/share/nginx/html/storage:rw
api-worker:
image: jhumanj/opnform-api:latest
command: php artisan queue:work
environment:
<<: *api-environment
IS_API_WORKER: "true"
env_file:
- ./api/.env
volumes:
- opnform_storage:/usr/share/nginx/html/storage:rw
ui:
image: jhumanj/opnform-client:latest
env_file:
- ./client/.env
redis:
image: redis:7
db:
image: postgres:16
environment:
POSTGRES_DB: ${DB_DATABASE:-forge}
POSTGRES_USER: ${DB_USERNAME:-forge}
POSTGRES_PASSWORD: ${DB_PASSWORD:-forge}
volumes:
- postgres-data:/var/lib/postgresql/data
ingress:
image: nginx:1
volumes:
- ./docker/nginx.conf:/etc/nginx/templates/default.conf.template
ports:
- 80:80
volumes:
postgres-data:
opnform_storage:

organizr.compose.yaml.txt Executable file

@ -0,0 +1,11 @@
services:
organizr:
container_name: organizr-${PORT}-${USER_ID}
volumes:
- ${DATADIR}:/config
environment:
- PGID=${USER_ID}
- PUID=${USER_ID}
ports:
- ${PORT}:80
image: ghcr.io/organizr/organizr

@ -0,0 +1,62 @@
services:
owncloud:
image: owncloud/server:latest
container_name: owncloud-server-${IP}-${PORT}
restart: always
ports:
- ${IP}:${PORT}:8080
depends_on:
- mariadb
- redis
environment:
- OWNCLOUD_DOMAIN=${OWNCLOUD_DOMAIN}
- OWNCLOUD_TRUSTED_DOMAINS=${OWNCLOUD_TRUSTED_DOMAINS}
- OWNCLOUD_DB_TYPE=mysql
- OWNCLOUD_DB_NAME=owncloud
- OWNCLOUD_DB_USERNAME=root
- OWNCLOUD_DB_PASSWORD=${DBPASS}
- OWNCLOUD_DB_HOST=mariadb
- OWNCLOUD_ADMIN_USERNAME=${ADMIN_USERNAME}
- OWNCLOUD_ADMIN_PASSWORD=${ADMIN_PASSWORD}
- OWNCLOUD_MYSQL_UTF8MB4=true
- OWNCLOUD_REDIS_ENABLED=true
- OWNCLOUD_REDIS_HOST=redis
healthcheck:
test: ["CMD", "/usr/bin/healthcheck"]
interval: 30s
timeout: 10s
retries: 5
volumes:
- ${DATA_DIR}/data/files:/mnt/data
mariadb:
image: mariadb:10.11 # minimum MariaDB version required by ownCloud is 10.9
container_name: owncloud-mariadb
restart: always
environment:
- MYSQL_ROOT_PASSWORD=${DBPASS}
- MYSQL_USER=owncloud
- MYSQL_PASSWORD=${DBPASS}
- MYSQL_DATABASE=owncloud
- MARIADB_AUTO_UPGRADE=1
command: ["--max-allowed-packet=128M", "--innodb-log-file-size=64M"]
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-u", "root", "--password=${DBPASS}"]
interval: 10s
timeout: 5s
retries: 5
volumes:
- ${DATA_DIR}/data/sql:/var/lib/mysql
redis:
image: redis:6
container_name: owncloud-redis
restart: always
command: ["--databases", "1"]
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
volumes:
- ${DATA_DIR}/redis:/data

photofield.compose.yaml.txt Executable file

@ -0,0 +1,8 @@
services:
photofield:
ports:
- ${IP}:${PORT}:8080
volumes:
# - ${DATA_DIR}/data:/app/data
- ${DATA_DIR}/photos:/app/photos:ro
image: ghcr.io/smilyorg/photofield

plane.compose.yaml.txt Executable file

@ -0,0 +1,222 @@
# DO NOT USE - DEV IN PROGRESS
x-app-env: &app-env
environment:
- NGINX_PORT=${NGINX_PORT:-80}
- WEB_URL=${WEB_URL:-http://localhost}
- DEBUG=${DEBUG:-0}
- SENTRY_DSN=${SENTRY_DSN:-""}
- SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT:-"production"}
- CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS:-}
# Gunicorn Workers
- GUNICORN_WORKERS=${GUNICORN_WORKERS:-1}
#DB SETTINGS
- PGHOST=${PGHOST:-plane-db}
- PGDATABASE=${PGDATABASE:-plane}
- POSTGRES_USER=${POSTGRES_USER:-plane}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-plane}
- POSTGRES_DB=${POSTGRES_DB:-plane}
- POSTGRES_PORT=${POSTGRES_PORT:-5432}
- PGDATA=${PGDATA:-/var/lib/postgresql/data}
- DATABASE_URL=${DATABASE_URL:-postgresql://plane:plane@plane-db/plane}
# REDIS SETTINGS
- REDIS_HOST=${REDIS_HOST:-plane-redis}
- REDIS_PORT=${REDIS_PORT:-6379}
- REDIS_URL=${REDIS_URL:-redis://plane-redis:6379/}
# RabbitMQ Settings
- RABBITMQ_HOST=${RABBITMQ_HOST:-plane-mq}
- RABBITMQ_PORT=${RABBITMQ_PORT:-5672}
- RABBITMQ_DEFAULT_USER=${RABBITMQ_USER:-plane}
- RABBITMQ_DEFAULT_PASS=${RABBITMQ_PASSWORD:-plane}
- RABBITMQ_DEFAULT_VHOST=${RABBITMQ_VHOST:-plane}
- RABBITMQ_VHOST=${RABBITMQ_VHOST:-plane}
- AMQP_URL=${AMQP_URL:-amqp://plane:plane@plane-mq:5672/plane}
# Application secret
- SECRET_KEY=${SECRET_KEY:-60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5}
# DATA STORE SETTINGS
- USE_MINIO=${USE_MINIO:-1}
- AWS_REGION=${AWS_REGION:-""}
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-"access-key"}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-"secret-key"}
- AWS_S3_ENDPOINT_URL=${AWS_S3_ENDPOINT_URL:-http://plane-minio:9000}
- AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME:-uploads}
- MINIO_ROOT_USER=${MINIO_ROOT_USER:-"access-key"}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-"secret-key"}
- BUCKET_NAME=${BUCKET_NAME:-uploads}
- FILE_SIZE_LIMIT=${FILE_SIZE_LIMIT:-5242880}
# Live server env
- API_BASE_URL=${API_BASE_URL:-http://api:8000}
services:
web:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-frontend:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: node web/server.js web
deploy:
replicas: ${WEB_REPLICAS:-1}
depends_on:
- api
- worker
space:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-space:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: node space/server.js space
deploy:
replicas: ${SPACE_REPLICAS:-1}
depends_on:
- api
- worker
- web
admin:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-admin:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: node admin/server.js admin
deploy:
replicas: ${ADMIN_REPLICAS:-1}
depends_on:
- api
- web
live:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-live:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: node live/dist/server.js live
deploy:
replicas: ${LIVE_REPLICAS:-1}
depends_on:
- api
- web
api:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-backend:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: ./bin/docker-entrypoint-api.sh
deploy:
replicas: ${API_REPLICAS:-1}
volumes:
- logs_api:/code/plane/logs
depends_on:
- plane-db
- plane-redis
- plane-mq
worker:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-backend:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: ./bin/docker-entrypoint-worker.sh
volumes:
- logs_worker:/code/plane/logs
depends_on:
- api
- plane-db
- plane-redis
- plane-mq
beat-worker:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-backend:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
command: ./bin/docker-entrypoint-beat.sh
volumes:
- logs_beat-worker:/code/plane/logs
depends_on:
- api
- plane-db
- plane-redis
- plane-mq
migrator:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-backend:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: "no"
command: ./bin/docker-entrypoint-migrator.sh
volumes:
- logs_migrator:/code/plane/logs
depends_on:
- plane-db
- plane-redis
plane-db:
<<: *app-env
image: postgres:15.7-alpine
pull_policy: if_not_present
restart: unless-stopped
command: postgres -c 'max_connections=1000'
volumes:
- pgdata:/var/lib/postgresql/data
plane-redis:
<<: *app-env
image: valkey/valkey:7.2.5-alpine
pull_policy: if_not_present
restart: unless-stopped
volumes:
- redisdata:/data
plane-mq:
<<: *app-env
image: rabbitmq:3.13.6-management-alpine
restart: always
volumes:
- rabbitmq_data:/var/lib/rabbitmq
plane-minio:
<<: *app-env
image: minio/minio:latest
pull_policy: if_not_present
restart: unless-stopped
command: server /export --console-address ":9090"
volumes:
- uploads:/export
# Comment this if you already have a reverse proxy running
proxy:
<<: *app-env
image: ${DOCKERHUB_USER:-makeplane}/plane-proxy:${APP_RELEASE:-stable}
platform: ${DOCKER_PLATFORM:-}
pull_policy: if_not_present
restart: unless-stopped
ports:
- ${NGINX_PORT}:80
depends_on:
- web
- api
- space
volumes:
pgdata:
redisdata:
uploads:
logs_api:
logs_worker:
logs_beat-worker:
logs_migrator:
rabbitmq_data:

@ -0,0 +1,13 @@
version: "3.3"
services:
portainer:
ports:
- ${ip}:${agent_port}:8000
- ${ip}:${web_port}:9443
container_name: portainer-${edition}-${ip}-w${web_port}-a${agent_port}
restart: always
volumes:
- /var/run/docker.sock:/var/run/docker.sock:${docker_mode}
- ${data_dir}:/data
image: portainer/portainer-${edition}:alpine-sts
networks: {}
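An illustrative .env sketch for the Portainer composition (placeholder values; edition is typically ce or ee and selects the image variant, docker_mode is the mount mode for the Docker socket):
ip=192.168.1.10
agent_port=8000
web_port=9443
edition=ce
docker_mode=rw
data_dir=/srv/portainer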

pritunl-vpn.compose.yaml.txt Executable file

@ -0,0 +1,50 @@
services:
pritunl:
image: ghcr.io/jippi/docker-pritunl:latest
container_name: pritunl-front-${IP}-${HTTPS_PORT}
privileged: true
ports:
- ${IP}:${HTTPS_PORT}:443 # SSL access only! You can use any port, but don't forget to enforce HTTPS and No-Verify if needed.
# Server ports - three nodes - add the ports below and make sure each external port equals its internal port
- ${IP}:${VPN_SERVER_01}:${VPN_SERVER_01}
- ${IP}:${VPN_SERVER_02}:${VPN_SERVER_02}
- ${IP}:${VPN_SERVER_03}:${VPN_SERVER_03}
dns:
- ${DNS_1_FRONT}
- ${DNS_2_FRONT}
- ${DNS_3_BACKS}
- ${DNS_4_BACKS}
restart: unless-stopped
volumes:
- ${DATA_DIR}/data/pritunl.conf:/etc/pritunl.conf
- ${DATA_DIR}/data/pritunl:/var/lib/pritunl
environment:
- PRITUNL_MONGODB_URI=mongodb://${IP}:${DB_PORT}/pritunl
depends_on:
- mongodb
mongodb:
image: mongo:latest
container_name: pritunl-back-${IP}-${DB_PORT}
restart: unless-stopped
ports:
- ${IP}:${DB_PORT}:27017
volumes:
- ${DATA_DIR}/data/mongodb:/data/db
###### pritunl.conf model #####################################
# Adapt the data to your compose ##############
# then place it at ${DATA_DIR}/data/pritunl.conf ##############
###############################################################
# {
# "mongodb_uri": "mongodb://0.0.0.0:27017/pritunl",
# "server_key_path": "/var/lib/pritunl/pritunl.key",
# "log_path": "/var/log/pritunl.log",
# "static_cache": true,
# "server_cert_path": "/var/lib/pritunl/pritunl.crt",
# "temp_path": "/tmp/pritunl_%r",
# "bind_addr": "0.0.0.0",
# "debug": false,
# "www_path": "/usr/share/pritunl/www",
# "local_address_interface": "auto"
# }
###############################################################

pritunl-zero.compose.yaml.txt Executable file

@ -0,0 +1,19 @@
services:
pritunl-zero:
container_name: pritunl-zero-front-${IP}-${PORT_HTTPS}-${PORT_HTTP}
ports:
- ${IP}:${PORT_HTTP}:80
- ${IP}:${PORT_HTTPS}:443
environment:
- MONGO_URI=mongodb://${IP}:${DB_PORT}/pritunl-zero
- NODE_ID=${ID}
image: docker.io/pritunl/pritunl-zero
mongodb:
image: mongo:latest
container_name: pritunl-zero-back-${IP}-${DB_PORT}
restart: unless-stopped
ports:
- ${IP}:${DB_PORT}:27017
volumes:
- ${DATA_DIR}/data/mongodb:/data/db

projectsend.compose.yaml.txt Executable file

@ -0,0 +1,19 @@
services:
web:
restart: unless-stopped
image: terrestris/projectsend:latest
volumes:
- ${DATA_DIR}/config:/config
- ${DATA_DIR}/data:/data
ports:
- ${PORT}:80
mysql:
restart: unless-stopped
image: mariadb:10.5
volumes:
- ${DATA_DIR}/bdd:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: ${DBROOTPASS}
MYSQL_DATABASE: projectsend
MYSQL_USER: projectsend
MYSQL_PASSWORD: ${DBPASS}

@ -0,0 +1,19 @@
services:
pwgen:
ports:
- ${IP}:${PORT}:5069
environment:
- NO_API_CHECK=${NO_API_CHECK}
- PW_LENGTH=${PW_LENGTH}
- PW_INCLUDE_UPPERCASE=${PW_INCLUDE_UPPERCASE}
- PW_INCLUDE_DIGITS=${PW_INCLUDE_DIGITS}
- PW_EXCLUDE_HOMOGLYPHS=${PW_EXCLUDE_HOMOGLYPHS}
- PP_WORD_COUNT=${PP_WORD_COUNT}
- PP_CAPITALIZE=${PP_CAPITALIZE}
- PP_SEPARATOR_TYPE=${PP_SEPARATOR_TYPE}
- PP_USER_DEFINED_SEPARATOR=${PP_USER_DEFINED_SEPARATOR}
- PP_MAX_WORD_LENGTH=${PP_MAX_WORD_LENGTH}
- PP_INCLUDE_NUMBERS=${PP_INCLUDE_NUMBERS}
- PP_INCLUDE_SPECIAL_CHARS=${PP_INCLUDE_SPECIAL_CHARS}
image: jocxfin/pwgen:latest
networks: {}

revolt.compose.yaml.txt Executable file

@ -0,0 +1,106 @@
services:
# MongoDB database
database:
image: mongo
restart: always
volumes:
- ./data/db:/data/db
# Redis server
redis:
image: eqalpha/keydb
restart: always
# S3-compatible storage server
minio:
image: minio/minio
command: server /data
volumes:
- ./data/minio:/data
environment:
MINIO_ROOT_USER: minioautumn
MINIO_ROOT_PASSWORD: minioautumn
MINIO_DOMAIN: minio
networks:
default:
aliases:
- revolt-uploads.minio
# legacy support:
- attachments.minio
- avatars.minio
- backgrounds.minio
- icons.minio
- banners.minio
- emojis.minio
restart: always
# Caddy web server
caddy:
image: caddy
restart: always
env_file: .env.web
ports:
- "80:80"
- "443:443"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile
- ./data/caddy-data:/data
- ./data/caddy-config:/config
# API server (delta)
api:
image: ghcr.io/revoltchat/server:20241024-1
depends_on:
- database
- redis
volumes:
- ./Revolt.toml:/Revolt.toml
restart: always
# Events service (quark)
events:
image: ghcr.io/revoltchat/bonfire:20241024-1
depends_on:
- database
- redis
volumes:
- ./Revolt.toml:/Revolt.toml
restart: always
# Web App (revite)
web:
image: ghcr.io/revoltchat/client:master
restart: always
env_file: .env.web
# File server (autumn)
autumn:
image: ghcr.io/revoltchat/autumn:20241024-1
depends_on:
- database
- createbuckets
volumes:
- ./Revolt.toml:/Revolt.toml
restart: always
# Metadata and image proxy (january)
january:
image: ghcr.io/revoltchat/january:20241024-1
volumes:
- ./Revolt.toml:/Revolt.toml
restart: always
# Create buckets for minio.
createbuckets:
image: minio/mc
depends_on:
- minio
entrypoint: >
/bin/sh -c "
while ! /usr/bin/mc ready minio; do
/usr/bin/mc config host add minio http://minio:9000 minioautumn minioautumn;
echo 'Waiting minio...' && sleep 1;
done;
/usr/bin/mc mb minio/revolt-uploads;
exit 0;
"

View file

@ -0,0 +1,11 @@
services:
semaphore:
ports:
- ${IP}:${PORT}:3000
image: semaphoreui/semaphore:v2.10.35
environment:
SEMAPHORE_DB_DIALECT: bolt
SEMAPHORE_ADMIN_PASSWORD: ${ID_PASS}
SEMAPHORE_ADMIN_NAME: ${ID_NAME}
SEMAPHORE_ADMIN_EMAIL: ${ID_MAIL}
SEMAPHORE_ADMIN: ${ID_NAME}

View file

@ -0,0 +1,43 @@
services:
mysql:
restart: unless-stopped
image: mysql:8.0
hostname: mysql
volumes:
- semaphore-mysql:/var/lib/mysql
environment:
MYSQL_RANDOM_ROOT_PASSWORD: 'yes'
MYSQL_DATABASE: semaphore
MYSQL_USER: semaphore
MYSQL_PASSWORD: semaphore
semaphore:
restart: unless-stopped
ports:
- 3000:3000
image: semaphoreui/semaphore:latest
environment:
SEMAPHORE_DB_USER: semaphore
SEMAPHORE_DB_PASS: semaphore
SEMAPHORE_DB_HOST: mysql # for postgres, change to: postgres
SEMAPHORE_DB_PORT: 3306 # change to 5432 for postgres
SEMAPHORE_DB_DIALECT: mysql # for postgres, change to: postgres
SEMAPHORE_DB: semaphore
SEMAPHORE_PLAYBOOK_PATH: /tmp/semaphore/
SEMAPHORE_ADMIN_PASSWORD: changeme
SEMAPHORE_ADMIN_NAME: admin
SEMAPHORE_ADMIN_EMAIL: admin@localhost
SEMAPHORE_ADMIN: admin
SEMAPHORE_ACCESS_KEY_ENCRYPTION: gs72mPntFATGJs9qK0pQ0rKtfidlexiMjYCH9gWKhTU=
SEMAPHORE_LDAP_ACTIVATED: 'no' # if you wish to use ldap, set to: 'yes'
SEMAPHORE_LDAP_HOST: dc01.local.example.com
SEMAPHORE_LDAP_PORT: '636'
SEMAPHORE_LDAP_NEEDTLS: 'yes'
      SEMAPHORE_LDAP_DN_BIND: 'uid=bind_user,cn=users,cn=accounts,dc=local,dc=example,dc=com'
SEMAPHORE_LDAP_PASSWORD: 'ldap_bind_account_password'
SEMAPHORE_LDAP_DN_SEARCH: 'dc=local,dc=example,dc=com'
      SEMAPHORE_LDAP_SEARCH_FILTER: "(&(uid=%s)(memberOf=cn=ipausers,cn=groups,cn=accounts,dc=local,dc=example,dc=com))"
TZ: UTC
depends_on:
- mysql # for postgres, change to: postgres
volumes:
semaphore-mysql: # to use postgres, switch to: semaphore-postgres

View file

@ -0,0 +1,42 @@
services:
postgres:
restart: unless-stopped
image: postgres:14
hostname: postgres
volumes:
- semaphore-postgres:/var/lib/postgresql/data
environment:
POSTGRES_USER: semaphore
POSTGRES_PASSWORD: semaphore
POSTGRES_DB: semaphore
semaphore:
restart: unless-stopped
ports:
- 3000:3000
image: semaphoreui/semaphore:latest
environment:
SEMAPHORE_DB_USER: semaphore
SEMAPHORE_DB_PASS: semaphore
      SEMAPHORE_DB_HOST: postgres # for mysql, change to: mysql
      SEMAPHORE_DB_PORT: 5432 # change to 3306 for mysql
      SEMAPHORE_DB_DIALECT: postgres # for mysql, change to: mysql
SEMAPHORE_DB: semaphore
SEMAPHORE_PLAYBOOK_PATH: /tmp/semaphore/
SEMAPHORE_ADMIN_PASSWORD: changeme
SEMAPHORE_ADMIN_NAME: admin
SEMAPHORE_ADMIN_EMAIL: admin@localhost
SEMAPHORE_ADMIN: admin
SEMAPHORE_ACCESS_KEY_ENCRYPTION: gs72mPntFATGJs9qK0pQ0rKtfidlexiMjYCH9gWKhTU=
SEMAPHORE_LDAP_ACTIVATED: 'no' # if you wish to use ldap, set to: 'yes'
SEMAPHORE_LDAP_HOST: dc01.local.example.com
SEMAPHORE_LDAP_PORT: '636'
SEMAPHORE_LDAP_NEEDTLS: 'yes'
      SEMAPHORE_LDAP_DN_BIND: 'uid=bind_user,cn=users,cn=accounts,dc=local,dc=example,dc=com'
SEMAPHORE_LDAP_PASSWORD: 'ldap_bind_account_password'
SEMAPHORE_LDAP_DN_SEARCH: 'dc=local,dc=example,dc=com'
      SEMAPHORE_LDAP_SEARCH_FILTER: "(&(uid=%s)(memberOf=cn=ipausers,cn=groups,cn=accounts,dc=local,dc=example,dc=com))"
TZ: UTC
depends_on:
      - postgres # for mysql, change to: mysql
volumes:
  semaphore-postgres: # to use mysql, switch to: semaphore-mysql

15
siyuan-notes.compose.yaml.txt Executable file
View file

@ -0,0 +1,15 @@
services:
main:
image: b3log/siyuan
container_name: siyuan-note-${IP}-${PORT}-${UID}
command: ['--workspace=/siyuan/${WORKSPACE_NAME}/', '--accessAuthCode=${PASS}']
ports:
- ${IP}:${PORT}:6806
volumes:
- ${DATA_DIR}:/siyuan/${WORKSPACE_NAME}
restart: unless-stopped
environment:
# A list of time zone identifiers can be found at https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
- TZ=${TIMEZONE}
- PUID=${UID} # Customize user ID
- PGID=${UID} # Customize group ID
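#### EXAMPLE .env SKETCH (hypothetical values) ####
# Placeholder values for the variables used above; none of them come from the
# original file, so adjust everything to your own setup.
# IP=192.168.1.10
# PORT=6806
# UID=1000
# WORKSPACE_NAME=main
# PASS=change-me
# DATA_DIR=/srv/siyuan
# TIMEZONE=Europe/Paris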

68
ssm-prod.compose.yaml.txt Executable file
View file

@ -0,0 +1,68 @@
services:
proxy:
restart: unless-stopped
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-proxy:latest"
container_name: proxy-ssm-${IP}-${PORT}
ports:
- "${IP}:${PORT}:8000"
depends_on:
- client
- mongo
- server
- redis
labels:
wud.display.name: "SSM - Proxy"
wud.watch.digest: false
mongo:
container_name: mongo-ssm-${IP}-${PORT}
image: mongo
restart: unless-stopped
volumes:
- ${DATA_DIR}/db:/data/db
command: --quiet
labels:
wud.display.name: "SSM - MongoDB"
redis:
container_name: cache-ssm-${IP}-${PORT}
image: redis
restart: unless-stopped
volumes:
- ${DATA_DIR}/cache:/data
command: --save 60 1
labels:
wud.display.name: "SSM - Redis"
server:
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-server:latest"
container_name: server-ssm-${IP}-${PORT}
restart: unless-stopped
external_links:
- mongo
- redis
depends_on:
- mongo
- redis
environment:
NODE_ENV: production
SECRET: ${ENV_KEY}
SALT: ${ENV_SALTKEY}
VAULT_PWD: ${ENV_PWD}
DB_HOST: mongo
DB_NAME: ssm
DB_PORT: 27017
REDIS_HOST: redis
REDIS_PORT: 6379
volumes:
- ${DATA_DIR}/playbooks:/playbooks
- ${DATA_DIR}/config:/ansible-config
labels:
wud.display.name: "SSM - Server"
wud.watch.digest: false
client:
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-client:latest"
container_name: client-ssm-${IP}-${PORT}
restart: unless-stopped
depends_on:
- server
labels:
wud.display.name: "SSM - Client"
wud.watch.digest: false

8
starbase80.compose.yaml.txt Executable file
View file

@ -0,0 +1,8 @@
services:
homepage:
image: jordanroher/starbase-80
ports:
- ${IP}:${PORT}:4173
volumes:
- ./config.json:/app/src/config/config.json
      - ./icons:/app/public/icons # or wherever you like; icon paths in config.json are relative to /app/public
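#### EXAMPLE config.json ENTRY (hypothetical) ####
# Illustrates the comment above: an icon path such as "/icons/jellyfin.png"
# resolves to the ./icons bind mount because icons are served from /app/public.
# The field names are assumptions, not taken from this repo; check the
# starbase-80 README for the exact schema.
# [
#   {
#     "category": "Media",
#     "services": [
#       { "name": "Jellyfin", "uri": "https://jellyfin.example.com", "icon": "/icons/jellyfin.png" }
#     ]
#   }
# ]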

21
tabby.compose.yaml.txt Normal file
View file

@ -0,0 +1,21 @@
services:
tabby:
build: .
restart: always
container_name: tabby-${IP}-${PORT}
depends_on:
- db
ports:
- ${IP}:${PORT}:80
environment:
- DATABASE_URL=mysql://root:${DBPASS}@db/tabby
- PORT=80
- DEBUG=False
- DOCKERIZE_ARGS="-wait tcp://db:3306 -timeout 60s"
db:
image: mariadb:10.7.1
restart: always
environment:
MARIADB_DATABASE: tabby
MYSQL_ROOT_PASSWORD: ${DBPASS}

View file

@ -0,0 +1,28 @@
services:
tianji:
image: moonrailgun/tianji
ports:
- ${DIFFUSION_PORT}:12345
environment:
DATABASE_URL: postgresql://tianji:${DBPASS}@postgres:5432/tianji
JWT_SECRET: ${JWT_KEY}
ALLOW_REGISTER: "${REGMODE}"
ALLOW_OPENAPI: "${APIMODE}"
depends_on:
- postgres
restart: always
postgres:
image: postgres:15.4-alpine
environment:
POSTGRES_DB: tianji
POSTGRES_USER: tianji
POSTGRES_PASSWORD: ${DBPASS}
volumes:
- ${DATA_DIR}:/var/lib/postgresql/data
restart: always
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
interval: 5s
timeout: 5s
retries: 5

12
tududi-project.compose.yaml.txt Executable file
View file

@ -0,0 +1,12 @@
services:
tududi:
environment:
- TUDUDI_USER_EMAIL=${U_MAIL}
- TUDUDI_USER_PASSWORD=${U_PASS}
- TUDUDI_SESSION_SECRET=${SESSION_SECRET}
- TUDUDI_INTERNAL_SSL_ENABLED=false
volumes:
- ${DATA_DIR}:/usr/src/app/tududi_db
ports:
- ${DIFFUSION}:9292
    image: chrisvel/tududi:${APP_VER} # 0.20 at the time this compose was written; this project does NOT publish a :latest tag!
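#### EXAMPLE .env SKETCH (hypothetical values) ####
# Placeholder values for the variables used above; none of them come from the
# original file, so adjust everything to your own setup.
# U_MAIL=admin@example.com
# U_PASS=change-me
# SESSION_SECRET=generate-a-long-random-string
# DATA_DIR=/srv/tududi
# DIFFUSION=9292            # host side of the port mapping (an IP:PORT pair also works)
# APP_VER=0.20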

10
unbrel-os.compose.yaml.txt Executable file
View file

@ -0,0 +1,10 @@
services:
umbrel:
image: dockurr/umbrel
container_name: umbrel-${IP}-${PORT}
ports:
- ${IP}:${PORT}:80
volumes:
- ${DATA_DIR}:/data
- "/var/run/docker.sock:/var/run/docker.sock"
stop_grace_period: 1m

232
voltaserve.compose.yaml.txt Executable file
View file

@ -0,0 +1,232 @@
########################################
## UNFINISHED COMPOSE - DO NOT USE !! ##
########################################
volumes:
cockroach:
minio:
meilisearch:
redis:
services:
cockroach:
image: cockroachdb/cockroach:latest-v24.2
ports:
- ${VOLTASERVE_POSTGRES_PORT}:26257
- ${VOLTASERVE_COCKROACH_CONSOLE_PORT}:8080
environment:
COCKROACH_DATABASE: voltaserve
COCKROACH_USER: voltaserve
volumes:
- cockroach:/cockroach/cockroach-data
command: start-single-node --insecure
healthcheck:
test: cockroach sql --insecure --execute='SELECT 1;' || exit 1
minio:
image: minio/minio:RELEASE.2024-09-09T16-59-28Z
ports:
- ${VOLTASERVE_MINIO_PORT}:9000
- ${VOLTASERVE_MINIO_CONSOLE_PORT}:9001
environment:
MINIO_ROOT_USER: voltaserve
MINIO_ROOT_PASSWORD: voltaserve
MINIO_REGION: us-east-1
volumes:
- minio:/data
command: server /data --console-address ":9001"
meilisearch:
image: getmeili/meilisearch:v1.10.1
ports:
- ${VOLTASERVE_MEILISEARCH_PORT}:7700
volumes:
- meilisearch:/meili_data
healthcheck:
test: curl --fail http://127.0.0.1:7700/health || exit 1
redis:
image: redis:7.4
ports:
- ${VOLTASERVE_REDIS_PORT}:6379
volumes:
- redis:/data
healthcheck:
test: redis-cli ping || exit 1
maildev:
image: maildev/maildev:2.1.0
ports:
- ${VOLTASERVE_MAILDEV_SMTP_PORT}:1025
- ${VOLTASERVE_MAILDEV_WEB_PORT}:1080
healthcheck:
test: wget --quiet --spider http://127.0.0.1:1080 || exit 1
api:
image: voltaserve/api
build:
context: ./api
ports:
- ${VOLTASERVE_API_PORT}:8080
environment:
- PORT=8080
- CONVERSION_URL=http://conversion:8083
- LANGUAGE_URL=http://language:8084
- MOSAIC_URL=http://mosaic:8085
- POSTGRES_URL=postgresql://voltaserve@cockroach:26257/voltaserve
- S3_URL=minio:9000
- SEARCH_URL=http://meilisearch:7700
- PUBLIC_UI_URL=http://${VOLTASERVE_HOSTNAME}:${VOLTASERVE_UI_PORT}
- REDIS_ADDRESS=redis:6379
- SMTP_HOST=${VOLTASERVE_SMTP_HOST}
- SMTP_PORT=${VOLTASERVE_SMTP_PORT}
- SMTP_SECURE=${VOLTASERVE_SMTP_SECURE}
- SMTP_USERNAME=${VOLTASERVE_SMTP_USERNAME}
- SMTP_PASSWORD=${VOLTASERVE_SMTP_PASSWORD}
- SMTP_SENDER_ADDRESS=${VOLTASERVE_SMTP_SENDER_ADDRESS}
- SMTP_SENDER_NAME=${VOLTASERVE_SMTP_SENDER_NAME}
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8080/v3/health || exit 1
depends_on:
- cockroach
- redis
- minio
- meilisearch
restart: on-failure
idp:
image: voltaserve/idp
build:
context: ./idp
ports:
- ${VOLTASERVE_IDP_PORT}:8081
environment:
- PORT=8081
- POSTGRES_URL=postgresql://voltaserve@cockroach:26257/voltaserve
- SEARCH_URL=http://meilisearch:7700
- PUBLIC_UI_URL=http://${VOLTASERVE_HOSTNAME}:${VOLTASERVE_UI_PORT}
- SMTP_HOST=${VOLTASERVE_SMTP_HOST}
- SMTP_PORT=${VOLTASERVE_SMTP_PORT}
- SMTP_SECURE=${VOLTASERVE_SMTP_SECURE}
- SMTP_USERNAME=${VOLTASERVE_SMTP_USERNAME}
- SMTP_PASSWORD=${VOLTASERVE_SMTP_PASSWORD}
- SMTP_SENDER_ADDRESS=${VOLTASERVE_SMTP_SENDER_ADDRESS}
- SMTP_SENDER_NAME=${VOLTASERVE_SMTP_SENDER_NAME}
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8081/v3/health || exit 1
depends_on:
- cockroach
- meilisearch
- minio
restart: on-failure
ui:
image: voltaserve/ui
build:
context: ./ui
ports:
- ${VOLTASERVE_UI_PORT}:3000
environment:
- API_URL=http://api:8080
- IDP_URL=http://idp:8081
healthcheck:
test: wget --quiet --spider http://127.0.0.1:3000/index.html || exit 1
depends_on:
- idp
- api
restart: on-failure
webdav:
image: voltaserve/webdav
build:
context: ./webdav
ports:
- ${VOLTASERVE_WEBDAV_PORT}:8082
environment:
- PORT=8082
- IDP_URL=http://idp:8081
- API_URL=http://api:8080
- REDIS_ADDRESS=redis:6379
- S3_URL=minio:9000
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8082/v3/health || exit 1
depends_on:
- idp
- api
restart: on-failure
conversion:
image: voltaserve/conversion
build:
context: ./conversion
ports:
- ${VOLTASERVE_CONVERSION_PORT}:8083
environment:
- PORT=8083
- ENABLE_INSTALLER=true
- API_URL=http://api:8080
- LANGUAGE_URL=http://language:8084
- MOSAIC_URL=http://mosaic:8085
- S3_URL=minio:9000
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8083/v3/health || exit 1
depends_on:
- api
- minio
restart: on-failure
language:
image: voltaserve/language
build:
context: ./language
ports:
- ${VOLTASERVE_LANGUAGE_PORT}:8084
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8084/v3/health || exit 1
restart: on-failure
mosaic:
image: voltaserve/mosaic
build:
context: ./mosaic
ports:
- ${VOLTASERVE_MOSAIC_PORT}:8085
environment:
- S3_URL=minio:9000
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8085/v3/health || exit 1
restart: on-failure
console:
image: voltaserve/console
build:
context: ./console
dockerfile: Dockerfile
ports:
- ${VOLTASERVE_CONSOLE_PORT}:8086
environment:
- PORT=8086
- HOST=0.0.0.0
- POSTGRES_URL=cockroach
- POSTGRES_PORT=26257
- POSTGRES_NAME=voltaserve
- POSTGRES_USER=voltaserve
- WORKERS=4
- SECURITY_JWT_SIGNING_KEY=586cozl1x9m6zmu4fg8iwi6ajazguehcm9qdfgd5ndo2pc3pcn
- SECURITY_CORS_ORIGINS=http://localhost:3000
- JWT_ALGORITHM=HS256
- URL=localhost
- API_URL=http://api:8080
- IDP_URL=http://idp:8081
- WEBDAV_URL=http://webdav:8082
- CONVERSION_URL=http://conversion:8083
- LANGUAGE_URL=http://language:8084
- MOSAIC_URL=http://mosaic:8085
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8086/liveness || exit 1
depends_on:
- api
- idp
- webdav
- conversion
- language
- mosaic
- cockroach
restart: on-failure
migrations:
image: voltaserve/migrations
build:
context: ./migrations
environment:
- DATABASE_URL=postgresql://voltaserve@cockroach:26257/voltaserve
depends_on:
- cockroach
restart: on-failure

View file

@ -0,0 +1,28 @@
services:
pritunl:
image: ghcr.io/jippi/docker-pritunl:latest
container_name: pritunl-${IP}-http${HTTP_WEBPORT}-ssl${HTTPS_WEBPORT}-srv${SERVER_PORT}
privileged: true
ports:
- ${IP}:${HTTP_WEBPORT}:80
- ${IP}:${SERVER_PORT}:22550/udp
- ${IP}:${SERVER_PORT}:22550/tcp
- ${IP}:${HTTPS_WEBPORT}:443
dns: ${VPN_DNS_SERVER}
restart: unless-stopped
volumes:
- ${DATA_DIR}/data/pritunl.conf:/etc/pritunl.conf
- ${DATA_DIR}/data/pritunl:/var/lib/pritunl
environment:
      - PRITUNL_MONGODB_URI=mongodb://mongodb:27017/pritunl # mongo listens on its default port 27017 inside the compose network
depends_on:
- mongodb
mongodb:
image: mongo:latest
container_name: mongodb
restart: unless-stopped
volumes:
- ${DATA_DIR}/data/mongodb:/data/db
networks: {}
# The MongoDB connection is handled automatically over the internal network between the containers.

25
vscode-lsio.compose.yaml.txt Executable file
View file

@ -0,0 +1,25 @@
services:
code-server:
image: lscr.io/linuxserver/code-server:latest
container_name: code-server-${DOMAIN}-${UID}-${IP}-${PORT}
environment:
- PUID=${UID}
- PGID=${UID}
- TZ=${TIMEZONE}
- PASSWORD=${USER_PASSWORD}
- SUDO_PASSWORD=${SUDO_PASSWORD}
- PROXY_DOMAIN=${DOMAIN}
- DEFAULT_WORKSPACE=/${WORKSPACE_NAME} #optional
volumes:
- ${DATA_DIR}/config:/config
- ${WORKSPACE_DIR}:/${WORKSPACE_NAME}
ports:
- ${IP}:${PORT}:8443
restart: always
deploy:
resources:
limits:
memory: ${SPECS_MAX_RAM}
reservations:
memory: ${SPECS_MIN_RAM}
networks: {}

View file

@ -0,0 +1,36 @@
services:
windows:
image: dockurr/windows
container_name: windows-${IP}-${PORT_WEB}-${PORT_RDP}
environment:
      VERSION: ${WINDOWS_VERSION} # See the version list below the compose. A custom URL pointing to a web ISO also works.
devices:
- /dev/kvm
cap_add:
- NET_ADMIN
ports:
- ${IP}:${PORT_WEB}:8006
- ${IP}:${PORT_RDP}:3389/tcp
- ${IP}:${PORT_RDP}:3389/udp
stop_grace_period: 2m
#### VERSION LISTING ####
# win11 Windows 11 Pro 5.4 GB
# ltsc11 Windows 11 LTSC 4.2 GB
# win11e Windows 11 Enterprise 5.8 GB
# win10 Windows 10 Pro 5.7 GB
# ltsc10 Windows 10 LTSC 4.6 GB
# win10e Windows 10 Enterprise 5.2 GB
# win8 Windows 8.1 Pro 4.0 GB
# win8e Windows 8.1 Enterprise 3.7 GB
# win7 Windows 7 Enterprise 3.0 GB
# vista Windows Vista Enterprise 3.0 GB
# winxp Windows XP Professional 0.6 GB
# 2025 Windows Server 2025 5.0 GB
# 2022 Windows Server 2022 4.7 GB
# 2019 Windows Server 2019 5.3 GB
# 2016 Windows Server 2016 6.5 GB
# 2012 Windows Server 2012 4.3 GB
# 2008 Windows Server 2008 3.0 GB
# 2003 Windows Server 2003 0.6 GB
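#### EXAMPLE .env SKETCH (hypothetical values) ####
# Placeholder values for the variables used above; pick WINDOWS_VERSION from
# the listing. None of these values come from the original file.
# IP=192.168.1.10
# PORT_WEB=8006
# PORT_RDP=3389
# WINDOWS_VERSION=win11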

View file

@ -0,0 +1,36 @@
services:
windows:
image: dockurr/windows
container_name: windows-${IP}-${PORT_WEB}-${PORT_RDP}
environment:
      VERSION: ${WINDOWS_VERSION} # See the version list below the compose. A custom URL pointing to a web ISO also works.
devices:
- /dev/kvm
cap_add:
- NET_ADMIN
ports:
- ${IP}:${PORT_WEB}:8006
- ${IP}:${PORT_RDP}:3389/tcp
- ${IP}:${PORT_RDP}:3389/udp
stop_grace_period: 2m
#### VERSION LISTING ####
# win11 Windows 11 Pro 5.4 GB
# ltsc11 Windows 11 LTSC 4.2 GB
# win11e Windows 11 Enterprise 5.8 GB
# win10 Windows 10 Pro 5.7 GB
# ltsc10 Windows 10 LTSC 4.6 GB
# win10e Windows 10 Enterprise 5.2 GB
# win8 Windows 8.1 Pro 4.0 GB
# win8e Windows 8.1 Enterprise 3.7 GB
# win7 Windows 7 Enterprise 3.0 GB
# vista Windows Vista Enterprise 3.0 GB
# winxp Windows XP Professional 0.6 GB
# 2025 Windows Server 2025 5.0 GB
# 2022 Windows Server 2022 4.7 GB
# 2019 Windows Server 2019 5.3 GB
# 2016 Windows Server 2016 6.5 GB
# 2012 Windows Server 2012 4.3 GB
# 2008 Windows Server 2008 3.0 GB
# 2003 Windows Server 2003 0.6 GB

View file

@ -0,0 +1,49 @@
services:
  # sitename: the domain or subdomain; do not use spaces or uppercase characters!
  # This version includes:
  #   phpMyAdmin (no auto-login)
  #   mariadb (latest), auto-configured
  #   wordpress, auto-configured
  # An example .env sketch is appended after this compose.
wordpress-db:
container_name: wp-${sitename}-bdd
image: mariadb:latest
volumes:
- ${DATA_DIR}/${sitename}/db:/var/lib/mysql
restart: always
environment:
MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASS}
MYSQL_DATABASE: wp-${sitename}
MYSQL_USER: ${DB_USER}
MYSQL_PASSWORD: ${DB_PASS}
wordpress:
container_name: wp-${sitename}-front
depends_on:
- wordpress-db
image: wordpress:latest
ports:
- ${DIFFUSION}:80
restart: always
volumes:
- ${DATA_DIR}/${sitename}/uploads.ini:/usr/local/etc/php/conf.d/uploads.ini
- ${DATA_DIR}/${sitename}/html:/var/www/html
environment:
WORDPRESS_DB_HOST: wp-${sitename}-bdd:3306
WORDPRESS_DB_USER: ${DB_USER}
WORDPRESS_DB_PASSWORD: ${DB_PASS}
WORDPRESS_DB_NAME: wp-${sitename}
wordpress-pma:
image: phpmyadmin/phpmyadmin
container_name: wp-${sitename}-pma
depends_on:
- wordpress-db
environment:
- PMA_HOST=wp-${sitename}-bdd
- PMA_PORT=3306
- PMA_ARBITRARY=1
ports:
- ${BDD_MANAGER_PORT}:80
restart: always
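#### EXAMPLE .env SKETCH (hypothetical values) ####
# Placeholder values for the variables used above; none of them come from the
# original file, so adjust everything to your own setup.
# sitename=myblog
# DATA_DIR=/srv/wordpress
# DB_ROOT_PASS=change-me-root
# DB_USER=wp_user
# DB_PASS=change-me
# DIFFUSION=127.0.0.1:8080
# BDD_MANAGER_PORT=8081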

12
yacht.compose.yaml.txt Normal file
View file

@ -0,0 +1,12 @@
version: "3.3"
services:
yacht:
ports:
- ${IP}:${PORT}:8000
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ${DATA_DIR}:/config
container_name: yacht-${IP}-${PORT}
restart: unless-stopped
image: selfhostedpro/yacht
networks: {}

View file

@ -0,0 +1,96 @@
services:
zabbix-server:
image: ${ZABBIX_SERVER_IMAGE:-zabbix/zabbix-server-pgsql:ubuntu-7.2.1}
container_name: server
restart: unless-stopped
ports:
- 10051:10051
environment:
DB_SERVER_HOST: postgres
DB_SERVER_PORT: 5432
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
depends_on:
- postgres
networks:
- network-zabbix
zabbix-frontend:
image: ${ZABBIX_FRONTEND_IMAGE:-zabbix/zabbix-web-nginx-pgsql:ubuntu-7.2.1}
restart: unless-stopped
container_name: frontend
ports:
- 8080:8080
- 8443:8443
environment:
DB_SERVER_HOST: postgres
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
PHP_TZ: ${TZ}
ZBX_SERVER_HOST: zabbix-server
ZBX_SERVER_PORT: 10051
depends_on:
- zabbix-server
networks:
- network-zabbix
zabbix-agent:
image: ${ZABBIX_AGENT_IMAGE:-zabbix/zabbix-agent:ubuntu-7.2.1}
container_name: zabbix-agent
restart: unless-stopped
ports:
- 10050:10050
environment:
ZBX_ACTIVE_ALLOW: false
TZ: ${TZ}
ZBX_SERVER_HOST: zabbix-server
ZBX_SERVER_PORT: 10051
ZBX_HOSTNAME: zabbix-agent
ZBX_HOSTNAMEITEM: system.hostname
depends_on:
- zabbix-server
networks:
- network-zabbix
postgres:
image: ${POSTGRES_IMAGE:-postgres:latest}
# command: -c ssl=on -c ssl_cert_file=/run/secrets/server-cert.pem -c ssl_key_file=/run/secrets/server-key.pem -c ssl_ca_file=/run/secrets/root-ca.pem
container_name: postgres
restart: unless-stopped
ports:
- 5432:5432
volumes:
- postgres:/var/lib/postgresql/data
# - pgsql_socket:/var/run/postgresql/
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
      PGDATA: /var/lib/postgresql/data/pgdata # the official postgres image reads PGDATA (PG_DATA is ignored)
networks:
- network-zabbix
grafana:
image: ${GRAFANA_IMAGE:-grafana/grafana}
container_name: grafana
restart: unless-stopped
ports:
- 3000:3000
environment:
GF_SECURITY_ADMIN_USER: ${GRAFANA_USER:-admin}
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_SECRET:-12345}
GF_INSTALL_PLUGINS: alexanderzobnin-zabbix-app
TZ: ${TZ}
user: "472"
volumes:
- grafana:/var/lib/grafana
- ./grafana/grafana.ini:/etc/grafana/grafana.ini
- ./grafana/provisioning:/etc/grafana/provisioning
depends_on:
- zabbix-frontend
networks:
- network-zabbix
volumes:
postgres: {}
grafana: {}
networks:
network-zabbix:
driver: bridge

73
zabbix.compose.yaml.txt Normal file
View file

@ -0,0 +1,73 @@
version: "3.3"
services:
# Zabbix database
zabbix-db:
container_name: zabbix-db
image: mariadb:10.11.4
restart: always
volumes:
- ${ZABBIX_DATA_PATH}/zabbix-db/mariadb:/var/lib/mysql:rw
- ${ZABBIX_DATA_PATH}/zabbix-db/backups:/backups
command:
- mariadbd
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_bin
- --default-authentication-plugin=mysql_native_password
environment:
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
stop_grace_period: 1m
# Zabbix server
zabbix-server:
container_name: zabbix-server
image: zabbix/zabbix-server-mysql:ubuntu-6.4-latest
restart: always
ports:
- 10051:10051
volumes:
- /etc/localtime:/etc/localtime:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/alertscripts:/usr/lib/zabbix/alertscripts:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/externalscripts:/usr/lib/zabbix/externalscripts:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/dbscripts:/var/lib/zabbix/dbscripts:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/export:/var/lib/zabbix/export:rw
- ${ZABBIX_DATA_PATH}/zabbix-server/modules:/var/lib/zabbix/modules:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/enc:/var/lib/zabbix/enc:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/ssh_keys:/var/lib/zabbix/ssh_keys:ro
- ${ZABBIX_DATA_PATH}/zabbix-server/mibs:/var/lib/zabbix/mibs:ro
environment:
- MYSQL_ROOT_USER=root
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- DB_SERVER_HOST=zabbix-db
- ZBX_STARTPINGERS=${ZBX_STARTPINGERS}
depends_on:
- zabbix-db
stop_grace_period: 30s
sysctls:
- net.ipv4.ip_local_port_range=1024 65000
- net.ipv4.conf.all.accept_redirects=0
- net.ipv4.conf.all.secure_redirects=0
- net.ipv4.conf.all.send_redirects=0
# Zabbix web UI
zabbix-web:
container_name: zabbix-web
image: zabbix/zabbix-web-nginx-mysql:ubuntu-6.4-latest
restart: always
ports:
- 8080:8080
volumes:
- /etc/localtime:/etc/localtime:ro
- ${ZABBIX_DATA_PATH}/zabbix-web/nginx:/etc/ssl/nginx:ro
- ${ZABBIX_DATA_PATH}/zabbix-web/modules/:/usr/share/zabbix/modules/:ro
environment:
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- DB_SERVER_HOST=zabbix-db
- ZBX_SERVER_HOST=zabbix-server
- ZBX_SERVER_NAME=Zabbix Docker
- PHP_TZ=Europe/London
depends_on:
- zabbix-db
- zabbix-server
stop_grace_period: 10s
networks: {}

34
zipline-share.compose.yaml.txt Executable file
View file

@ -0,0 +1,34 @@
services:
postgres:
image: postgres:15
restart: unless-stopped
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=${DB_PASS}
      - POSTGRES_DB=postgres # the official postgres image expects POSTGRES_DB, not POSTGRES_DATABASE
volumes:
- ${DATA_DIR}/database:/var/lib/postgresql/data
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U postgres']
interval: 10s
timeout: 5s
retries: 5
zipline:
image: ghcr.io/diced/zipline
container_name: zipline-${DIFFUSION}
ports:
- ${DIFFUSION}:3000
restart: unless-stopped
environment:
- CORE_RETURN_HTTPS=false
- CORE_SECRET=${KEY}
- CORE_HOST=0.0.0.0
- CORE_PORT=3000
- CORE_DATABASE_URL=postgres://postgres:${DB_PASS}@postgres/postgres
- CORE_LOGGER=true
volumes:
- ${DATA_DIR}/uploads:/zipline/uploads
- ${DATA_DIR}/public:/zipline/public
depends_on:
- 'postgres'

41
zitadel.compose.yaml.txt Normal file
View file

@ -0,0 +1,41 @@
services:
zitadel:
container_name: zitadel-iam-${IP}-${PORT}
restart: always
image: ghcr.io/zitadel/zitadel:latest
command: start-from-init --masterkey "${MASTER_KEY}" --tlsMode disabled
environment:
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: ${DB_PASS_U}
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: ${DB_PASS_A}
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
depends_on:
db:
condition: service_healthy
ports:
- ${IP}:${PORT}:8080
db:
restart: always
image: postgres:16-alpine
environment:
PGUSER: postgres
      POSTGRES_PASSWORD: ${DB_PASS_A} # must match ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD above
healthcheck:
test:
- CMD-SHELL
- pg_isready
- -d
- zitadel
- -U
- postgres
interval: 10s
timeout: 30s
retries: 5
start_period: 20s
networks: {}

16
zoraxy-gateway.compose.yaml.txt Executable file
View file

@ -0,0 +1,16 @@
services:
zoraxy-docker:
image: zoraxydocker/zoraxy:latest
container_name: gateway-${INSTANCE_NAME}-${ADMIN_PORT}
restart: always
ports:
- ${HTTP_PORT}:80
- ${HTTPS_PORT}:443
- ${ADMIN_PORT}:8000
dns:
- ${DNS_1}
- ${DNS_2}
volumes:
- ${DATA_DIR}/config:/opt/zoraxy/config/
environment:
ARGS: -noauth=${NOAUTH}