version: '3'
services:
nginx-proxy:
build: ./nginx
    # TODO: Note that this is built with ultimate-bad-bot-blocker scripts
    # that currently need to be run manually to update
    # (possibly after deleting the bots.d folder first - not confirmed)
    # Eventually, this needs to be verified and put on a cron job:
    # docker exec -t nginx-proxy bash
    # /usr/local/sbin/setup-ngxblocker -x
    # /usr/local/sbin/update-ngxblocker -x email
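    # A minimal host-side crontab sketch of that job (an untested assumption,
    # reusing the update command from above verbatim; the weekly schedule is arbitrary):
    # 0 4 * * 0 docker exec nginx-proxy /usr/local/sbin/update-ngxblocker -x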
container_name: nginx-proxy
ports:
- "80:80"
- "443:443"
restart: always
#environment:
# - HTTPS_METHOD=noredirect
volumes:
- ./nginx/conf.d:/etc/nginx/conf.d
- ./nginx/vhost.d:/etc/nginx/vhost.d
- ./nginx/bots.d:/etc/nginx/bots.d
- ./nginx/certs:/etc/nginx/certs:rw
- nginx:/usr/share/nginx/html
#- nginx:/app/nginx.tmpl
- /var/run/docker.sock:/tmp/docker.sock:ro
#- ./nginx/htpasswd:/etc/nginx/htpasswd
acme-companion:
image: nginxproxy/acme-companion:2.2
container_name: nginx-proxy-acme
environment:
- DEFAULT_EMAIL=${EMAIL}
# Uncomment this for testing
#- LETSENCRYPT_TEST=true
volumes_from:
- nginx-proxy
volumes:
- ./nginx/conf.d:/etc/nginx/conf.d
- ./nginx/vhost.d:/etc/nginx/vhost.d
- ./nginx/certs:/etc/nginx/certs:rw
- nginx:/usr/share/nginx/html
- acme:/etc/acme.sh
- /var/run/docker.sock:/var/run/docker.sock:ro
depends_on:
- nginx-proxy
restart: always
portfolio:
    # TODO: This will eventually be rewritten with something like Vue
container_name: portfolio
build: ./portfolio
    # To just run the server, use the following command
#command: bash -c "npm run serve"
# To reinstall the packages run the following command instead
command: bash -c "npm install && npm run serve"
volumes:
- portfolio:/src/node_modules
- ./portfolio/src:/src
environment:
- VIRTUAL_HOST=${DOMAIN},www.${DOMAIN}
#- VIRTUAL_PATH=/
      # For a subdirectory, the baseURL needs to be set in app.js for static files and routes (see the sketch below)
- VIRTUAL_PATH=/legacy
- VIRTUAL_DEST=/legacy
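      # e.g., a hypothetical app.js sketch (assuming Express; the names are
      # illustrative, not the actual file contents):
      #   app.use('/legacy', express.static('public'))
      #   app.use('/legacy', router)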
- VIRTUAL_PORT=3000
#- LETSENCRYPT_HOST=${DOMAIN},www.${DOMAIN},gitea.${DOMAIN} #this last one is for legacy support
#- LETSENCRYPT_EMAIL=${EMAIL}
ports:
- "3000:3000"
restart: always
depends_on:
mongo:
condition: service_healthy
#restheart:
#nginx-proxy:
#labels:
# com.github.nginx-proxy.nginx-proxy.keepalive: "64"
portfolio-nuxt:
# NOTE: This is the rewrite of the frontend
# NOTE: The build process for nuxt seems to require that sharp be reinstalled in the .output folder
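    # e.g., something along these lines after a build (an assumption based on the
    # note above; Nuxt 3 bundles server deps into .output/server/node_modules):
    # cd .output/server && npm install sharp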
container_name: portfolio-nuxt
build: ./portfolio-nuxt
# To rebuild the site and the server run this
command: bash -c "npm run build && node .output/server/index.mjs"
# To just start the server run this
#command: bash -c "node .output/server/index.mjs"
# To start the server in dev mode
#command: bash -c "npm run dev -o"
volumes:
- portfolio-nuxt:/src/node_modules
- ./portfolio-nuxt:/src
environment:
- VIRTUAL_HOST=${DOMAIN},www.${DOMAIN}
- VIRTUAL_PATH=/
#- VIRTUAL_DEST=/dev
      # For a subdirectory, the baseURL needs to be set in the nuxt config (see the sketch below)
#- VIRTUAL_PATH=/dev
#- VIRTUAL_DEST=/dev
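      # e.g., in nuxt.config.ts (a sketch assuming Nuxt 3; '/dev/' mirrors the commented path above):
      #   export default defineNuxtConfig({ app: { baseURL: '/dev/' } })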
- VIRTUAL_PORT=5000
- LETSENCRYPT_HOST=${DOMAIN},www.${DOMAIN},gitea.${DOMAIN} #this last one is for legacy support
- LETSENCRYPT_EMAIL=${EMAIL}
ports:
- "5000:5000"
restart: always
depends_on:
- restheart
- nginx-proxy
#labels:
# com.github.nginx-proxy.nginx-proxy.keepalive: "64"
mongo:
container_name: mongo
    # using mongo:4 or mongo:5 as opposed to mongo:6 for server status in mongo-express and because of bugs
    # mongo 5 requires AVX support, so if the machine is not capable of AVX, use mongo:4
    # NOTE: the mongo 4 shell is mongo and the mongo 5 shell is mongosh!
    # These need to be changed accordingly in the healthcheck and in the mongosetup.sh file for the mongo-init container
image: mongo:5
#image: mongo:4
restart: always
environment:
- MONGO_INITDB_ROOT_USERNAME=${USER}
- MONGO_INITDB_ROOT_PASSWORD=${PASSWORD}
- MONGO_INITDB_DATABASE=portfolio
command: ["--keyFile", "/auth/keyfile", "--replSet", "rs0", "--bind_ip_all"]
    # NOTE: If starting from scratch, create a key for mongo, then put it in ./mongo/auth/
# openssl rand -base64 756 > keyfile
# chmod 600 keyfile
# sudo chown 999 keyfile
# sudo chgrp 999 keyfile
    # NOTE: If you tar-archive the site and move it without retaining permissions,
    # you will need to re-run the last three commands on the keyfile to make it work
ports:
- 27017:27017
volumes:
- ./portfolio/mongo/data/db:/data/db
- ./portfolio/mongo/data/configdb:/data/configdb
- ./portfolio/mongo/auth/keyfile:/auth/keyfile
- ./portfolio/mongo/db_backups:/db_backups
healthcheck:
# mongo 5
test: echo 'rs.status().ok' | mongosh --host mongo:27017 -u $${MONGO_INITDB_ROOT_USERNAME} -p $${MONGO_INITDB_ROOT_PASSWORD} --quiet | grep 1
# mongo 4
#test: echo 'rs.status().ok' | mongo --host mongo:27017 -u $${MONGO_INITDB_ROOT_USERNAME} -p $${MONGO_INITDB_ROOT_PASSWORD} --quiet | grep 1
interval: 15s
start_period: 20s
mongo-init:
container_name: mongo-init
# using mongo5 as opposed to mongo:6 for server status in mongo-express and because of bugs
image: mongo:5
restart: on-failure
volumes:
# mongo 5
- ./portfolio/mongo/scripts/mongo5setup.sh:/scripts/mongo5setup.sh
# mongo 4
#- ./portfolio/mongo/scripts/mongo4setup.sh:/scripts/mongo4setup.sh
      # these two are necessary; otherwise they get recreated as anonymous volumes
- ./portfolio/mongo/data/db:/data/db
- ./portfolio/mongo/data/configdb:/data/configdb
# mongo 5
entrypoint: ["bash", "/scripts/mongo5setup.sh" ]
# mongo 4
#entrypoint: ["bash", "/scripts/mongo4setup.sh" ]
environment:
- MONGO_INITDB_ROOT_USERNAME=${USER}
- MONGO_INITDB_ROOT_PASSWORD=${PASSWORD}
depends_on:
mongo:
condition: service_started
mongo-express:
# using mongo-express:0.54 as opposed to mongo-express:1 for server status and because of bugs
image: mongo-express:0.54
container_name: mongo-express
restart: always
environment:
- ME_CONFIG_MONGODB_URL=mongodb://${USER}:${PASSWORD}@mongo:27017/?replicaSet=rs0
- ME_CONFIG_MONGODB_ADMINUSERNAME=${USER}
- ME_CONFIG_MONGODB_ADMINPASSWORD=${PASSWORD}
- ME_CONFIG_MONGODB_ENABLE_ADMIN=true
- ME_CONFIG_BASICAUTH_USERNAME=${USER}
- ME_CONFIG_BASICAUTH_PASSWORD=${PASSWORD}
- ME_CONFIG_SITE_BASEURL=/admin
- ME_CONFIG_SITE_GRIDFS_ENABLED=true
- VIRTUAL_HOST=${DOMAIN},admin.${DOMAIN}
- VIRTUAL_PATH=/admin/
- VIRTUAL_PORT=8081
#volumes:
# - ./nginx/certs:/etc/nginx/certs:ro
depends_on:
mongo:
condition: service_healthy
ports:
- "8081:8081"
restheart:
image: softinstigate/restheart:7
container_name: restheart
# NOTE: the api_admin endpoint only works locally
environment:
- RHO=
/mongo/mongo-mounts[1]->{'where':'/api','what':'portfolio'};
/mongo/mongo-mounts[2]->{'where':'/api_admin','what':'restheart'};
/mclient/connection-string->'mongodb://${USER}:${PASSWORD}@mongo:27017/?replicaSet=rs0';
/http-listener/host->'0.0.0.0';
      # NOTE: If starting from scratch, you must set the admin password!
# curl -u admin:secret -X PATCH localhost:8080/api_admin/users/admin -H "Content-Type: application/json" -d '{ "password": "my-strong-password" }'
      # NOTE: An ACL entry to allow unauthenticated users to perform GETs must be added
      # For now, it was added to the restheart db manually
      # by adding the following to the acl collection with curl or using mongo-express
# {
# predicate: 'path-prefix[/api] and method[GET]',
# roles: ['$unauthenticated'],
# priority: 50
# }
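      # e.g., with curl against the /api_admin mount defined above (a sketch;
      # substitute real admin credentials):
      # curl -u admin:<password> -X POST localhost:8080/api_admin/acl \
      #   -H "Content-Type: application/json" \
      #   -d '{"predicate":"path-prefix[/api] and method[GET]","roles":["$unauthenticated"],"priority":50}'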
      # The commented line below does not seem to do anything; realm creation should somehow use a file instead
#/fileRealmAuthenticator/users[userid='admin']/password->'${PASSWORD}';
- VIRTUAL_HOST=${DOMAIN},www.${DOMAIN}
- VIRTUAL_PATH=/api/
- VIRTUAL_DEST=/api/
- VIRTUAL_PORT=8080
depends_on:
mongo:
condition: service_healthy
#command: ["--envFile", "/opt/restheart/etc/default.properties"]
ports:
- "8080:8080"
restart: always
#volumes:
# - ./restheart:/opt/restheart/etc:ro
gitea:
image: gitea/gitea:1
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=mysql
- GITEA__database__HOST=mysql-gitea
- GITEA__database__NAME=gitea
- GITEA__database__USER=${USER}
- GITEA__database__PASSWD=${PASSWORD}
- GITEA__server__LANDING_PAGE=/${USER}
- GITEA__attachment__MAX_SIZE=5000
#- GITEA__repository.upload__FILE_MAX_SIZE=5000
      # NOTE: This next line can be commented out if you want to run the wizard locally,
      # but it needs to be set properly as the base URL to work remotely,
      # no matter how you run the wizard
- GITEA__server__ROOT_URL=https://${DOMAIN}/code/
- HTTP_PORT=4000
- LFS_START_SERVER=true
- DISABLE_REGISTRATION=true
- RUN_MODE=prod
- VIRTUAL_HOST=${DOMAIN},www.${DOMAIN},gitea.${DOMAIN} # this last one is for legacy support
- VIRTUAL_PORT=4000
- VIRTUAL_PATH=/code/
- VIRTUAL_DEST=/
restart: always
volumes:
- ./gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "4000:4000"
- "222:22"
depends_on:
mysql-gitea:
condition: service_healthy
mysql-gitea:
image: mariadb:10
container_name: mysql-gitea
restart: always
environment:
- MYSQL_ROOT_PASSWORD=${PASSWORD}
- MYSQL_PASSWORD=${PASSWORD}
- MYSQL_DATABASE=gitea
- MYSQL_USER=${USER}
volumes:
- ./gitea/mysql:/var/lib/mysql
#- ./mysql_gitea/etc:/etc/mysql/conf.d
#- ./mysql_gitea/init:/docker-entrypoint-initdb.d
healthcheck:
test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
interval: 15s
start_period: 20s
nextcloud:
image: nextcloud:25
container_name: nextcloud
restart: always
volumes:
#- ./nextcloud/data:/var/www/html/data
#- nextcloud:/var/www/html
- ./nextcloud/html:/var/www/html
environment:
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=${USER}
- MYSQL_PASSWORD=${PASSWORD}
- MYSQL_HOST=mysql-nextcloud
- NEXTCLOUD_ADMIN_USER=${USER}
- NEXTCLOUD_ADMIN_PASSWORD=${PASSWORD}
- NEXTCLOUD_TRUSTED_DOMAINS=${DOMAIN} www.${DOMAIN}
#- NEXTCLOUD_INIT_LOCK=true
- APACHE_DISABLE_REWRITE_IP=1
- TRUSTED_PROXIES=nginx-proxy
- OVERWRITEHOST=${DOMAIN}
- OVERWRITEWEBROOT=/cloud
- OVERWRITEPROTOCOL=https
#- OVERWRITECLIURL=http://localhost/
- OVERWRITECLIURL=https://unboundedpress.org
      # NOTE: The configurations above make it work with the subdirectory,
      # but you cannot set VIRTUAL_PORT, for reasons I have not figured out
- VIRTUAL_HOST=${DOMAIN},www.${DOMAIN}
- VIRTUAL_PATH=/cloud/
- VIRTUAL_DEST=/
      # TODO: add redis and cron
#- REDIS_HOST=redis
depends_on:
mysql-nextcloud:
condition: service_healthy
#redis:
ports:
- 8888:80
collabora:
image: collabora/code:22.05.14.3.1
container_name: collabora
depends_on:
- nextcloud
cap_add:
- MKNOD
environment:
- username=${USER}
- password=${PASSWORD}
- domain=${DOMAIN}
- VIRTUAL_HOST=${DOMAIN},www.${DOMAIN}
- VIRTUAL_PATH=/collab/
- VIRTUAL_DEST=/
# Extra parameters to Collabora, see also
# https://www.collaboraoffice.com/code/nginx-reverse-proxy/:
# SSL terminates at the proxy
- extra_params=--o:ssl.enable=false --o:ssl.termination=true
      # NOTE: The file nginx/vhost.d/unboundedpress.org handles
      # routing for Collabora on production only
ports:
- 9980:9980
cron-nextcloud:
image: nextcloud:25
container_name: cron-nextcloud
restart: always
volumes:
- ./nextcloud/html:/var/www/html
entrypoint: /cron.sh
depends_on:
mysql-nextcloud:
condition: service_healthy
#redis:
mysql-nextcloud:
image: mariadb:10
container_name: mysql-nextcloud
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
restart: always
environment:
- MYSQL_ROOT_PASSWORD=${PASSWORD}
- MYSQL_PASSWORD=${PASSWORD}
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=${USER}
volumes:
- ./nextcloud/mysql:/var/lib/mysql
healthcheck:
test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
interval: 15s
start_period: 20s
volumes:
nginx:
#nextcloud:
acme:
portfolio:
portfolio-nuxt: