-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: compose.yml
56 lines (55 loc) · 2.06 KB
/
compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
services:
  secondbrain:
    image: ghcr.io/mcasperson/secondbrain
    restart: always
    ports:
      - "8080:8080"
      - "8181:8181"
    environment:
      # These values must be updated to use the details of your own Slack App
      # if you wish to use the Oauth login flow: https://api.slack.com/apps
      # Quoted so YAML does not coerce them to numbers — Compose expects
      # environment values to be strings.
      SB_SLACK_CLIENTID: "1234567890.1234567890"
      SB_SLACK_CLIENTSECRET: "123456789"
      # You should change this to a random value
      SB_ENCRYPTION_PASSWORD: "123456789"
      # This value refers to the sibling container
      SB_OLLAMA_URL: http://ollama:11434
      # Set this to "true" to enable debug logging.
      # Quoted: an unquoted false is a YAML boolean, not the string the
      # Compose spec requires for environment values.
      SB_TOOLS_DEBUG: "false"
      # Define the model to use by SecondBrain.
      # Try "llama3.1" for a larger model, or "llama3.2" for a smaller model.
      # Remember, each model needs to be pulled with the command below.
      # docker exec secondbrain-ollama-1 ollama pull <model name>
      # for example
      # docker exec secondbrain-ollama-1 ollama pull llama3.2
      SB_OLLAMA_MODEL: llama3.2
      # The model used to select a tool should always been llama3.1 or llama3.2
      # as these models have been trained to select tools. The model used here
      # must also be pulled:
      # docker exec secondbrain-ollama-1 ollama pull llama3.1
      SB_OLLAMA_TOOLMODEL: llama3.1
      # This aids in debugging
      SB_EXCEPTIONS_PRINTSTACKTRACE: "true"
    pull_policy: always
    volumes:
      - cache:/cache
  ollama:
    image: ollama/ollama
    restart: always
    volumes:
      - ollama:/root/.ollama
    # Uncomment these lines to enable GPU support
    # You'll also need to install the NVIDIA Container Toolkit on your host
    # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
    # Test that the GPU is available from Docker by running
    # docker run --rm -it --gpus=all nvcr.io/nvidia/k8s/cuda-sample:nbody nbody -gpu -benchmark
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [ gpu ]
volumes:
  ollama:
  cache: