Skip to content

Commit

Permalink
Feat/dockerfile (chatchat-space#4271)
Browse files — browse the repository at this point in the history
  • Loading branch information
yuehua-s authored Jun 21, 2024
1 parent 8937877 commit d494684
Show file tree
Hide file tree
Showing 4 changed files with 60 additions and 22 deletions.
46 changes: 28 additions & 18 deletions docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,19 +1,29 @@
# Base Image
FROM python:3.11

# Combine update + install in ONE layer (a separate `apt-get update` layer can
# be cached stale, so later installs fetch outdated package indexes), skip
# recommended packages, and remove the apt lists in the same layer so they
# never persist in the image. libgl1-mesa-glx is the OpenGL runtime needed by
# opencv-python.
RUN apt-get update && \
    apt-get install -y --no-install-recommends libgl1-mesa-glx && \
    rm -rf /var/lib/apt/lists/*

# WORKDIR creates the directory if it does not exist; no separate mkdir needed.
WORKDIR /Langchain-Chatchat
COPY requirements.txt requirements_api.txt requirements_webui.txt /Langchain-Chatchat/

# --no-cache-dir keeps pip's download cache out of the image layers.
# The three requirement files are installed sequentially (same order as before)
# so dependency resolution behaves identically.
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt && \
    pip install --no-cache-dir -r requirements_api.txt && \
    pip install --no-cache-dir -r requirements_webui.txt

# Documentation only (ports are published at `docker run`):
# presumably 8501 = Streamlit WebUI, 7861 = API server, 20000 = model worker — TODO confirm.
EXPOSE 8501
EXPOSE 7861
EXPOSE 20000
# Labels (MAINTAINER is deprecated; LABEL is the supported mechanism)
LABEL maintainer=chatchat

# Environment Variables
# HOME is pointed at the installed chatchat package so the COPY/ADD/WORKDIR
# below resolve inside it. NOTE(review): overriding HOME also affects any tool
# that writes to $HOME at runtime — confirm this is intended.
ENV HOME=/usr/local/lib/python3.11/site-packages/chatchat

# Init Environment: set the container timezone to Asia/Shanghai, install the
# OS libraries required by opencv/rapidocr (libgl1, libglib2.0-0), then the
# Python extras. apt lists are removed in the same layer right after apt use;
# --no-cache-dir keeps pip's download cache out of the image.
# "unstructured[pdf]" is quoted so the shell cannot glob-expand the brackets.
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo "Asia/Shanghai" > /etc/timezone && \
    apt-get update -y && \
    apt-get install -y --no-install-recommends libgl1 libglib2.0-0 && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    pip install --no-cache-dir openpyxl networkx faiss-cpu jq "unstructured[pdf]" \
        opencv-python rapidocr-onnxruntime PyMuPDF rank_bm25 youtube_search && \
    pip install --no-cache-dir streamlit-chatbox==1.1.12.post4

# Install Chatchat
# NOTE(review): -U installs the latest release — pin a version for reproducible builds.
RUN pip install --no-cache-dir -U langchain-chatchat

# Install ModelProvider (client for the external Xinference server)
RUN pip install --no-cache-dir xinference-client

# Make Custom Settings: listen on all interfaces, default LLM = qwen2-instruct
RUN chatchat-config server --default_bind_host=0.0.0.0 && \
    chatchat-config model --default_llm_model qwen2-instruct

# Copy Data
COPY /libs/chatchat-server/chatchat/configs/model_providers.yaml $HOME/configs/model_providers.yaml
# ADD is intentional here: it auto-extracts the local tar archive into $HOME.
ADD /docker/data.tar.gz $HOME/

WORKDIR $HOME
# Documentation only: presumably 7861 = API server, 8501 = Streamlit WebUI — TODO confirm.
# NOTE(review): no USER directive — the container runs as root; consider a non-root user.
EXPOSE 7861 8501
ENTRYPOINT ["chatchat", "-a"]
Binary file added docker/data.tar.gz
Binary file not shown.
28 changes: 28 additions & 0 deletions docker/docker-compose.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Compose stack: Xinference (model serving) + Chatchat, both on the host network.
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and ignored there.
version: '3.9'
services:
  xinference:
    image: xprobe/xinference:v0.12.1
    # Start a local Xinference node listening on all interfaces (default port 9997).
    command: xinference-local -H 0.0.0.0
    # ports:
    #   - "9997:9997"
    # Host networking exposes the service's ports directly; the commented
    # `ports` mappings document which ports would otherwise be published.
    network_mode: "host"
    volumes:
      # Mount a local directory as the container's /root/.xinference so model
      # state persists across restarts; see:
      # https://inference.readthedocs.io/zh-cn/latest/getting_started/using_docker_image.html
      - ~/xinference:/root/.xinference
      # - ~/xinference/cache/huggingface:/root/.cache/huggingface
      # - ~/xinference/cache/modelscope:/root/.cache/modelscope
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    # NOTE(review): `runtime: nvidia` overlaps with the `deploy.resources` GPU
    # reservation above — confirm which one the target Docker version honors.
    runtime: nvidia
  chatchat:
    image: chatimage/chatchat:0.3.0-0622
    # ports:
    #   - "7861:7861"
    #   - "8501:8501"
    network_mode: "host"
    # Optional persistence for chatchat's data directory inside the image:
    # volumes:
    #   - /root/chatchat/data:/usr/local/lib/python3.11/site-packages/chatchat/data
8 changes: 4 additions & 4 deletions libs/chatchat-server/chatchat/configs/model_providers.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,16 @@

xinference:
model_credential:
- model: 'glm-4'
- model: 'glm4-chat'
model_type: 'llm'
model_credentials:
server_url: 'http://127.0.0.1:9997/'
model_uid: 'glm-4'
- model: 'qwen1.5-chat'
model_uid: 'glm4-chat'
- model: 'qwen2-instruct'
model_type: 'llm'
model_credentials:
server_url: 'http://127.0.0.1:9997/'
model_uid: 'qwen1.5-chat'
model_uid: 'qwen2-instruct'
- model: 'bge-large-zh-v1.5'
model_type: 'text-embedding'
model_credentials:
Expand Down

0 comments on commit d494684

Please sign in to comment.