forked from Curt-Park/mnist-fastapi-celery-triton
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Makefile
67 lines (51 loc) · 1.57 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# ---- configuration -----------------------------------------------------------
PYTHON := 3.9
N_PROC := 8
CONDA_CH := defaults conda-forge pytorch
# conda environment name = current directory name (builtin, no shell fork)
BASENAME := $(notdir $(CURDIR))
# CUDA detection: expands to "nvcc" when the compiler is on PATH, empty otherwise.
# "2> /dev/null" silences `which` when nvcc is absent; the previous "2> NULL"
# was the Windows null-device spelling and left a stray file named NULL here.
# := so the shell probe runs once at parse time, not on every expansion.
NVCC_USE := $(notdir $(shell which nvcc 2> /dev/null))

# Every target in this file is a command, not a file; declare them phony so a
# same-named file can never make them appear "up to date".
.PHONY: env setup broker worker triton api dashboard load load-triton \
        setup-dev format lint utest cov
# setup
# `env` creates a conda environment named after the current directory
# ($(BASENAME)), pinned to python $(PYTHON). Activate it before `make setup`.
env:
	conda create -n $(BASENAME) python=$(PYTHON)
# `setup` installs runtime dependencies into the active environment from the
# channels in $(CONDA_CH); a second, pip-only requirements file is kept
# separate because some packages need pip on Apple-silicon (M1) machines.
setup:
	conda install -y --file requirements.txt $(addprefix -c ,$(CONDA_CH))
	pip install -r requirements-pip.txt # separated for M1 chips
# Parse-time conditional (evaluated when make reads the file, not per-recipe):
# NVCC_USE expands to "nvcc" only when the CUDA compiler was found on PATH,
# so the GPU requirements are installed only on CUDA-capable machines.
ifeq ($(NVCC_USE),nvcc)
	conda install -y --file requirements-gpu.txt $(addprefix -c ,$(CONDA_CH))
endif
# `broker` runs a foreground Redis server used as the Celery message broker.
# NOTE(review): --protected-mode no lets Redis accept unauthenticated remote
# connections — acceptable for local development only; never expose this port.
broker:
	redis-server --protected-mode no
# services
# `worker` runs the Celery worker under watchmedo so it auto-restarts whenever
# a *.py file under src/worker changes; $(N_PROC) prefork worker processes.
worker:
	PYTHONPATH=src watchmedo auto-restart \
		--directory=src/worker \
		--pattern="*.py" \
		--recursive -- \
		celery -A worker.celery worker -P processes -c $(N_PROC) -l INFO
# --pattern is quoted: unquoted *.py would be glob-expanded by the shell if a
# matching file ever exists in the working directory, breaking the watch filter.
# `triton` serves ./model_repository with NVIDIA Triton (1 GPU), mapping host
# ports 9000-9002 to the container's HTTP/gRPC/metrics ports 8000-8002.
# $(CURDIR) is a make builtin and always set; $(PWD) comes from the calling
# shell's environment and can be missing or stale (sudo, cron, some shells).
triton:
	docker run --gpus 1 --ipc host --rm -p 9000:8000 -p 9001:8001 -p 9002:8002 \
		-v $(CURDIR)/model_repository:/models nvcr.io/nvidia/tritonserver:22.02-py3 \
		tritonserver --model-repository=/models
# `api` serves the FastAPI app on all interfaces, port 8000, reloading on code
# changes (development mode).
api:
	PYTHONPATH=src uvicorn api.server:app --reload --host 0.0.0.0 --port 8000
# `dashboard` waits until the Celery workers are reachable, then starts the
# Flower monitoring UI on port 5555.
dashboard:
	# invoke the wait script directly; the previous `sh -c "./…"` wrapper
	# spawned an extra shell with identical semantics
	./wait_for_workers.sh
	PYTHONPATH=src celery -A worker.celery flower --port=5555
# load tests
# `load` drives the plain prediction endpoint with locust; `load-triton`
# drives the Triton-backed endpoint. Both use the same locustfile, selecting
# the user class by name.
load:
	locust -f test/ltest/locustfile.py MnistPredictionUser
load-triton:
	locust -f test/ltest/locustfile.py MnistPredictionTritonUser
# for developers
# `setup-dev` installs the developer toolchain and registers the pre-commit
# hooks in the local git checkout.
setup-dev:
	# -y matches the other conda installs in this file so the target never
	# blocks on an interactive confirmation prompt
	conda install -y --file requirements-dev.txt $(addprefix -c ,$(CONDA_CH))
	pre-commit install
# `format` rewrites the whole tree in place: black for code style, then isort
# for import ordering.
format:
	black .
	isort .
# `lint` runs static analysis (flake8, pylint, mypy) over src via their pytest
# plugins; fails non-zero on any finding.
lint:
	pytest src --flake8 --pylint --mypy
# `utest` runs the unit tests with coverage over src, emitting both an HTML
# report (htmlcov/, see `cov`) and a terminal summary; config from setup.cfg.
utest:
	PYTHONPATH=src pytest test/utest --cov=src --cov-report=html --cov-report=term --cov-config=setup.cfg
# `cov` opens the HTML coverage report produced by `utest`.
# NOTE(review): `open` is macOS-specific — on Linux this needs xdg-open; the
# repo appears mac-oriented (see the M1 note in `setup`), so left as-is.
cov:
	open htmlcov/index.html