Skip to content

Commit

Permalink
refactor(ml): model downloading (immich-app#3545)
Browse files Browse the repository at this point in the history
* download facial recognition models

* download hf models

* simplified logic

* updated `predict` for facial recognition

* ensure download method is called

* fixed repo_id for clip

* fixed download destination

* use st's own `snapshot_download`

* conditional download

* fixed predict method

* check if loaded

* minor fixes

* updated mypy overrides

* added pytest-mock

* updated tests

* updated lock
  • Loading branch information
mertalev authored Aug 6, 2023
1 parent 2f26a7e commit c73832b
Show file tree
Hide file tree
Showing 10 changed files with 350 additions and 274 deletions.
2 changes: 1 addition & 1 deletion machine-learning/app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ class Settings(BaseSettings):
min_face_score: float = 0.7
test_full: bool = False

class Config:
    """Pydantic settings config: read env vars with the ML service prefix."""

    # e.g. MACHINE_LEARNING_CACHE_FOLDER maps to the cache_folder setting.
    env_prefix = "MACHINE_LEARNING_"
    case_sensitive = False

Expand Down
88 changes: 1 addition & 87 deletions machine-learning/app/conftest.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from types import SimpleNamespace
from typing import Any, Iterator, TypeAlias
from typing import Iterator, TypeAlias
from unittest import mock

import numpy as np
Expand All @@ -22,91 +21,6 @@ def cv_image(pil_image: Image.Image) -> ndarray:
return np.asarray(pil_image)[:, :, ::-1] # PIL uses RGB while cv2 uses BGR


@pytest.fixture
def mock_classifier_pipeline() -> Iterator[mock.Mock]:
    """Patch the image-classification pipeline with a stub of fixed predictions.

    The stub mimics the real pipeline's input contract: a single PIL image
    yields one prediction list, a list of PIL images yields one prediction
    list per image, and anything else raises TypeError.
    """
    with mock.patch("app.models.image_classification.pipeline") as model:
        classifier_preds = [
            {"label": "that's an image alright", "score": 0.8},
            {"label": "well it ends with .jpg", "score": 0.1},
            {"label": "idk, im just seeing bytes", "score": 0.05},
            {"label": "not sure", "score": 0.04},
            {"label": "probably a virus", "score": 0.01},
        ]

        def forward(
            inputs: Image.Image | list[Image.Image], **kwargs: Any
        ) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
            # BUG FIX: the original `elif not isinstance(inputs, Image.Image)`
            # also fired for a valid list of images (a list is not an Image),
            # so batched input always raised TypeError and the batch-return
            # branch below was unreachable.
            if isinstance(inputs, list):
                if not all(isinstance(img, Image.Image) for img in inputs):
                    raise TypeError
                return [classifier_preds] * len(inputs)
            if not isinstance(inputs, Image.Image):
                raise TypeError
            return classifier_preds

        model.return_value = forward
        yield model


@pytest.fixture
def mock_st() -> Iterator[mock.Mock]:
    """Patch SentenceTransformer with a stub returning a fixed 512-dim vector.

    A single input gets one embedding; a homogeneous batch (all images or
    all strings) gets a stacked array of identical embeddings; a mixed or
    otherwise invalid batch raises TypeError.
    """
    with mock.patch("app.models.clip.SentenceTransformer") as model:
        embedding = np.random.rand(512).astype(np.float32)

        def encode(inputs: Image.Image | list[Image.Image], **kwargs: Any) -> ndarray | list[ndarray]:
            # mypy complains unless isinstance(inputs, list) is used explicitly
            if isinstance(inputs, list):
                all_images = all(isinstance(inst, Image.Image) for inst in inputs)
                all_text = all(isinstance(inst, str) for inst in inputs)
                if not (all_images or all_text):
                    raise TypeError
                return np.stack([embedding] * len(inputs))
            return embedding

        mocked = mock.Mock()
        mocked.encode = encode
        model.return_value = mocked
        yield model


@pytest.fixture
def mock_faceanalysis() -> Iterator[mock.Mock]:
    """Patch FaceAnalysis with a stub whose get() returns two canned faces."""
    with mock.patch("app.models.facial_recognition.FaceAnalysis") as model:

        def make_face(score: float) -> SimpleNamespace:
            # SimpleNamespace so fields are reachable via dot notation,
            # mirroring insightface's Face objects.
            return SimpleNamespace(
                bbox=np.random.rand(4).astype(np.float32),
                kps=np.random.rand(5, 2).astype(np.float32),
                det_score=np.array([score]).astype(np.float32),
                normed_embedding=np.random.rand(512).astype(np.float32),
            )

        face_preds = [make_face(0.67), make_face(0.4)]

        def get(image: np.ndarray[int, np.dtype[np.float32]], **kwargs: Any) -> list[SimpleNamespace]:
            if not isinstance(image, np.ndarray):
                raise TypeError
            return face_preds

        mocked = mock.Mock()
        mocked.get = get
        model.return_value = mocked
        yield model


@pytest.fixture
def mock_get_model() -> Iterator[mock.Mock]:
with mock.patch("app.models.cache.InferenceModel.from_model_type", autospec=True) as mocked:
Expand Down
6 changes: 1 addition & 5 deletions machine-learning/app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
from PIL import Image

from .config import settings
from .models.base import InferenceModel
from .models.cache import ModelCache
from .schemas import (
EmbeddingResponse,
Expand Down Expand Up @@ -38,10 +37,7 @@ async def load_models() -> None:

# Get all models
for model_name, model_type in models:
if settings.eager_startup:
await app.state.model_cache.get(model_name, model_type)
else:
InferenceModel.from_model_type(model_type, model_name)
await app.state.model_cache.get(model_name, model_type, eager=settings.eager_startup)


@app.on_event("startup")
Expand Down
45 changes: 37 additions & 8 deletions machine-learning/app/models/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,22 +14,43 @@
class InferenceModel(ABC):
_model_type: ModelType

def __init__(
    self, model_name: str, cache_dir: Path | str | None = None, eager: bool = True, **model_kwargs: Any
) -> None:
    """Set up the model, either loading eagerly or just downloading its files.

    NOTE(review): this span contained interleaved lines from the pre-refactor
    implementation (old signature, direct `self.load(...)` calls); this is the
    reconstructed post-refactor version.

    Args:
        model_name: identifier of the model to use.
        cache_dir: where model files live; defaults to the per-type cache dir.
        eager: load weights now (True) or only ensure files are downloaded.
    """
    self.model_name = model_name
    self._loaded = False
    self._cache_dir = Path(cache_dir) if cache_dir is not None else get_cache_dir(model_name, self.model_type)

    loader = self.load if eager else self.download
    try:
        loader(**model_kwargs)
    except (OSError, InvalidProtobuf):
        # A corrupt or partially-downloaded cache can raise either error;
        # wipe the cache and retry once.
        self.clear_cache()
        loader(**model_kwargs)

def download(self, **model_kwargs: Any) -> None:
    """Fetch model files, skipping the work when they are already cached."""
    if self.cached:
        return
    self._download(**model_kwargs)

def load(self, **model_kwargs: Any) -> None:
    """Ensure files are downloaded, load the model, and mark it ready.

    NOTE(review): a stale `@abstractmethod` decorator from the removed
    abstract declaration sat atop this now-concrete method; dropped it —
    the body delegates to the `_download`/`_load` subclass hooks.
    """
    self.download(**model_kwargs)
    self._load(**model_kwargs)
    self._loaded = True

def predict(self, inputs: Any) -> Any:
    """Run inference on `inputs`, lazily loading the model on first use."""
    if not self._loaded:
        self.load()
    return self._predict(inputs)

@abstractmethod
def _predict(self, inputs: Any) -> Any:
    # Subclass hook invoked by predict(); performs the actual inference.
    ...

@abstractmethod
def _download(self, **model_kwargs: Any) -> None:
    # Subclass hook invoked by download(); fetches model files into the cache.
    # NOTE(review): a stale `def predict(...)` signature line from the removed
    # abstract method was interleaved here; removed it.
    ...

@abstractmethod
def _load(self, **model_kwargs: Any) -> None:
    # Subclass hook invoked by load(); instantiates the model from the cache.
    ...

@property
Expand All @@ -44,6 +65,10 @@ def cache_dir(self) -> Path:
def cache_dir(self, cache_dir: Path) -> None:
self._cache_dir = cache_dir

@property
def cached(self) -> bool:
    """True when the cache directory exists and is non-empty."""
    if not self.cache_dir.exists():
        return False
    return any(self.cache_dir.iterdir())

@classmethod
def from_model_type(cls, model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
subclasses = {subclass._model_type: subclass for subclass in cls.__subclasses__()}
Expand All @@ -55,7 +80,11 @@ def from_model_type(cls, model_type: ModelType, model_name: str, **model_kwargs:
def clear_cache(self) -> None:
    """Remove everything at the cache path, then recreate an empty directory.

    NOTE(review): both the old `elif not rmtree...` and the new
    `if not rmtree...` guard lines were interleaved here; kept the new form.

    Raises:
        RuntimeError: if rmtree cannot safely delete on this platform.
    """
    if not self.cache_dir.exists():
        return
    if not rmtree.avoids_symlink_attacks:
        raise RuntimeError("Attempted to clear cache, but rmtree is not safe on this platform.")

    # The cache path may be a file (e.g. a partially-written artifact).
    if self.cache_dir.is_dir():
        rmtree(self.cache_dir)
    else:
        self.cache_dir.unlink()
    self.cache_dir.mkdir(parents=True, exist_ok=True)
15 changes: 12 additions & 3 deletions machine-learning/app/models/clip.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from pathlib import Path
from typing import Any

from PIL.Image import Image
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import snapshot_download

from ..schemas import ModelType
from .base import InferenceModel
Expand All @@ -11,12 +11,21 @@
class CLIPSTEncoder(InferenceModel):
_model_type = ModelType.CLIP

def _download(self, **model_kwargs: Any) -> None:
    """Download the CLIP model snapshot into the local cache directory.

    Bare model names are resolved under the ``sentence-transformers`` hub
    namespace; names containing ``/`` are used as the repo id verbatim.
    Flax/Rust/TF weight files are skipped (presumably only torch weights
    are needed — matches the original ignore list).

    NOTE(review): a stale `def load(...)` signature line from the removed
    implementation preceded this def; removed it.
    """
    if "/" in self.model_name:
        repo_id = self.model_name
    else:
        repo_id = f"sentence-transformers/{self.model_name}"
    snapshot_download(
        cache_dir=self.cache_dir,
        repo_id=repo_id,
        library_name="sentence-transformers",
        ignore_files=["flax_model.msgpack", "rust_model.ot", "tf_model.h5"],
    )

def _load(self, **model_kwargs: Any) -> None:
    """Instantiate the SentenceTransformer model from the cache directory."""
    cache_folder = self.cache_dir.as_posix()
    self.model = SentenceTransformer(self.model_name, cache_folder=cache_folder, **model_kwargs)

def _predict(self, image_or_text: Image | str) -> list[float]:
    """Embed a PIL image or a text query and return the vector as a list.

    NOTE(review): a stale `def predict(...)` signature line from the removed
    implementation preceded this def; removed it.
    """
    embedding = self.model.encode(image_or_text)
    return embedding.tolist()
73 changes: 50 additions & 23 deletions machine-learning/app/models/facial_recognition.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,12 @@
import zipfile
from pathlib import Path
from typing import Any

import cv2
from insightface.app import FaceAnalysis
import numpy as np
from insightface.model_zoo import ArcFaceONNX, RetinaFace
from insightface.utils.face_align import norm_crop
from insightface.utils.storage import BASE_REPO_URL, download_file

from ..config import settings
from ..schemas import ModelType
Expand All @@ -22,39 +26,62 @@ def __init__(
self.min_score = min_score
super().__init__(model_name, cache_dir, **model_kwargs)

def _download(self, **model_kwargs: Any) -> None:
    """Download the insightface model pack and keep only the needed files.

    Only the detection (``det_*``) and recognition (``w600k_*``) ONNX models
    are extracted from the zip; the archive itself is deleted afterwards.

    NOTE(review): the truncated body of the removed FaceAnalysis-based
    `load` was interleaved before this def; removed it. Also renamed the
    ZipFile binding, which shadowed the builtin `zip`.

    Raises:
        StopIteration: if the archive lacks a det_*/w600k_* member.
    """
    # NOTE(review): download_file's behavior on a missing directory is not
    # visible here; create the cache dir defensively.
    self.cache_dir.mkdir(parents=True, exist_ok=True)
    zip_file = self.cache_dir / f"{self.model_name}.zip"
    download_file(f"{BASE_REPO_URL}/{self.model_name}.zip", zip_file)
    with zipfile.ZipFile(zip_file, "r") as zip_ref:
        members = zip_ref.namelist()
        det_file = next(model for model in members if model.startswith("det_"))
        rec_file = next(model for model in members if model.startswith("w600k_"))
        zip_ref.extractall(self.cache_dir, members=[det_file, rec_file])
    zip_file.unlink()

def _load(self, **model_kwargs: Any) -> None:
    """Load detection/recognition ONNX models from the cache and prime them.

    NOTE(review): a stale `det_size=(640, 640),` line from the removed
    FaceAnalysis.prepare call was interleaved here; the RetinaFace API takes
    `input_size`, so only that keyword is kept.

    Raises:
        FileNotFoundError: if the expected ONNX files are not in the cache.
    """
    try:
        det_file = next(self.cache_dir.glob("det_*.onnx"))
        rec_file = next(self.cache_dir.glob("w600k_*.onnx"))
    except StopIteration:
        raise FileNotFoundError("Facial recognition models not found in cache directory")

    self.det_model = RetinaFace(det_file.as_posix())
    self.rec_model = ArcFaceONNX(rec_file.as_posix())

    # ctx_id=-1 selects CPU execution; the detector runs at a fixed 640x640.
    self.det_model.prepare(
        ctx_id=-1,
        det_thresh=self.min_score,
        input_size=(640, 640),
    )
    self.rec_model.prepare(ctx_id=-1)

def _predict(self, image: cv2.Mat) -> list[dict[str, Any]]:
    """Detect faces in an image and compute an embedding for each.

    NOTE(review): this span contained interleaved lines from the removed
    FaceAnalysis-based `predict` (its def, `faces = self.model.get(...)`,
    the `round()` bounding-box dict, `face.det_score`); this is the
    reconstructed post-refactor version.

    Returns one dict per detected face with the image dimensions, the
    rounded bounding box, the detection score, and the embedding.
    """
    bboxes, kpss = self.det_model.detect(image)
    if bboxes.size == 0:
        return []
    assert isinstance(kpss, np.ndarray)

    # Column 4 holds the detection score; the first four columns are the box.
    scores = bboxes[:, 4].tolist()
    bboxes = bboxes[:, :4].round().tolist()

    results = []
    height, width, _ = image.shape
    for (x1, y1, x2, y2), score, kps in zip(bboxes, scores, kpss):
        # Align the face crop by its keypoints before embedding it.
        cropped_img = norm_crop(image, kps)
        embedding = self.rec_model.get_feat(cropped_img)[0].tolist()
        results.append(
            {
                "imageWidth": width,
                "imageHeight": height,
                "boundingBox": {
                    "x1": x1,
                    "y1": y1,
                    "x2": x2,
                    "y2": y2,
                },
                "score": score,
                "embedding": embedding,
            }
        )
    return results

@property
def cached(self) -> bool:
    """True when at least one ONNX model file is present in the cache dir."""
    if not self.cache_dir.is_dir():
        return False
    return any(self.cache_dir.glob("*.onnx"))
10 changes: 8 additions & 2 deletions machine-learning/app/models/image_classification.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from pathlib import Path
from typing import Any

from huggingface_hub import snapshot_download
from PIL.Image import Image
from transformers.pipelines import pipeline

Expand All @@ -22,14 +23,19 @@ def __init__(
self.min_score = min_score
super().__init__(model_name, cache_dir, **model_kwargs)

def _download(self, **model_kwargs: Any) -> None:
    """Download the HF model snapshot, limited to weights/config/text files.

    NOTE(review): a stale `def load(...)` signature line from the removed
    implementation preceded this def; removed it.
    """
    snapshot_download(
        cache_dir=self.cache_dir, repo_id=self.model_name, allow_patterns=["*.bin", "*.json", "*.txt"]
    )

def _load(self, **model_kwargs: Any) -> None:
    """Build the transformers pipeline for this model from the local cache."""
    kwargs = {"cache_dir": self.cache_dir, **model_kwargs}
    self.model = pipeline(
        self.model_type.value,
        self.model_name,
        model_kwargs=kwargs,
    )

def predict(self, image: Image) -> list[str]:
def _predict(self, image: Image) -> list[str]:
predictions: list[dict[str, Any]] = self.model(image) # type: ignore
tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= self.min_score]

Expand Down
Loading

0 comments on commit c73832b

Please sign in to comment.