Commit: replicate

chenxwh committed Oct 18, 2023
1 parent 673b3c2 commit ef9354f
Showing 3 changed files with 98 additions and 0 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -17,6 +17,8 @@ Ours Hugging Face Demo and Model are released ! Latent Consistency Models are su

Hugging Face Demo: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model)

Replicate Demo: [![Replicate](https://replicate.com/cjwbw/latent-consistency-model/badge)](https://replicate.com/cjwbw/latent-consistency-model)

LCM Model Download: [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7)

<p align="center">
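For reference, once the model is published, the Replicate demo linked above can also be called programmatically. A minimal sketch using the replicate Python client follows; the snippet is not part of this commit, and "<version>" is a placeholder for the actual model version id, which is not shown here.

# Hypothetical example of calling the Replicate deployment from Python.
# Requires `pip install replicate` and REPLICATE_API_TOKEN in the environment.
import replicate

output = replicate.run(
    "cjwbw/latent-consistency-model:<version>",  # "<version>" is a placeholder
    input={
        "prompt": "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
        "num_inference_steps": 8,
    },
)
print(output)  # typically a list of output image URLs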
18 changes: 18 additions & 0 deletions cog.yaml
@@ -0,0 +1,18 @@
# Configuration for Cog ⚙️
# Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md

build:
  gpu: true
  system_packages:
    - "libgl1-mesa-glx"
    - "libglib2.0-0"
  python_version: "3.11"
  python_packages:
    - "accelerate==0.23.0"
    - "torch==2.0.1"
    - "torchvision==0.15.2"
    - "diffusers==0.21.4"
    - "Pillow==10.1.0"
    - "transformers==4.34.1"
    - "opencv-python==4.8.1.78"
predict: "predict.py:Predictor"
78 changes: 78 additions & 0 deletions predict.py
@@ -0,0 +1,78 @@
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

import os
import torch
from diffusers import DiffusionPipeline
from cog import BasePredictor, Input, Path


class Predictor(BasePredictor):
    def setup(self) -> None:
        """Load the model into memory to make running multiple predictions efficient"""
        self.pipe = DiffusionPipeline.from_pretrained(
            "SimianLuo/LCM_Dreamshaper_v7",
            custom_pipeline="latent_consistency_txt2img",
            custom_revision="main",
            cache_dir="model_cache",
            local_files_only=True,
        )
        self.pipe.to(torch_device="cuda", torch_dtype=torch.float32)

    def predict(
        self,
        prompt: str = Input(
            description="Input prompt",
            default="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
        ),
        width: int = Input(
            description="Width of output image. Lower the setting if out of memory.",
            default=768,
        ),
        height: int = Input(
            description="Height of output image. Lower the setting if out of memory.",
            default=768,
        ),
        num_images: int = Input(
            description="Number of images to output.",
            ge=1,
            le=4,
            default=1,
        ),
        num_inference_steps: int = Input(
            description="Number of denoising steps. Can be set to 1~50 steps. LCM supports fast inference even with <= 4 steps. Recommended: 1~8 steps.",
            ge=1,
            le=50,
            default=8,
        ),
        guidance_scale: float = Input(
            description="Scale for classifier-free guidance", ge=1, le=20, default=8.0
        ),
        seed: int = Input(
            description="Random seed. Leave blank to randomize the seed", default=None
        ),
    ) -> list[Path]:
        """Run a single prediction on the model"""
        if seed is None:
            seed = int.from_bytes(os.urandom(2), "big")
        print(f"Using seed: {seed}")
        torch.manual_seed(seed)

        result = self.pipe(
            prompt=prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            num_images_per_prompt=num_images,
            lcm_origin_steps=50,
            output_type="pil",
        ).images

        output_paths = []
        for i, sample in enumerate(result):
            output_path = f"/tmp/out-{i}.png"
            sample.save(output_path)
            output_paths.append(Path(output_path))

        return output_paths
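
Note that setup() loads the pipeline with local_files_only=True from the local model_cache directory, so the weights have to be fetched once before the predictor can run. A minimal pre-download sketch (a hypothetical helper script, not part of this commit) could look like:

# Hypothetical one-off script that populates the local "model_cache" directory
# so that Predictor.setup() can later load the pipeline with local_files_only=True.
from diffusers import DiffusionPipeline

DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_txt2img",
    custom_revision="main",
    cache_dir="model_cache",  # same cache_dir that predict.py reads from
)

Once the cache is populated, running `cog predict -i prompt="..."` locally should exercise the predictor end to end before pushing to Replicate.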
