From 5de53ea012af3659dbc7597e916ebc4dec3cff1d Mon Sep 17 00:00:00 2001
From: Zangwei Zheng
Date: Tue, 19 Mar 2024 19:34:50 +0800
Subject: [PATCH] update readme

---
 README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 361bba7d..1769d939 100644
--- a/README.md
+++ b/README.md
@@ -137,14 +137,14 @@ To run inference with our provided weights, first download [T5](https://huggingf
 torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./asserts/texts/t2v_samples.txt
 
 # Sample 16x512x512 (20s/sample, 100 time steps)
-torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x512x512.py --ckpt-path ./path/to/your/ckpt.pth
+torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./asserts/texts/t2v_samples.txt
 
 # Sample 64x512x512 (40s/sample, 100 time steps)
-torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth
+torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./asserts/texts/t2v_samples.txt
 
 # Sample 64x512x512 with sequence parallelism (30s/sample, 100 time steps)
 # sequence parallelism is enabled automatically when nproc_per_node is larger than 1
-torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth
+torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./asserts/texts/t2v_samples.txt
 ```
 
 The speed is tested on H800 GPUs. For inference with other models, see [here](docs/commands.md) for more instructions.
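
The patch adds an explicit `--prompt-path` to each sampling command. As a minimal sketch of how the updated commands are meant to be used (not part of the patch): the prompt file `my_prompts.txt` and its contents are placeholders, and the checkpoint path is the same placeholder used in the README; the script, config, and flags are those shown above.

```bash
# Write one prompt per line into a custom prompt file (placeholder content).
echo "A serene sunset over the ocean." > my_prompts.txt

# Sample 16x256x256 with the custom prompt file; replace the checkpoint path
# with the path to your downloaded weights.
torchrun --standalone --nproc_per_node 1 scripts/inference.py \
    configs/opensora/inference/16x256x256.py \
    --ckpt-path ./path/to/your/ckpt.pth \
    --prompt-path ./my_prompts.txt
```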