-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_lora_inference.py
More file actions
31 lines (24 loc) · 1.01 KB
/
run_lora_inference.py
File metadata and controls
31 lines (24 loc) · 1.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
"""Generate dining-room images from a LoRA-fine-tuned Stable Diffusion 2.1
model at several inference-step counts, saving one PNG per step count."""
import os

import torch
from diffusers import StableDiffusionPipeline

# Paths: base checkpoint, fine-tuned LoRA adapter, and where PNGs are written.
model_name = "stabilityai/stable-diffusion-2-1"
lora_weights_path = "./finetune-lora-output/pytorch_lora_weights.safetensors"
output_dir = "./finetune-lora-output"

# Fixed seed so repeated runs produce the same images.
seed = 1337
torch.manual_seed(seed)

# Fall back to CPU when no GPU is present (the original hard-coded "cuda"
# and crashed on CPU-only machines).
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base model, attach the LoRA adapter weights, move to the device.
pipe = StableDiffusionPipeline.from_pretrained(model_name, torch_dtype=torch.float32)
pipe.load_lora_weights(lora_weights_path)
pipe = pipe.to(device)

prompt = "generate a realistic interior room design of a dining room"

# Ensure the output directory exists before any save() call.
os.makedirs(output_dir, exist_ok=True)

# One seeded generator keeps sampling deterministic across the step sweep.
generator = torch.Generator(device).manual_seed(seed)
for steps in [5, 10, 25, 50, 75, 100]:
    # autocast enables mixed precision on CUDA; on CPU it is a cheap no-op wrapper.
    with torch.autocast(device):
        image = pipe(prompt, num_inference_steps=steps, guidance_scale=7.5, generator=generator).images[0]
    # Single source of truth for the path used by both save() and the log line.
    out_path = f"{output_dir}/generated_dining_{steps}.png"
    image.save(out_path)
    print("Image saved to", out_path)