The Diffusion template provides high-performance image generation using Stable Diffusion and other diffusion models. Perfect for text-to-image, image-to-image, and inpainting applications.
What is Stable Diffusion?
Stable Diffusion is a powerful diffusion model that generates high-quality images from text prompts:
🎨 Text-to-image generation from prompts
🖼️ Image-to-image transformation and editing
🎭 Inpainting to fill missing parts of images
🎯 ControlNet for guided generation
⚡ Optimized inference with multiple acceleration techniques
Quick Start
from chutes.chute import NodeSelector
from chutes.chute.template.diffusion import build_diffusion_chute
# Build a diffusion chute for SDXL; NodeSelector pins the GPU requirements.
# NOTE(review): remaining build_diffusion_chute options presumably take
# library defaults — confirm against the chutes template docs.
chute = build_diffusion_chute(
username="myuser",
model_name="stabilityai/stable-diffusion-xl-base-1.0",
revision="main",
node_selector=NodeSelector(
gpu_count=1,
min_vram_gb_per_gpu=12
)
)
This creates a complete diffusion deployment with sensible defaults. For memory-constrained GPUs, you can tune the pipeline as follows:
# Optimize for lower VRAM usage
# NOTE(review): these flags are presumably forwarded to the underlying
# diffusers pipeline (cpu offload / VAE slicing / attention slicing trade
# speed for memory) — confirm against the template implementation.
memory_efficient_chute = build_diffusion_chute(
username="myuser",
model_name="runwayml/stable-diffusion-v1-5",
enable_cpu_offload=True, # Offload to CPU when not in use
enable_vae_slicing=True, # Slice VAE for memory efficiency
enable_attention_slicing=True, # Slice attention layers
height=512,
width=512
)
Testing Your Diffusion Chute
Python Client
import requests
import base64
from PIL import Image
import io
def generate_image(prompt, negative_prompt="", width=1024, height=1024):
    """Generate an image from a text prompt via the deployed diffusion chute.

    Args:
        prompt: Text description of the desired image.
        negative_prompt: Concepts to steer the model away from.
        width: Output image width in pixels.
        height: Output image height in pixels.

    Returns:
        A PIL.Image decoded from the first base64-encoded image in the
        response's "images" list.

    Raises:
        Exception: If the endpoint returns a non-200 status.
    """
    response = requests.post(
        "https://myuser-diffusion-chute.chutes.ai/generate",
        json={
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "width": width,
            "height": height,
            "num_inference_steps": 30,
            "guidance_scale": 7.5,
            "seed": -1,  # -1 requests a random seed
        },
    )
    if response.status_code == 200:
        result = response.json()
        # Decode the base64-encoded image payload into a PIL image
        image_data = base64.b64decode(result["images"][0])
        image = Image.open(io.BytesIO(image_data))
        return image
    else:
        raise Exception(f"Generation failed: {response.text}")
# Test image generation
# NOTE(review): requires the chute endpoint above to be deployed and reachable.
image = generate_image(
prompt="A serene mountain lake at sunset with purple clouds",
negative_prompt="blurry, low quality, distorted, text",
width=1024,
height=768
)
# Persist the returned PIL image to disk
image.save("generated_image.png")
print("Image saved as generated_image.png")
Batch Generation
import asyncio
import aiohttp
import json
async def batch_generate_images(prompts):
    """Generate multiple images concurrently.

    Args:
        prompts: Iterable of text prompts, one request per prompt.

    Returns:
        List of parsed JSON responses, in the same order as `prompts`.
    """

    async def generate_single(session, prompt):
        # One POST per prompt; requests run concurrently via gather below.
        async with session.post(
            "https://myuser-diffusion-chute.chutes.ai/generate",
            json={
                "prompt": prompt,
                "num_inference_steps": 25,
                "guidance_scale": 7.0,
                "width": 512,
                "height": 512
            }
        ) as response:
            return await response.json()

    async with aiohttp.ClientSession() as session:
        tasks = [generate_single(session, prompt) for prompt in prompts]
        results = await asyncio.gather(*tasks)
        return results
# Test batch generation against the deployed endpoint.
prompts = [
    "A majestic eagle soaring over mountains",
    "A cyberpunk cityscape at night with neon lights",
    "A peaceful garden with cherry blossoms",
    "A futuristic robot in a sci-fi laboratory"
]
results = asyncio.run(batch_generate_images(prompts))
for i, result in enumerate(results):
    print(f"Generated image {i+1} successfully")
Image-to-Image Testing
import requests
from PIL import Image
def img2img_transform(input_image_path, prompt, strength=0.7):
    """Transform an existing image based on a prompt.

    Args:
        input_image_path: Path to the source image file.
        prompt: Text description guiding the transformation.
        strength: How strongly to deviate from the source image
            (0.0 keeps the original, 1.0 ignores it).

    Returns:
        Parsed JSON response from the /img2img endpoint.

    Raises:
        Exception: If the endpoint returns a non-200 status.
    """
    # Keep the file handle open for the duration of the upload.
    with open(input_image_path, 'rb') as f:
        files = {'image': f}
        data = {
            'prompt': prompt,
            'strength': strength,
            'guidance_scale': 7.5,
            'num_inference_steps': 30
        }
        response = requests.post(
            "https://myuser-diffusion-chute.chutes.ai/img2img",
            files=files,
            data=data
        )
    if response.status_code == 200:
        result = response.json()
        # Process result similar to text-to-image
        return result
    else:
        raise Exception(f"Transform failed: {response.text}")
# Test image transformation
# NOTE(review): requires input_photo.jpg to exist locally and the /img2img
# endpoint to be deployed and reachable.
result = img2img_transform(
"input_photo.jpg",
"Transform this into a watercolor painting",
strength=0.8
)
Generation Parameters Guide
Prompt Engineering
# Effective prompt structure
def create_effective_prompt(subject, style, quality_modifiers=""):
    """Create a well-structured prompt.

    Args:
        subject: What the image should depict.
        style: Artistic style to apply.
        quality_modifiers: Optional extra modifiers appended after the style.

    Returns:
        A comma-separated prompt string ending with standard quality enhancers.
    """
    base_prompt = f"{subject}, {style}"
    if quality_modifiers:
        base_prompt += f", {quality_modifiers}"
    # Add quality enhancers
    quality_terms = "highly detailed, sharp focus, professional photography"
    return f"{base_prompt}, {quality_terms}"

# Examples
portrait_prompt = create_effective_prompt(
    subject="Portrait of a young woman with curly hair",
    style="Renaissance painting style",
    quality_modifiers="oil painting, classical lighting"
)
landscape_prompt = create_effective_prompt(
    subject="Mountain landscape with a lake",
    style="digital art",
    quality_modifiers="golden hour lighting, cinematic composition"
)
Additional tuning options: set `safety_checker=False` if appropriate for your use case, and experiment with different negative prompts to steer results.
Best Practices
1. Prompt Engineering
# Good prompt structure: subject first, then style and quality modifiers.
good_prompt = "Portrait of a person, photorealistic, highly detailed, professional photography, sharp focus, beautiful lighting"

# Include style modifiers
style_prompt = "Landscape painting, oil on canvas, Bob Ross style, happy little trees, peaceful, serene"

# Use negative prompts effectively
negative_prompt = "blurry, low quality, distorted, ugly, bad anatomy, extra limbs, text, watermark"