SDXL

This notebook demonstrates the two-stage Stable Diffusion XL text-to-image workflow from Diffusers (base pipeline followed by the refiner), logging the generation process of both stages to Weights & Biases with the StableDiffusionXLCallback from wandb-addons.
In [ ]:
!git clone https://github.com/soumik12345/wandb-addons
!pip install ./wandb-addons[huggingface] > install.log
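Logging with the callback requires an authenticated Weights & Biases session. If you have not logged in from this environment before, a minimal sketch (it will prompt for your W&B API key):

import wandb

wandb.login()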
In [ ]:
from functools import partial

import torch

from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
from wandb_addons.diffusers import StableDiffusionXLCallback

# Free any cached GPU memory before loading the pipelines
torch.cuda.empty_cache()
In [ ]:
# Load the SDXL base pipeline in half precision with safetensors weights
base_pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)

# Offload sub-models to CPU when idle to reduce peak GPU memory usage
base_pipeline.enable_model_cpu_offload()
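enable_model_cpu_offload() trades some speed for lower GPU memory usage. If your GPU has enough VRAM to hold the whole SDXL pipeline in fp16, you can keep it entirely on the GPU instead; a sketch of that alternative:

# Alternative to CPU offload: keep the full pipeline on the GPU (needs more VRAM)
base_pipeline.to("cuda")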
In [ ]:
prompt = "seascape by Ray Collins and artgerm, front view of a perfect wave, sunny background, ultra detailed water, 4k resolution"
negative_prompt = "low resolution, low details, blurry, clouds"
num_inference_steps = 50
callback = StableDiffusionXLCallback(
pipeline=base_pipeline,
prompt=prompt,
wandb_project="diffusers-new",
wandb_entity="geekyrakshit",
weave_mode=True,
num_inference_steps=num_inference_steps,
negative_prompt=negative_prompt,
)
image = base_pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
output_type="latent",
num_inference_steps=num_inference_steps,
callback=partial(callback, end_experiment=False)
).images[0]
prompt = "seascape by Ray Collins and artgerm, front view of a perfect wave, sunny background, ultra detailed water, 4k resolution"
negative_prompt = "low resolution, low details, blurry, clouds"
num_inference_steps = 50
callback = StableDiffusionXLCallback(
pipeline=base_pipeline,
prompt=prompt,
wandb_project="diffusers-new",
wandb_entity="geekyrakshit",
weave_mode=True,
num_inference_steps=num_inference_steps,
negative_prompt=negative_prompt,
)
image = base_pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
output_type="latent",
num_inference_steps=num_inference_steps,
callback=partial(callback, end_experiment=False)
).images[0]
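The generation above is unseeded, so each run produces a different image. If you want reproducible results while comparing logged runs, you can pass a seeded generator to the pipeline call; a minimal sketch (the seed value is arbitrary):

# Optional: seed the generation for reproducibility
generator = torch.Generator(device="cuda").manual_seed(42)

image = base_pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    output_type="latent",
    num_inference_steps=num_inference_steps,
    generator=generator,
    callback=partial(callback, end_experiment=False),
).images[0]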
In [ ]:
# Load the SDXL refiner, reusing the base pipeline's second text encoder and VAE
refiner_pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base_pipeline.text_encoder_2,
    vae=base_pipeline.vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)

refiner_pipeline.enable_model_cpu_offload()
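If the xformers package is installed in your environment, memory-efficient attention can further reduce memory use and often speeds up inference; a sketch, assuming xformers is available:

# Optional: enable memory-efficient attention (requires the xformers package)
base_pipeline.enable_xformers_memory_efficient_attention()
refiner_pipeline.enable_xformers_memory_efficient_attention()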
In [ ]:
num_inference_steps = 50
strength = 0.3

# Register the refiner stage with the callback so its steps are logged to the same run
callback.add_refiner_stage(
    refiner_pipeline, num_inference_steps=num_inference_steps, strength=strength
)

# Refine the base-stage latents; image[None, :] adds the batch dimension expected by the pipeline
image = refiner_pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=image[None, :],
    callback=callback,
).images[0]
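The refiner returns a PIL image, so the final result can be saved or displayed directly; a minimal sketch (the filename is arbitrary):

# Save the refined image to disk and display it in the notebook
image.save("sdxl_seascape.png")
image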