DeepFloyd IF
!git clone https://github.com/soumik12345/wandb-addons
!pip install ./wandb-addons[huggingface]
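The callbacks log to Weights & Biases, so the environment needs to be authenticated. A minimal sketch, assuming you have a W&B account and have not already set the WANDB_API_KEY environment variable:

import wandb

wandb.login()  # prompts for an API key on first use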
import gc
from functools import partial
import torch
from diffusers import IFPipeline, IFSuperResolutionPipeline, StableDiffusionUpscalePipeline
from wandb_addons.diffusers import IFCallback
# Stage I: base DeepFloyd IF text-to-image pipeline (64x64 output) in fp16,
# with CPU offloading to keep GPU memory usage manageable
pipeline_1 = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipeline_1.enable_model_cpu_offload()
prompt = 'a photo of a smiling bee wearing a yellow hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "Weights and Biases"'
prompt_embeds, negative_embeds = pipeline_1.encode_prompt(prompt)

num_images_per_prompt = 2
num_inference_steps = 100
configs = {"guidance_scale": 7.0}

# Callback that logs every stage of the generation to Weights & Biases;
# weave_mode=True logs the results with W&B Weave
callback = IFCallback(
    pipeline=pipeline_1,
    prompt=prompt,
    wandb_project="diffusers-2",
    wandb_entity="geekyrakshit",
    weave_mode=True,
    num_inference_steps=num_inference_steps,
    num_images_per_prompt=num_images_per_prompt,
    configs=configs,
)

# end_experiment=False keeps the experiment open so the later stages can log to it
image = pipeline_1(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    output_type="pt",
    callback=partial(callback, end_experiment=False),
    **configs,
).images
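The stage I output is returned as a tensor (output_type="pt") so it can be passed straight to the next pipeline. To inspect it locally you can convert it with diffusers' pt_to_pil helper; a minimal sketch (the filename is arbitrary):

from diffusers.utils import pt_to_pil

pt_to_pil(image)[0].save("if_stage_1.png")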
# Stage II: super-resolution pipeline (64x64 -> 256x256); text_encoder=None
# because we reuse the prompt embeddings computed by stage I
pipeline_2 = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
)
pipeline_2.enable_model_cpu_offload()
num_inference_steps = 50

# Register stage II with the callback, then run super-resolution on the stage I output
callback.add_stage(pipeline_2, num_inference_steps=num_inference_steps)

image = pipeline_2(
    image=image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    num_inference_steps=num_inference_steps,
    output_type="pt",
    callback=partial(callback, end_experiment=False),
).images
# Free the GPU memory used by stage II before loading the upscaler
del pipeline_2
gc.collect()
torch.cuda.empty_cache()
# Stage III: Stable Diffusion x4 upscaler (256x256 -> 1024x1024),
# reusing the safety modules already loaded by stage I
safety_modules = {
    "feature_extractor": pipeline_1.feature_extractor,
    "safety_checker": pipeline_1.safety_checker,
    "watermarker": pipeline_1.watermarker,
}
pipeline_3 = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
)
pipeline_3.enable_model_cpu_offload()
num_inference_steps = 75

# Register the final upscaling stage under the name "Upscale"; passing the callback
# directly (without end_experiment=False) lets it end the experiment when this stage finishes
callback.add_stage(pipeline_3, num_inference_steps=num_inference_steps, stage_name="Upscale")

image = pipeline_3(
    prompt=prompt,
    image=image,
    noise_level=100,
    num_inference_steps=num_inference_steps,
    callback=callback,
).images
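The upscaler returns PIL images by default, so the final result can be saved or displayed directly; a minimal sketch (the filename is arbitrary):

image[0].save("deepfloyd_if_result.png")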