@@ -348,7 +348,7 @@ dam = DescribeAnythingModel(
 )`,
 ];
 
-const diffusers_install = "pip install -U diffusers transformers";
+const diffusers_install = "pip install -U diffusers transformers accelerate";
 
 const diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
 
@@ -357,19 +357,23 @@ const diffusersImg2ImgDefaultPrompt = "Turn this cat into a dog";
 const diffusersVideoDefaultPrompt = "A man with short gray hair plays a red electric guitar.";
 
 const diffusers_default = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 
-pipe = DiffusionPipeline.from_pretrained("${model.id}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${model.id}", dtype=torch.bfloat16, device_map="cuda")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt}"
 image = pipe(prompt).images[0]`,
 ];
 
 const diffusers_image_to_image = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 from diffusers.utils import load_image
 
-pipe = DiffusionPipeline.from_pretrained("${model.id}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${model.id}", dtype=torch.bfloat16, device_map="cuda")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersImg2ImgDefaultPrompt}"
 input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
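For a concrete sense of what the updated `diffusers_default` template renders, here is its output for a hypothetical model id `your-org/your-model` (a sketch: it assumes a CUDA GPU and a checkpoint that works in bfloat16, and `device_map="cuda"` is why `accelerate` joined the install line above):

import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("your-org/your-model", dtype=torch.bfloat16, device_map="cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
image.save("output.png")  # not part of the template; added only so the sketch writes a file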
@@ -382,7 +386,8 @@ const diffusers_image_to_video = (model: ModelData) => [
 from diffusers import DiffusionPipeline
 from diffusers.utils import load_image, export_to_video
 
-pipe = DiffusionPipeline.from_pretrained("${model.id}", torch_dtype=torch.float16)
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${model.id}", dtype=torch.bfloat16, device_map="cuda")
 pipe.to("cuda")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersVideoDefaultPrompt}"
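The image-to-video template is only partially visible in this hunk; the lines below sketch how the rendered snippet is likely used, with a hypothetical model id, the input-image URL reused from the image-to-image template purely for illustration, and the `export_to_video` call taken from the hunk headers further down. Frame handling differs between video pipelines, so treat the `.frames[0]` access as an assumption:

import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image, export_to_video

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("your-org/your-video-model", dtype=torch.bfloat16, device_map="cuda")

prompt = "A man with short gray hair plays a red electric guitar."
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# most diffusers video pipelines return a batch of frame lists
output = pipe(image=image, prompt=prompt).frames[0]
export_to_video(output, "output.mp4")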
@@ -404,20 +409,24 @@ pipe = StableDiffusionControlNetPipeline.from_pretrained(
 ];
 
 const diffusers_lora = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 
-pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}", dtype=torch.bfloat16, device_map="cuda")
 pipe.load_lora_weights("${model.id}")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt}"
 image = pipe(prompt).images[0]`,
 ];
 
 const diffusers_lora_image_to_image = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 from diffusers.utils import load_image
 
-pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}", dtype=torch.bfloat16, device_map="cuda")
 pipe.load_lora_weights("${model.id}")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersImg2ImgDefaultPrompt}"
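The LoRA variants differ from the plain templates in one way: the pipeline is built from the base checkpoint (via `get_base_diffusers_model`) and the adapter repo is loaded on top. Rendered with hypothetical ids:

import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("base-org/base-model", dtype=torch.bfloat16, device_map="cuda")
# "adapter-org/lora-adapter" stands in for the model page being viewed
pipe.load_lora_weights("adapter-org/lora-adapter")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]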
@@ -427,10 +436,12 @@ image = pipe(image=input_image, prompt=prompt).images[0]`,
 ];
 
 const diffusers_lora_text_to_video = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 from diffusers.utils import export_to_video
 
-pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}", dtype=torch.bfloat16, device_map="cuda")
 pipe.load_lora_weights("${model.id}")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersVideoDefaultPrompt}"
@@ -440,10 +451,12 @@ export_to_video(output, "output.mp4")`,
 ];
 
 const diffusers_lora_image_to_video = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 from diffusers.utils import load_image, export_to_video
 
-pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}", dtype=torch.bfloat16, device_map="cuda")
 pipe.load_lora_weights("${model.id}")
 
 prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersVideoDefaultPrompt}"
@@ -454,9 +467,11 @@ export_to_video(output, "output.mp4")`,
 ];
 
 const diffusers_textual_inversion = (model: ModelData) => [
-	`from diffusers import DiffusionPipeline
+	`import torch
+from diffusers import DiffusionPipeline
 
-pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+# switch to "mps" for apple devices
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}", dtype=torch.bfloat16, device_map="cuda")
 pipe.load_textual_inversion("${model.id}")`,
 ];
 
@@ -468,7 +483,8 @@ from diffusers.utils import load_image
 image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup.png")
 mask = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup_mask.png")
 
-pipe = FluxFillPipeline.from_pretrained("${model.id}", torch_dtype=torch.bfloat16).to("cuda")
+# switch to "mps" for apple devices
+pipe = FluxFillPipeline.from_pretrained("${model.id}", dtype=torch.bfloat16, device_map="cuda")
 image = pipe(
     prompt="a white paper cup",
     image=image,
@@ -488,7 +504,8 @@ const diffusers_inpainting = (model: ModelData) => [
 from diffusers import AutoPipelineForInpainting
 from diffusers.utils import load_image
 
-pipe = AutoPipelineForInpainting.from_pretrained("${model.id}", torch_dtype=torch.float16, variant="fp16").to("cuda")
+# switch to "mps" for apple devices
+pipe = AutoPipelineForInpainting.from_pretrained("${model.id}", dtype=torch.float16, variant="fp16", device_map="cuda")
 
 img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
 mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
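Note that the inpainting template keeps `torch.float16` plus the `fp16` variant rather than moving to bfloat16; only the device handling changes. Rendered for a hypothetical model id, with the image URLs taken from the template itself; the prompt and the final pipeline call sit below this hunk, so they are sketched here from the standard AutoPipelineForInpainting call shape:

import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image

# switch to "mps" for apple devices
pipe = AutoPipelineForInpainting.from_pretrained("your-org/inpaint-model", dtype=torch.float16, variant="fp16", device_map="cuda")

img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
image = load_image(img_url)
mask_image = load_image(mask_url)

# illustrative prompt; the template's real prompt line is outside this hunk
prompt = "a white cat sitting on a bench"
image = pipe(prompt=prompt, image=image, mask_image=mask_image).images[0]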
@@ -1615,7 +1632,7 @@ export const transformers = (model: ModelData): string[] => {
 		autoSnippet.push(
 			"# Load model directly",
 			`from transformers import ${info.auto_model}`,
-			`model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ', torch_dtype="auto")'
+			`model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ', dtype="auto")'
 		);
 	}
 
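The transformers hunk is the same rename (`torch_dtype` becomes `dtype`) applied to the auto-generated loading snippet. For a hypothetical causal LM that needs no remote code (so `remote_code_snippet` is empty and `info.auto_model` resolves to `AutoModelForCausalLM`), the pushed lines render as:

# Load model directly
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("your-org/your-model", dtype="auto")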