Instructions for using NoMoreCopyrightOrg/flux-dev with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
  - Diffusers

How to use NoMoreCopyrightOrg/flux-dev with Diffusers:

```bash
pip install -U diffusers transformers accelerate
```

```python
import torch
from diffusers import DiffusionPipeline

# switch to "mps" for Apple devices
pipe = DiffusionPipeline.from_pretrained(
    "NoMoreCopyrightOrg/flux-dev",
    dtype=torch.bfloat16,
    device_map="cuda",
)

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
```
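The pipeline call accepts the usual Diffusers generation arguments. As a minimal sketch (the parameter values mirror the endpoint handler defaults further down, not tuned settings for this checkpoint), you can pass a seeded generator for reproducible outputs and save the result:

```python
import torch

# reproducible generation: pass a seeded torch.Generator rather than a raw seed
# (use "mps" or "cpu" here to match the device your pipeline runs on)
generator = torch.Generator(device="cuda").manual_seed(42)

image = pipe(
    prompt,
    height=1024,
    width=1024,
    guidance_scale=3.5,      # default used by the endpoint handler below
    num_inference_steps=28,  # default step count for the non-turbo model
    generator=generator,
).images[0]
image.save("astronaut.png")
```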
- Notebooks
  - Google Colab
  - Kaggle
- Local Apps
  - Draw Things
  - DiffusionBee
The following `handler.py` (a custom handler for Hugging Face Inference Endpoints, based on https://github.com/sayakpaul/diffusers-torchao) serves the model with torchao quantization, with optional `torch.compile` and Hyper-SD turbo variants:

```python
# https://github.com/sayakpaul/diffusers-torchao
import os
from typing import Any, Dict

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, TorchAoConfig
from huggingface_hub import hf_hub_download
from PIL import Image
from torchao.quantization import (
    quantize_,
    autoquant,
    int8_dynamic_activation_int8_weight,
    int8_dynamic_activation_int4_weight,
)

IS_COMPILE = False  # torch.compile the transformer and VAE
IS_TURBO = False    # use the 8-step Hyper-SD checkpoint
IS_4BIT = True      # int4 weights (otherwise int8), both with int8 dynamic activations

if IS_COMPILE:
    import torch._dynamo
    torch._dynamo.config.suppress_errors = True

from huggingface_inference_toolkit.logging import logger


def load_pipeline_stable(repo_id: str, dtype: torch.dtype) -> Any:
    """Quantize the pipeline at load time via diffusers' TorchAoConfig."""
    quantization_config = TorchAoConfig("int4dq" if IS_4BIT else "int8dq")
    vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=dtype)
    pipe = FluxPipeline.from_pretrained(
        repo_id, vae=vae, torch_dtype=dtype, quantization_config=quantization_config
    )
    pipe.transformer.fuse_qkv_projections()
    pipe.vae.fuse_qkv_projections()
    pipe.to("cuda")
    return pipe


def load_pipeline_compile(repo_id: str, dtype: torch.dtype) -> Any:
    """Same as the stable loader, plus torch.compile on the transformer and VAE."""
    quantization_config = TorchAoConfig("int4dq" if IS_4BIT else "int8dq")
    vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=dtype)
    pipe = FluxPipeline.from_pretrained(
        repo_id, vae=vae, torch_dtype=dtype, quantization_config=quantization_config
    )
    pipe.transformer.fuse_qkv_projections()
    pipe.vae.fuse_qkv_projections()
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=False, dynamic=False)
    pipe.vae.to(memory_format=torch.channels_last)
    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=False, dynamic=False)
    pipe.to("cuda")
    return pipe


def load_pipeline_autoquant(repo_id: str, dtype: torch.dtype) -> Any:
    """Compile first, then let torchao's autoquant pick quantization per layer."""
    pipe = FluxPipeline.from_pretrained(repo_id, torch_dtype=dtype).to("cuda")
    pipe.transformer.fuse_qkv_projections()
    pipe.vae.fuse_qkv_projections()
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
    pipe.vae.to(memory_format=torch.channels_last)
    pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=True)
    pipe.transformer = autoquant(pipe.transformer, error_on_unseen=False)
    pipe.vae = autoquant(pipe.vae, error_on_unseen=False)
    pipe.to("cuda")
    return pipe


def load_pipeline_turbo(repo_id: str, dtype: torch.dtype) -> Any:
    """Fuse the Hyper-SD 8-step LoRA, then quantize transformer, VAE, and text encoder."""
    pipe = FluxPipeline.from_pretrained(repo_id, torch_dtype=dtype).to("cuda")
    pipe.load_lora_weights(
        hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"),
        adapter_name="hyper-sd",
    )
    pipe.set_adapters(["hyper-sd"], adapter_weights=[0.125])
    pipe.fuse_lora()
    pipe.transformer.fuse_qkv_projections()
    pipe.vae.fuse_qkv_projections()
    weight = int8_dynamic_activation_int4_weight() if IS_4BIT else int8_dynamic_activation_int8_weight()
    quantize_(pipe.transformer, weight, device="cuda")
    quantize_(pipe.vae, weight, device="cuda")
    quantize_(pipe.text_encoder_2, weight, device="cuda")
    pipe.to("cuda")
    return pipe


def load_pipeline_turbo_compile(repo_id: str, dtype: torch.dtype) -> Any:
    """Turbo loader plus torch.compile on the transformer and VAE."""
    pipe = FluxPipeline.from_pretrained(repo_id, torch_dtype=dtype).to("cuda")
    pipe.load_lora_weights(
        hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"),
        adapter_name="hyper-sd",
    )
    pipe.set_adapters(["hyper-sd"], adapter_weights=[0.125])
    pipe.fuse_lora()
    pipe.transformer.fuse_qkv_projections()
    pipe.vae.fuse_qkv_projections()
    weight = int8_dynamic_activation_int4_weight() if IS_4BIT else int8_dynamic_activation_int8_weight()
    quantize_(pipe.transformer, weight, device="cuda")
    quantize_(pipe.vae, weight, device="cuda")
    quantize_(pipe.text_encoder_2, weight, device="cuda")
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=False, dynamic=False)
    pipe.vae.to(memory_format=torch.channels_last)
    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=False, dynamic=False)
    pipe.to("cuda")
    return pipe


class EndpointHandler:
    def __init__(self, path=""):
        # path: local model directory provided by the Inference Toolkit (unused here);
        # the "-8step" repo presumably ships with the Hyper-SD LoRA already fused,
        # so the stable/compile loaders are used either way
        repo_id = "NoMoreCopyrightOrg/flux-dev-8step" if IS_TURBO else "NoMoreCopyrightOrg/flux-dev"
        # dtype = torch.bfloat16
        dtype = torch.float16  # float16 for older NVIDIA GPUs without bfloat16 support
        if IS_COMPILE:
            self.pipeline = load_pipeline_compile(repo_id, dtype)
        else:
            self.pipeline = load_pipeline_stable(repo_id, dtype)

    def __call__(self, data: Dict[str, Any]) -> Image.Image:
        logger.info(f"Received incoming request with {data=}")
        if "inputs" in data and isinstance(data["inputs"], str):
            prompt = data.pop("inputs")
        elif "prompt" in data and isinstance(data["prompt"], str):
            prompt = data.pop("prompt")
        else:
            raise ValueError(
                "Provided input body must contain either the key `inputs` or `prompt` with the"
                " prompt to use for the image generation, and it needs to be a non-empty string."
            )
        parameters = data.pop("parameters", {})
        num_inference_steps = parameters.get("num_inference_steps", 8 if IS_TURBO else 28)
        width = parameters.get("width", 1024)
        height = parameters.get("height", 1024)
        guidance_scale = parameters.get("guidance_scale", 3.5)
        # the seed cannot be passed to the pipeline directly; wrap it in a torch.Generator
        seed = parameters.get("seed", 0)
        generator = torch.manual_seed(seed)
        return self.pipeline(  # type: ignore
            prompt,
            height=height,
            width=width,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            generator=generator,
            output_type="pil",
        ).images[0]
```
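For a quick sanity check, the handler can be exercised locally with the same JSON-shaped payload an Inference Endpoint would receive. This is a hypothetical sketch (it assumes a CUDA GPU with enough memory to load the checkpoint); the keys mirror exactly what `__call__` reads:

```python
# Hypothetical local smoke test for EndpointHandler; assumes a CUDA GPU.
handler = EndpointHandler()
image = handler({
    "inputs": "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "parameters": {
        "num_inference_steps": 28,
        "width": 1024,
        "height": 1024,
        "guidance_scale": 3.5,
        "seed": 42,
    },
})
image.save("output.png")
```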