Spaces: Running on Zero
Update app.py #6
by John6666 - opened

app.py CHANGED
```diff
@@ -273,7 +273,7 @@ with gr.Blocks() as demo:
 
 # Initialize both pipelines
 if __name__ == "__main__":
-    from diffusers import FluxTransformer2DModel, FluxPipeline, BitsAndBytesConfig
+    from diffusers import FluxTransformer2DModel, FluxPipeline, BitsAndBytesConfig, GGUFQuantizationConfig
     from transformers import T5EncoderModel, BitsAndBytesConfig as BitsAndBytesConfigTF
 
     # Initialize Flux pipeline
@@ -282,11 +282,15 @@ if __name__ == "__main__":
 
     dtype = torch.bfloat16
     file_url = "https://huggingface.co/gokaygokay/flux-game/blob/main/gokaygokay_00001_.safetensors"
+    file_url = file_url.replace("/resolve/main/", "/blob/main/").replace("?download=true", "")
     single_file_base_model = "camenduru/FLUX.1-dev-diffusers"
     quantization_config_tf = BitsAndBytesConfigTF(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16)
     text_encoder_2 = T5EncoderModel.from_pretrained(single_file_base_model, subfolder="text_encoder_2", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config_tf, token=huggingface_token)
-    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16, token=huggingface_token)
-    transformer = FluxTransformer2DModel.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config, token=huggingface_token)
+    if ".gguf" in file_url:
+        transformer = transformer_model.from_single_file(file_url, subfolder="transformer", quantization_config=GGUFQuantizationConfig(compute_dtype=dtype), torch_dtype=dtype, config=single_file_base_model)
+    else:
+        quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16, token=huggingface_token)
+        transformer = FluxTransformer2DModel.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config, token=huggingface_token)
     flux_pipeline = FluxPipeline.from_pretrained(single_file_base_model, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype, quantization_config=quantization_config, token=huggingface_token)
 
     # Initialize Trellis pipeline
```
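For readers following the change: the new branch switches between the two transformer-loading paths diffusers exposes, GGUF dequantization (`GGUFQuantizationConfig`) for `.gguf` single-file checkpoints and on-the-fly 4-bit NF4 bitsandbytes quantization for safetensors. Below is a minimal standalone sketch of that logic under a few assumptions: the GGUF URL is a placeholder (city96's public FLUX.1-dev GGUF conversions, not a checkpoint this Space uses), `FluxTransformer2DModel` stands in for the diff's `transformer_model` (presumably defined elsewhere in app.py), and the token and text-encoder plumbing is omitted.

```python
import torch
from diffusers import (
    BitsAndBytesConfig,
    FluxPipeline,
    FluxTransformer2DModel,
    GGUFQuantizationConfig,
)

dtype = torch.bfloat16
base_model = "camenduru/FLUX.1-dev-diffusers"
# Placeholder checkpoint URL; the Space substitutes its own file_url.
file_url = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf"
# from_single_file wants blob-style Hub URLs, hence the rewrite added in the diff.
file_url = file_url.replace("/resolve/main/", "/blob/main/").replace("?download=true", "")

if ".gguf" in file_url:
    # GGUF checkpoints ship pre-quantized; only the compute dtype is configured.
    transformer = FluxTransformer2DModel.from_single_file(
        file_url,
        quantization_config=GGUFQuantizationConfig(compute_dtype=dtype),
        torch_dtype=dtype,
        config=base_model,
    )
else:
    # Safetensors checkpoints are quantized on load to 4-bit NF4 via bitsandbytes.
    transformer = FluxTransformer2DModel.from_single_file(
        file_url,
        quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_use_double_quant=True,
            bnb_4bit_compute_dtype=dtype,
        ),
        torch_dtype=dtype,
        config=base_model,
    )

# Assemble the pipeline around the pre-quantized transformer.
pipe = FluxPipeline.from_pretrained(base_model, transformer=transformer, torch_dtype=dtype)
```

One design note: the GGUF branch needs no bitsandbytes config because the weights are already quantized in the file, so the sketch also drops the pipeline-level `quantization_config` argument, which in the diff is only assigned in the safetensors branch.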