akhaliq HF Staff committed on
Commit
be83a55
·
verified ·
1 Parent(s): 3d60b50

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. app.py +21 -50
  2. requirements.txt +4 -8
app.py CHANGED
@@ -5,58 +5,28 @@ from diffusers import ZImagePipeline
5
  import os
6
  from pathlib import Path
7
 
8
- # Global variable to store the pipeline
9
- pipe = None
 
10
 
11
- def load_model():
12
- """
13
- Load the Z-Image Turbo model before inference.
14
- This ensures the model is downloaded and ready before any generation requests.
15
- """
16
- global pipe
 
17
 
18
- if pipe is not None:
19
- return pipe
 
 
20
 
21
- print("Loading Z-Image Turbo model...")
22
- print("This may take a few minutes on first run while the model downloads...")
23
 
24
- try:
25
- # Load the pipeline with optimal settings
26
- pipe = ZImagePipeline.from_pretrained(
27
- "Tongyi-MAI/Z-Image-Turbo",
28
- torch_dtype=torch.bfloat16,
29
- low_cpu_mem_usage=False,
30
- )
31
-
32
- # Move to GPU if available
33
- device = "cuda" if torch.cuda.is_available() else "cpu"
34
- pipe.to(device)
35
- print(f"Model loaded on {device}")
36
-
37
- # Optional: Enable Flash Attention for better efficiency
38
- try:
39
- pipe.transformer.set_attention_backend("flash")
40
- print("Flash Attention enabled")
41
- except Exception as e:
42
- print(f"Flash Attention not available: {e}")
43
- print("Using default attention backend")
44
-
45
- print("Model loaded successfully!")
46
- return pipe
47
-
48
- except Exception as e:
49
- print(f"Error loading model: {e}")
50
- raise
51
-
52
- # Pre-load the model when the app starts
53
- print("Initializing model on startup...")
54
- try:
55
- load_model()
56
- print("Model initialization complete!")
57
  except Exception as e:
58
- print(f"Warning: Could not pre-load model: {e}")
59
- print("Model will be loaded on first generation request")
60
 
61
  @spaces.GPU()
62
  def generate_image(
@@ -74,10 +44,8 @@ def generate_image(
74
  """
75
  global pipe
76
 
77
- # Ensure model is loaded
78
  if pipe is None:
79
- progress(0, desc="Loading model...")
80
- load_model()
81
 
82
  if not prompt.strip():
83
  raise gr.Error("Please enter a prompt to generate an image.")
@@ -297,6 +265,7 @@ button.primary:active {
297
  with gr.Blocks(
298
  title="Z-Image Turbo",
299
  fill_height=False,
 
300
  ) as demo:
301
 
302
  # Header
@@ -350,12 +319,14 @@ with gr.Blocks(
350
  fn=generate_image,
351
  inputs=prompt,
352
  outputs=output_image,
 
353
  )
354
 
355
  prompt.submit(
356
  fn=generate_image,
357
  inputs=prompt,
358
  outputs=output_image,
 
359
  )
360
 
361
  if __name__ == "__main__":
 
5
  import os
6
  from pathlib import Path
7
 
8
+ # Load the model directly at startup
9
+ print("Loading Z-Image Turbo model...")
10
+ print("This may take a few minutes on first run while the model downloads...")
11
 
12
+ try:
13
+ # Load the pipeline with optimal settings
14
+ pipe = ZImagePipeline.from_pretrained(
15
+ "Tongyi-MAI/Z-Image-Turbo",
16
+ torch_dtype=torch.bfloat16,
17
+ low_cpu_mem_usage=False,
18
+ )
19
 
20
+ # Move to GPU if available
21
+ device = "cuda" if torch.cuda.is_available() else "cpu"
22
+ pipe.to(device)
23
+ print(f"Model loaded on {device}")
24
 
25
+ print("Model loaded successfully!")
 
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  except Exception as e:
28
+ print(f"Error loading model: {e}")
29
+ pipe = None
30
 
31
  @spaces.GPU()
32
  def generate_image(
 
44
  """
45
  global pipe
46
 
 
47
  if pipe is None:
48
+ raise gr.Error("Model failed to load on startup. Please restart the application.")
 
49
 
50
  if not prompt.strip():
51
  raise gr.Error("Please enter a prompt to generate an image.")
 
265
  with gr.Blocks(
266
  title="Z-Image Turbo",
267
  fill_height=False,
268
+ footer_links=[{"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}]
269
  ) as demo:
270
 
271
  # Header
 
319
  fn=generate_image,
320
  inputs=prompt,
321
  outputs=output_image,
322
+ api_visibility="public"
323
  )
324
 
325
  prompt.submit(
326
  fn=generate_image,
327
  inputs=prompt,
328
  outputs=output_image,
329
+ api_visibility="public"
330
  )
331
 
332
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1,16 +1,12 @@
 
 
1
  torch
2
- torchvision
3
- torchaudio
4
  git+https://github.com/huggingface/diffusers
5
  git+https://github.com/huggingface/transformers
6
  sentencepiece
7
  accelerate
8
  tokenizers
9
- spaces
10
- gradio
11
  requests
12
  Pillow
13
- numpy
14
- pandas
15
- matplotlib
16
- scipy
 
1
+ spaces
2
+ gradio
3
  torch
 
 
4
  git+https://github.com/huggingface/diffusers
5
  git+https://github.com/huggingface/transformers
6
  sentencepiece
7
  accelerate
8
  tokenizers
 
 
9
  requests
10
  Pillow
11
+ torchvision
12
+ torchaudio