Alexander Bagus committed on
Commit
8d6aabd
·
1 Parent(s): 90502d2
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -75,10 +75,10 @@ pipe.to("cuda", torch.bfloat16)
75
  # pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
76
  # spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
77
 
78
- def prepare(prompt):
 
79
  polished_prompt = polish_prompt(prompt)
80
-
81
- return polished_prompt
82
 
83
  @spaces.GPU
84
  def inference(
@@ -179,7 +179,7 @@ with gr.Blocks(css=css) as demo:
179
  placeholder="Enter your prompt",
180
  container=False,
181
  )
182
-
183
  control_mode = gr.Radio(
184
  choices=["Canny", "Depth", "HED", "MLSD", "Pose"],
185
  value="HED",
@@ -242,8 +242,8 @@ with gr.Blocks(css=css) as demo:
242
 
243
  run_button.click(
244
  fn=prepare,
245
- inputs=prompt,
246
- outputs=[polished_prompt]
247
  # outputs=gr.State(), # Pass to the next function, not to UI at this step
248
  ).then(
249
  fn=inference,
 
75
  # pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
76
  # spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
77
 
78
+ def prepare(prompt, is_polish_prompt):
79
+ if not is_polish_prompt: return prompt, False
80
  polished_prompt = polish_prompt(prompt)
81
+ return polished_prompt, True
 
82
 
83
  @spaces.GPU
84
  def inference(
 
179
  placeholder="Enter your prompt",
180
  container=False,
181
  )
182
+ is_polish_prompt = gr.Checkbox(label="Polish prompt", value=True)
183
  control_mode = gr.Radio(
184
  choices=["Canny", "Depth", "HED", "MLSD", "Pose"],
185
  value="HED",
 
242
 
243
  run_button.click(
244
  fn=prepare,
245
+ inputs=[prompt, is_polish_prompt],
246
+ outputs=[polished_prompt, is_polish_prompt]
247
  # outputs=gr.State(), # Pass to the next function, not to UI at this step
248
  ).then(
249
  fn=inference,