silveroxides committed · Commit 5a176c7 · verified · 1 Parent(s): 525a6bb

Update app.py

Files changed (1): app.py +28 −8
app.py CHANGED
@@ -149,7 +149,7 @@ cliptextencode = CLIPTextEncode()
 unetloader = NODE_CLASS_MAPPINGS["ScaledFP8HybridUNetLoader"]()
 vaeloader = VAELoader()
 modelsamplingauraflow = NODE_CLASS_MAPPINGS["ModelSamplingAuraFlow"]()
-basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
+cfgguider = NODE_CLASS_MAPPINGS["CFGGuider"]()
 betascheduler = NODE_CLASS_MAPPINGS["BetaSamplingScheduler"]()
 samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
 vaedecode = VAEDecode()
@@ -181,7 +181,7 @@ valid_models = [
 model_management.load_models_gpu(valid_models)

 @spaces.GPU
-def generate_image(prompt, width, height, steps, seed):
+def generate_image(prompt, negative_prompt, width, height, steps, cfg, seed):
     with torch.inference_mode():
         # Set random seed if provided
         if seed == -1:
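The new signature threads `negative_prompt` and `cfg` from the UI straight into the sampling code. The surrounding context also shows the seed convention this Space uses: `-1` requests a fresh random seed on every call. A minimal sketch of that convention, assuming the hidden body simply substitutes a random integer (the helper name is hypothetical, not from the commit):

    import random

    def resolve_seed(seed: int) -> int:
        # -1 is the sentinel the UI labels "Seed (-1 for random)";
        # any other value is used verbatim for reproducible generations.
        if seed == -1:
            return random.randint(0, 2**32 - 1)
        return seed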
@@ -198,6 +198,10 @@ def generate_image(prompt, width, height, steps, seed):
             text=prompt,
             clip=get_value_at_index(t5tokenizeroptions_82, 0),
         )
+        cliptextencode_75 = cliptextencode.encode(
+            text=negative_prompt,
+            clip=get_value_at_index(t5tokenizeroptions_82, 0),
+        )


         modelsamplingauraflow_85 = modelsamplingauraflow.patch(
@@ -205,9 +209,11 @@ def generate_image(prompt, width, height, steps, seed):
             model=get_value_at_index(unetloader_76, 0),
         )

-        basicguider_73 = basicguider.get_guider(
+        cfgguider_73 = cfgguider.get_guider(
+            cfg=cfg,
             model=get_value_at_index(unetloader_76, 0),
-            conditioning=get_value_at_index(cliptextencode_74, 0),
+            positive=get_value_at_index(cliptextencode_74, 0),
+            negative=get_value_at_index(cliptextencode_75, 0),
         )

         betascheduler_84 = betascheduler.get_sigmas(
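This hunk is the core of the change: BasicGuider drives the sampler with a single conditioning, while CFGGuider takes a positive and a negative conditioning plus a `cfg` scale and blends the two model predictions via classifier-free guidance. A minimal sketch of that blend (the prediction names are illustrative stand-ins, not ComfyUI API):

    def cfg_mix(positive_pred, negative_pred, cfg):
        # Classifier-free guidance: extrapolate from the negative
        # prediction toward the positive one. At cfg == 1.0 this
        # collapses to the positive prediction alone, matching the
        # old single-conditioning BasicGuider behaviour.
        return negative_pred + cfg * (positive_pred - negative_pred)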
@@ -219,7 +225,7 @@ def generate_image(prompt, width, height, steps, seed):

         samplercustomadvanced_67 = samplercustomadvanced.sample(
             noise=get_value_at_index(randomnoise_68, 0),
-            guider=get_value_at_index(basicguider_73, 0),
+            guider=get_value_at_index(cfgguider_73, 0),
             sampler=get_value_at_index(ksamplerselect_72, 0),
             sigmas=get_value_at_index(betascheduler_84, 0),
             latent_image=get_value_at_index(emptysd3latentimage_69, 0),
@@ -259,8 +265,15 @@ Space Author: [GitHub](https://github.com/gokayfem) | [X.com](https://x.com/gokayfem)
         prompt = gr.Textbox(
             label="Prompt",
             placeholder="Enter your prompt here...",
+            value="Overlaid at the center of the image is a title text that says \"CHROMA1-FLASH-HEUN\" in a large white 3D letters. This is a close-up photograph from a nature documentary capturing the right side of the face of a tiger. The photograph is centered on its highly detailed and speckled eye surrounded by intricately detailed fur. Amateur photography. unfiltered. natural lighting. anatomically correct. subtle shadows. perfect composition. highest quality. detailed. sharp focus",
             lines=3
         )
+        negative_prompt = gr.Textbox(
+            label="Negative Prompt",
+            placeholder="Enter negative prompt here...",
+            value="low quality, ugly, unfinished, out of focus, deformed, disfigure, blurry, smudged, restricted palette, flat colors",
+            lines=2
+        )

         with gr.Row():
             width = gr.Slider(
@@ -282,10 +295,17 @@ Space Author: [GitHub](https://github.com/gokayfem) | [X.com](https://x.com/gokayfem)
             steps = gr.Slider(
                 minimum=1,
                 maximum=50,
-                value=24,
+                value=12,
                 step=1,
                 label="Steps"
             )
+            cfg = gr.Slider(
+                minimum=1,
+                maximum=20,
+                value=1,
+                step=0.01,
+                label="CFG Scale"
+            )
             seed = gr.Number(
                 value=-1,
                 label="Seed (-1 for random)"
@@ -298,14 +318,14 @@ Space Author: [GitHub](https://github.com/gokayfem) | [X.com](https://x.com/gokayfem)

     generate_btn.click(
         fn=generate_image,
-        inputs=[prompt, width, height, steps, seed],
+        inputs=[prompt, negative_prompt, width, height, steps, cfg, seed],
         outputs=[output_image]
     )

     # Add examples section
     gr.Examples(
         examples=EXAMPLES,
-        inputs=[prompt, width, height, steps, seed],
+        inputs=[prompt, negative_prompt, width, height, steps, cfg, seed],
         outputs=[output_image],
         fn=generate_image,
         cache_examples=True,
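Both the button handler and the cached examples must pass the same seven inputs in the same order as the new signature, or Gradio will bind values to the wrong parameters; this also assumes each row of EXAMPLES elsewhere in the file was updated to seven values, since stale five-value rows would break with cache_examples=True. A hypothetical direct call with illustrative arguments (the return value is assumed to be the decoded image, as is typical for ComfyUI-to-Gradio exports):

    image = generate_image(
        prompt="close-up photograph of a tiger's eye",
        negative_prompt="low quality, blurry",
        width=1024,
        height=1024,
        steps=12,
        cfg=1.0,   # raise above 1.0 to give the negative prompt any effect
        seed=-1,   # -1 -> random seed per call
    )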
 