leoeric committed on
Commit
0a33f4f
·
1 Parent(s): b9eaf60

Fix log visibility: Display logs directly in UI instead of relying on file access

Browse files

- Remove dependency on outputs/ folder for logs (not accessible in HF Spaces)
- Display all logs directly in the status output
- Show full STDERR, STDOUT, and log content in error messages
- No need to access Files tab - everything shown in UI
- Better for debugging GPU abort issues

Files changed (1) hide show
  1. app.py +35 -61
app.py CHANGED
@@ -199,7 +199,8 @@ def _generate_image_impl(prompt, aspect_ratio, cfg, seed, checkpoint_file, confi
199
  status_msg += " - Subsequent runs: Only generation time (~1-3 min)\n"
200
 
201
  try:
202
- # Create output directory
 
203
  output_dir = Path("outputs")
204
  output_dir.mkdir(exist_ok=True)
205
 
@@ -218,18 +219,16 @@ def _generate_image_impl(prompt, aspect_ratio, cfg, seed, checkpoint_file, confi
218
  "--jacobi", "1",
219
  "--jacobi_th", "0.001",
220
  "--jacobi_block_size", "16",
221
- "--logdir", str(output_dir) # Set logdir to outputs directory
222
  ]
223
 
224
  status_msg += "πŸš€ Running generation...\n"
225
  status_msg += "πŸ“Š Current step: Model inference (checkpoint should already be downloaded)\n"
226
 
227
- # Create log file for debugging
228
- log_file = output_dir / "generation.log"
229
- status_msg += f"\nπŸ“‹ LOG FILE LOCATION:\n"
230
- status_msg += f" File: {log_file}\n"
231
- status_msg += f" View in Space: Files tab β†’ outputs β†’ generation.log\n"
232
- status_msg += f" (Logs are written in real-time during generation)\n\n"
233
 
234
  # Ensure GPU environment variables are passed to subprocess
235
  env = os.environ.copy()
@@ -266,39 +265,24 @@ def _generate_image_impl(prompt, aspect_ratio, cfg, seed, checkpoint_file, confi
266
  timeout=2700
267
  )
268
 
269
- # Write comprehensive log file
270
- with open(log_file, 'w') as log:
271
- log.write("=== GENERATION LOG ===\n\n")
272
- log.write(f"Command: {' '.join(cmd)}\n\n")
273
- log.write(f"Environment Variables:\n")
274
- log.write(f" CUDA_VISIBLE_DEVICES={env.get('CUDA_VISIBLE_DEVICES', 'not set')}\n")
275
- log.write(f" CUDA_AVAILABLE={torch.cuda.is_available()}\n")
276
- if torch.cuda.is_available():
277
- log.write(f" GPU_NAME={torch.cuda.get_device_name(0)}\n")
278
- log.write(f" GPU_MEMORY_TOTAL={torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB\n")
279
- log.write(f"\n")
280
- log.write("=== STDOUT ===\n")
281
- log.write(result.stdout if result.stdout else "(empty)\n")
282
- log.write("\n\n=== STDERR ===\n")
283
- log.write(result.stderr if result.stderr else "(empty)\n")
284
- log.write(f"\n\n=== RETURN CODE: {result.returncode} ===\n")
285
-
286
- # Add note about GPU abort
287
- if result.returncode != 0:
288
- log.write(f"\n⚠️ PROCESS FAILED WITH RETURN CODE {result.returncode}\n")
289
- log.write("This could indicate:\n")
290
- log.write("- GPU abort/timeout\n")
291
- log.write("- CUDA out of memory\n")
292
- log.write("- Process killed by system\n")
293
- log.write("- Model loading error\n")
294
- log.write("\nCheck the STDERR section above for detailed error messages.\n")
295
 
296
- # Read log file for detailed output
297
- log_content = ""
298
- if log_file.exists():
299
- with open(log_file, 'r') as f:
300
- log_content = f.read()
301
- status_msg += f"\nπŸ“‹ Full logs available at: {log_file}\n"
302
 
303
  if result.returncode != 0:
304
  error_msg = f"❌ Error during generation (return code: {result.returncode})\n\n"
@@ -343,28 +327,18 @@ def _generate_image_impl(prompt, aspect_ratio, cfg, seed, checkpoint_file, confi
343
  stdout_preview = result.stdout[-5000:] if len(result.stdout) > 5000 else result.stdout
344
  error_msg += f"{stdout_preview}\n\n"
345
 
346
- # Show log file content if available
347
- if log_content:
348
- error_msg += f"=== LOG FILE CONTENT ({log_file}) ===\n"
349
- # Show last 5000 chars of log
350
- log_preview = log_content[-5000:] if len(log_content) > 5000 else log_content
351
- error_msg += f"{log_preview}\n\n"
352
- else:
353
- error_msg += f"⚠️ Log file not found at: {log_file}\n\n"
354
 
355
- # Instructions on where to find logs
356
  error_msg += f"{'='*80}\n"
357
- error_msg += f"πŸ“ HOW TO VIEW FULL LOGS:\n"
358
  error_msg += f"{'='*80}\n"
359
- error_msg += f"OPTION 1 - Space Files Tab (Recommended):\n"
360
- error_msg += f" 1. Click 'Files' tab in your Space\n"
361
- error_msg += f" 2. Navigate to: outputs/generation.log\n"
362
- error_msg += f" 3. Click to view/download the full log\n\n"
363
- error_msg += f"OPTION 2 - Space Logs Tab:\n"
364
- error_msg += f" 1. Click 'Logs' tab in your Space\n"
365
- error_msg += f" 2. Look for messages starting with '[sample.py]'\n"
366
- error_msg += f" 3. Check for GPU abort or CUDA errors\n\n"
367
- error_msg += f"Full log path: {log_file}\n"
368
  error_msg += f"{'='*80}\n"
369
 
370
  return None, error_msg
@@ -413,9 +387,9 @@ def _generate_image_impl(prompt, aspect_ratio, cfg, seed, checkpoint_file, confi
413
  error_msg += f"Searched in: {output_dir} and {model_output_dir}\n"
414
  error_msg += debug_info
415
  if log_content:
416
- error_msg += f"\n\nπŸ“‹ Check log file for details: {log_file}\nLast 2000 chars:\n{log_content[-2000:]}"
417
  else:
418
- error_msg += f"\n\nCheck stdout:\n{result.stdout[-1000:]}"
419
  return None, error_msg
420
 
421
  except Exception as e:
 
199
  status_msg += " - Subsequent runs: Only generation time (~1-3 min)\n"
200
 
201
  try:
202
+ # Create output directory (use /tmp for logs, outputs/ for images)
203
+ # In HF Spaces, /tmp is accessible and outputs/ may not be visible in Files tab
204
  output_dir = Path("outputs")
205
  output_dir.mkdir(exist_ok=True)
206
 
 
219
  "--jacobi", "1",
220
  "--jacobi_th", "0.001",
221
  "--jacobi_block_size", "16",
222
+ "--logdir", str(output_dir) # Set logdir to outputs directory for images
223
  ]
224
 
225
  status_msg += "πŸš€ Running generation...\n"
226
  status_msg += "πŸ“Š Current step: Model inference (checkpoint should already be downloaded)\n"
227
 
228
+ # Note about log file location
229
+ status_msg += f"\nπŸ“‹ LOGS:\n"
230
+ status_msg += f" All logs will be shown in the status output below\n"
231
+ status_msg += f" (Logs are captured in real-time)\n\n"
 
 
232
 
233
  # Ensure GPU environment variables are passed to subprocess
234
  env = os.environ.copy()
 
265
  timeout=2700
266
  )
267
 
268
+ # Build comprehensive log content for display (not relying on file access)
269
+ log_content_parts = []
270
+ log_content_parts.append("=== GENERATION LOG ===\n\n")
271
+ log_content_parts.append(f"Command: {' '.join(cmd)}\n\n")
272
+ log_content_parts.append(f"Environment Variables:\n")
273
+ log_content_parts.append(f" CUDA_VISIBLE_DEVICES={env.get('CUDA_VISIBLE_DEVICES', 'not set')}\n")
274
+ log_content_parts.append(f" CUDA_AVAILABLE={torch.cuda.is_available()}\n")
275
+ if torch.cuda.is_available():
276
+ log_content_parts.append(f" GPU_NAME={torch.cuda.get_device_name(0)}\n")
277
+ log_content_parts.append(f" GPU_MEMORY_TOTAL={torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB\n")
278
+ log_content_parts.append(f"\n")
279
+ log_content_parts.append("=== STDOUT ===\n")
280
+ log_content_parts.append(result.stdout if result.stdout else "(empty)\n")
281
+ log_content_parts.append("\n\n=== STDERR ===\n")
282
+ log_content_parts.append(result.stderr if result.stderr else "(empty)\n")
283
+ log_content_parts.append(f"\n\n=== RETURN CODE: {result.returncode} ===\n")
 
 
 
 
 
 
 
 
 
 
284
 
285
+ log_content = ''.join(log_content_parts)
 
 
 
 
 
286
 
287
  if result.returncode != 0:
288
  error_msg = f"❌ Error during generation (return code: {result.returncode})\n\n"
 
327
  stdout_preview = result.stdout[-5000:] if len(result.stdout) > 5000 else result.stdout
328
  error_msg += f"{stdout_preview}\n\n"
329
 
330
+ # Show full log content directly in error message (no file access needed)
331
+ error_msg += f"=== FULL GENERATION LOG ===\n"
332
+ error_msg += f"{log_content}\n\n"
 
 
 
 
 
333
 
334
+ # Instructions on where to find more info
335
  error_msg += f"{'='*80}\n"
336
+ error_msg += f"πŸ“ ADDITIONAL DEBUGGING:\n"
337
  error_msg += f"{'='*80}\n"
338
+ error_msg += f"1. Check the Space 'Logs' tab for container logs\n"
339
+ error_msg += f"2. Look for messages from sample.py\n"
340
+ error_msg += f"3. Check for GPU abort or CUDA errors\n"
341
+ error_msg += f"4. All logs are shown above in this error message\n"
 
 
 
 
 
342
  error_msg += f"{'='*80}\n"
343
 
344
  return None, error_msg
 
387
  error_msg += f"Searched in: {output_dir} and {model_output_dir}\n"
388
  error_msg += debug_info
389
  if log_content:
390
+ error_msg += f"\n\nπŸ“‹ Full log details:\n{log_content[-2000:]}"
391
  else:
392
+ error_msg += f"\n\nCheck stdout:\n{result.stdout[-1000:] if result.stdout else '(no output)'}"
393
  return None, error_msg
394
 
395
  except Exception as e: