|
|
import os |
|
|
import sys |
|
|
sys.path.append(os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './submodules/FramePack')))) |
|
|
|
|
|
|
|
|
import asyncio |
|
|
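# Use the selector event loop on Windows: the default Proactor loop can break
# libraries (such as Gradio's dependencies) that expect selector semantics.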
if sys.platform in ('win32', 'cygwin'): |
|
|
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) |
|
|
|
|
|
from diffusers_helper.hf_login import login |
|
|
|
|
|
|
|
import random |
|
|
import time |
|
|
import subprocess |
|
|
import traceback |
|
|
|
|
|
import yaml |
|
|
import zipfile |
|
|
|
|
|
import argparse |
|
|
|
|
|
|
|
|
|
|
sys.path.append(os.path.abspath(os.path.dirname(__file__))) |
|
|
from eichi_utils.png_metadata import ( |
|
|
embed_metadata_to_png, extract_metadata_from_png, extract_metadata_from_numpy_array, |
|
|
PROMPT_KEY, SEED_KEY, SECTION_PROMPT_KEY, SECTION_NUMBER_KEY |
|
|
) |
|
|
|
|
|
parser = argparse.ArgumentParser() |
|
|
parser.add_argument('--share', action='store_true') |
|
|
parser.add_argument("--server", type=str, default='127.0.0.1') |
|
|
parser.add_argument("--port", type=int, default=8001) |
|
|
parser.add_argument("--inbrowser", action='store_true') |
|
|
parser.add_argument("--lang", type=str, default='ja', help="Language: ja, zh-tw, en") |
|
|
args = parser.parse_args() |
|
|
|
|
|
|
|
|
from locales.i18n_extended import (set_lang, translate) |
|
|
set_lang(args.lang) |
|
|
|
|
|
try: |
|
|
import winsound |
|
|
HAS_WINSOUND = True |
|
|
except ImportError: |
|
|
HAS_WINSOUND = False |
|
|
import json |
|
|
|
|
from datetime import datetime, timedelta |
|
|
|
|
|
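# Cache Hugging Face downloads under ./hf_download next to this script,
# unless the user has already configured HF_HOME.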
if 'HF_HOME' not in os.environ: |
|
|
os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download'))) |
|
|
print(translate("HF_HOMEを設定: {0}").format(os.environ['HF_HOME'])) |
|
|
else: |
|
|
print(translate("既存のHF_HOMEを使用: {0}").format(os.environ['HF_HOME'])) |
|
|
temp_dir = "./temp_for_zip_section_info" |
|
|
|
|
|
|
|
|
has_lora_support = False |
|
|
try: |
|
|
import lora_utils |
|
|
has_lora_support = True |
|
|
except ImportError: |
|
|
print(translate("LoRAサポートが無効です(lora_utilsモジュールがインストールされていません)")) |
|
|
|
|
|
|
|
|
from eichi_utils.settings_manager import load_app_settings_f1 |
|
|
saved_app_settings = load_app_settings_f1() |
|
|
|
|
|
|
|
|
if not saved_app_settings:
    print(translate(" 保存された設定が見つかりません。デフォルト設定を使用します"))
|
|
|
|
|
|
|
|
|
|
from eichi_utils.video_mode_settings import ( |
|
|
VIDEO_MODE_SETTINGS, get_video_modes, get_video_seconds, get_important_keyframes, |
|
|
get_copy_targets, get_max_keyframes_count, get_total_sections, generate_keyframe_guide_html, |
|
|
handle_mode_length_change, process_keyframe_change, MODE_TYPE_NORMAL |
|
|
|
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.settings_manager import ( |
|
|
get_settings_file_path, |
|
|
get_output_folder_path, |
|
|
initialize_settings, |
|
|
load_settings, |
|
|
save_settings, |
|
|
open_output_folder |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.log_manager import ( |
|
|
enable_logging, disable_logging, is_logging_enabled, |
|
|
get_log_folder, set_log_folder, open_log_folder, |
|
|
get_default_log_settings, load_log_settings, apply_log_settings |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.preset_manager import ( |
|
|
initialize_presets, |
|
|
load_presets, |
|
|
get_default_startup_prompt, |
|
|
save_preset, |
|
|
delete_preset |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.keyframe_handler import ( |
|
|
ui_to_code_index, |
|
|
code_to_ui_index, |
|
|
unified_keyframe_change_handler, |
|
|
unified_input_image_change_handler |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.keyframe_handler_extended import extended_mode_length_change_handler |
|
|
import gradio as gr |
|
|
|
|
|
from eichi_utils.ui_styles import get_app_css |
|
|
import torch |
|
|
import einops |
|
|
import safetensors.torch as sf |
|
|
import numpy as np |
|
|
import math |
|
|
|
|
|
from PIL import Image |
|
|
from diffusers import AutoencoderKLHunyuanVideo |
|
|
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer |
|
|
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake |
|
|
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp |
|
|
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked |
|
|
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan |
|
|
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete |
|
|
from diffusers_helper.thread_utils import AsyncStream, async_run |
|
|
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html |
|
|
from transformers import SiglipImageProcessor, SiglipVisionModel |
|
|
from diffusers_helper.clip_vision import hf_clip_vision_encode |
|
|
from diffusers_helper.bucket_tools import find_nearest_bucket |
|
|
|
|
|
from eichi_utils.transformer_manager import TransformerManager |
|
|
from eichi_utils.text_encoder_manager import TextEncoderManager |
|
|
|
|
|
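# Detect free VRAM once at startup; with more than 100GB free, all models are
# kept resident on the GPU instead of being swapped in and out per stage.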
free_mem_gb = get_cuda_free_memory_gb(gpu) |
|
|
high_vram = free_mem_gb > 100 |
|
|
|
|
|
print(translate('Free VRAM {0} GB').format(free_mem_gb)) |
|
|
print(translate('High-VRAM Mode: {0}').format(high_vram)) |
|
|
|
|
|
|
|
|
from eichi_utils.model_downloader import ModelDownloader |
|
|
ModelDownloader().download_f1() |
|
|
|
|
|
|
|
|
|
|
|
transformer_manager = TransformerManager(device=gpu, high_vram_mode=high_vram, use_f1_model=True) |
|
|
text_encoder_manager = TextEncoderManager(device=gpu, high_vram_mode=high_vram) |
|
|
|
|
|
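# Load tokenizers, VAE, text encoders (via TextEncoderManager), the F1
# transformer (via TransformerManager) and the SigLIP image encoder.
# Any failure here is fatal and terminates the program.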
try: |
|
|
tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer') |
|
|
tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2') |
|
|
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu() |
|
|
|
|
|
|
|
|
if not text_encoder_manager.ensure_text_encoder_state(): |
|
|
raise Exception(translate("text_encoderとtext_encoder_2の初期化に失敗しました")) |
|
|
text_encoder, text_encoder_2 = text_encoder_manager.get_text_encoders() |
|
|
|
|
|
|
|
|
transformer_manager.ensure_download_models() |
|
|
transformer = transformer_manager.get_transformer() |
|
|
|
|
|
|
|
|
feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor') |
|
|
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu() |
|
|
except Exception as e: |
|
|
print(translate("モデル読み込みエラー: {0}").format(e)) |
|
|
print(translate("プログラムを終了します...")) |
|
|
import sys |
|
|
sys.exit(1) |
|
|
|
|
|
vae.eval() |
|
|
image_encoder.eval() |
|
|
|
|
|
if not high_vram: |
|
|
vae.enable_slicing() |
|
|
vae.enable_tiling() |
|
|
|
|
|
vae.to(dtype=torch.float16) |
|
|
image_encoder.to(dtype=torch.float16) |
|
|
|
|
|
vae.requires_grad_(False) |
|
|
image_encoder.requires_grad_(False) |
|
|
|
|
|
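# Low-VRAM mode: install DynamicSwapInstaller so transformer weights are
# swapped onto the GPU on demand; otherwise keep VAE and image encoder on GPU.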
if not high_vram: |
|
|
|
|
|
DynamicSwapInstaller.install_model(transformer, device=gpu) |
|
|
else: |
|
|
image_encoder.to(gpu) |
|
|
vae.to(gpu) |
|
|
|
|
|
stream = AsyncStream() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
webui_folder = os.path.dirname(os.path.abspath(__file__)) |
|
|
|
|
|
|
|
|
settings_folder = os.path.join(webui_folder, 'settings') |
|
|
os.makedirs(settings_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
initialize_settings() |
|
|
|
|
|
|
|
|
from eichi_utils.lora_preset_manager import initialize_lora_presets |
|
|
initialize_lora_presets() |
|
|
|
|
|
|
|
|
base_path = os.path.dirname(os.path.abspath(__file__)) |
|
|
|
|
|
|
|
|
app_settings = load_settings() |
|
|
output_folder_name = app_settings.get('output_folder', 'outputs') |
|
|
print(translate("設定から出力フォルダを読み込み: {0}").format(output_folder_name)) |
|
|
|
|
|
|
|
|
log_settings = app_settings.get('log_settings', get_default_log_settings()) |
|
|
print(translate("ログ設定を読み込み: 有効={0}, フォルダ={1}").format( |
|
|
log_settings.get('log_enabled', False), |
|
|
log_settings.get('log_folder', 'logs') |
|
|
)) |
|
|
if log_settings.get('log_enabled', False): |
|
|
|
|
|
enable_logging(log_settings.get('log_folder', 'logs'), source_name="endframe_ichi_f1") |
|
|
print(translate("ログ出力を有効化しました")) |
|
|
|
|
|
|
|
|
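# Queue state (prompt queue / image queue) shared with the UI handlers.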
queue_enabled = False |
|
|
queue_type = "prompt" |
|
|
prompt_queue_file_path = None |
|
|
image_queue_files = [] |
|
|
input_folder_name_value = app_settings.get('input_folder', 'inputs') |
|
|
|
|
|
|
|
|
input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), input_folder_name_value) |
|
|
print(translate("設定から入力フォルダを読み込み: {0}").format(input_folder_name_value)) |
|
|
|
|
|
|
|
|
outputs_folder = get_output_folder_path(output_folder_name) |
|
|
os.makedirs(outputs_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
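# Collect queued input images (.png/.jpg/.jpeg) from the input folder,
# sorted by filename, and cache the list in image_queue_files.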
def get_image_queue_files(): |
|
|
global image_queue_files, input_folder_name_value |
|
|
input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), input_folder_name_value) |
|
|
|
|
|
|
|
|
if not os.path.exists(input_dir): |
|
|
print(translate("入力ディレクトリが存在しません: {0}(保存及び入力フォルダを開くボタンを押すと作成されます)").format(input_dir)) |
|
|
return [] |
|
|
|
|
|
|
|
|
image_files = [] |
|
|
for file in sorted(os.listdir(input_dir)): |
|
|
if file.lower().endswith(('.png', '.jpg', '.jpeg')): |
|
|
image_path = os.path.join(input_dir, file) |
|
|
image_files.append(image_path) |
|
|
|
|
|
print(translate("入力ディレクトリから画像ファイル{0}個を読み込みました").format(len(image_files))) |
|
|
|
|
|
image_queue_files = image_files |
|
|
return image_files |
|
|
|
|
|
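# Main generation worker (F1 mode): encodes prompts and the input image, then
# generates the video section by section, streaming progress and output files
# through `stream`. NOTE: section_settings, frame_size_setting and
# alarm_on_completion are referenced in the body, so they are accepted as
# keyword arguments below (presumably supplied by the UI layer).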
@torch.no_grad() |
|
|
def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf=16, all_padding_value=1.0, image_strength=1.0, keep_section_videos=False, lora_files=None, lora_files2=None, lora_files3=None, lora_scales_text="0.8,0.8,0.8", output_dir=None, save_section_frames=False, use_all_padding=False, use_lora=False, lora_mode=None, lora_dropdown1=None, lora_dropdown2=None, lora_dropdown3=None, save_tensor_data=False, tensor_data_input=None, fp8_optimization=False, resolution=640, batch_index=None, frame_save_mode=None, section_settings=None, frame_size_setting=None, alarm_on_completion=None):
|
|
|
|
|
|
|
|
save_latent_frames = False |
|
|
save_last_section_frames = False |
|
|
|
|
|
if frame_save_mode == translate("全フレーム画像保存"): |
|
|
save_latent_frames = True |
|
|
elif frame_save_mode == translate("最終セクションのみ全フレーム画像保存"): |
|
|
save_last_section_frames = True |
|
|
|
|
|
|
|
|
# The original branched on isinstance(input_image, str), but both branches were identical.
has_any_image = (input_image is not None)
|
|
last_visible_section_image = None |
|
|
last_visible_section_num = -1 |
|
|
|
|
|
if not has_any_image and section_settings is not None: |
|
|
|
|
|
total_display_sections = None |
|
|
try: |
|
|
|
|
|
seconds = get_video_seconds(total_second_length) |
|
|
|
|
|
|
|
|
current_latent_window_size = 4.5 if frame_size_setting == translate("0.5秒 (17フレーム)") else 9
|
|
frame_count = current_latent_window_size * 4 - 3 |
|
|
|
|
|
|
|
|
total_frames = int(seconds * 30) |
|
|
total_display_sections = int(max(round(total_frames / frame_count), 1)) |
|
|
except Exception as e: |
|
|
print(translate("セクション数計算エラー: {0}").format(e)) |
|
|
|
|
|
|
|
|
valid_sections = [] |
|
|
for section in section_settings: |
|
|
if section and len(section) > 1 and section[0] is not None and section[1] is not None: |
|
|
try: |
|
|
section_num = int(section[0]) |
|
|
|
|
|
if total_display_sections is None or section_num < total_display_sections: |
|
|
valid_sections.append((section_num, section[1])) |
|
|
except (ValueError, TypeError): |
|
|
continue |
|
|
|
|
|
|
|
|
if valid_sections: |
|
|
|
|
|
valid_sections.sort(key=lambda x: x[0]) |
|
|
|
|
|
last_visible_section_num, last_visible_section_image = valid_sections[-1] |
|
|
|
|
|
has_any_image = has_any_image or (last_visible_section_image is not None) |
|
|
if not has_any_image: |
|
|
raise ValueError("入力画像または表示されている最後のキーフレーム画像のいずれかが必要です") |
|
|
|
|
|
|
|
|
if input_image is None and last_visible_section_image is not None: |
|
|
print(translate("入力画像が指定されていないため、セクション{0}のキーフレーム画像を使用します").format(last_visible_section_num)) |
|
|
input_image = last_visible_section_image |
|
|
|
|
|
|
|
|
global outputs_folder |
|
|
global output_folder_name |
|
|
if output_dir and output_dir.strip(): |
|
|
|
|
|
outputs_folder = get_output_folder_path(output_dir) |
|
|
print(translate("出力フォルダを設定: {0}").format(outputs_folder)) |
|
|
|
|
|
|
|
|
if output_dir != output_folder_name: |
|
|
settings = load_settings() |
|
|
settings['output_folder'] = output_dir |
|
|
if save_settings(settings): |
|
|
output_folder_name = output_dir |
|
|
print(translate("出力フォルダ設定を保存しました: {0}").format(output_dir)) |
|
|
else: |
|
|
|
|
|
outputs_folder = get_output_folder_path(output_folder_name) |
|
|
print(translate("デフォルト出力フォルダを使用: {0}").format(outputs_folder)) |
|
|
|
|
|
|
|
|
os.makedirs(outputs_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
process_start_time = time.time() |
|
|
|
|
|
|
|
|
global transformer, text_encoder, text_encoder_2 |
|
|
|
|
|
|
|
|
if not text_encoder_manager.ensure_text_encoder_state(): |
|
|
raise Exception(translate("text_encoderとtext_encoder_2の初期化に失敗しました")) |
|
|
text_encoder, text_encoder_2 = text_encoder_manager.get_text_encoders() |
|
|
|
|
|
|
|
|
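# Estimate the number of sections at 30fps. This first estimate uses
# latent_window_size * 4 frames per section; it is recomputed below with the
# exact per-section frame count (latent_window_size * 4 - 3) and the
# recomputed value takes precedence.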
total_latent_sections = (total_second_length * 30) / (latent_window_size * 4) |
|
|
total_latent_sections = int(max(round(total_latent_sections), 1)) |
|
|
|
|
|
|
|
|
|
|
|
total_sections = total_latent_sections |
|
|
|
|
|
|
|
|
|
|
|
batch_suffix = f"_batch{batch_index+1}" if batch_index is not None else "" |
|
|
job_id = generate_timestamp() + batch_suffix |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if total_second_length > 0: |
|
|
sections_by_frames = int(max(round((total_second_length * 30) / (latent_window_size * 4 - 3)), 1)) |
|
|
if sections_by_frames != total_sections: |
|
|
print(translate("セクション数に不一致があります!計算値を優先します")) |
|
|
total_sections = sections_by_frames |
|
|
|
|
|
print(translate("セクション生成詳細 (F1モード):")) |
|
|
print(translate(" - 合計セクション数: {0} (最終確定値)").format(total_sections)) |
|
|
frame_count = latent_window_size * 4 - 3 |
|
|
print(translate(" - 各セクションのフレーム数: 約{0}フレーム (latent_window_size: {1})").format(frame_count, latent_window_size)) |
|
|
|
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...')))) |
|
|
|
|
|
try: |
|
|
|
|
|
section_map = None |
|
|
section_numbers_sorted = [] |
|
|
|
|
|
|
|
|
if not high_vram: |
|
|
|
|
|
unload_complete_models( |
|
|
image_encoder, vae |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, translate("Text encoding ..."))))) |
|
|
|
|
|
if not high_vram: |
|
|
fake_diffusers_current_device(text_encoder, gpu) |
|
|
load_model_as_complete(text_encoder_2, target_device=gpu) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
using_custom_txt = False |
|
|
if queue_enabled and queue_type == "image" and batch_index is not None and batch_index > 0: |
|
|
if batch_index - 1 < len(image_queue_files): |
|
|
img_path = image_queue_files[batch_index - 1] |
|
|
txt_path = os.path.splitext(img_path)[0] + ".txt" |
|
|
if os.path.exists(txt_path): |
|
|
using_custom_txt = True |
|
|
|
|
|
|
|
|
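# prompt_source below is for logging only: when the prompt queue or a
# per-image .txt file is used, `prompt` is assumed to have been replaced
# upstream before worker() was called.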
actual_prompt = prompt |
|
|
prompt_source = translate("共通プロンプト") |
|
|
|
|
|
|
|
|
if queue_enabled and queue_type == "prompt" and batch_index is not None: |
|
|
|
|
|
prompt_source = translate("プロンプトキュー") |
|
|
print(translate("プロンプトキューからのプロンプトをエンコードしています...")) |
|
|
elif using_custom_txt: |
|
|
|
|
|
actual_prompt = prompt |
|
|
prompt_source = translate("カスタムプロンプト(イメージキュー)") |
|
|
print(translate("カスタムプロンプトをエンコードしています...")) |
|
|
else: |
|
|
|
|
|
print(translate("共通プロンプトをエンコードしています...")) |
|
|
|
|
|
|
|
|
print(translate("プロンプト情報: ソース: {0}").format(prompt_source)) |
|
|
print(translate("プロンプト情報: 内容: {0}").format(actual_prompt)) |
|
|
|
|
|
llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2) |
|
|
|
|
|
if cfg == 1: |
|
|
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler) |
|
|
else: |
|
|
llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2) |
|
|
|
|
|
llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512) |
|
|
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512) |
|
|
|
|
|
|
|
|
|
|
|
if not high_vram: |
|
|
text_encoder, text_encoder_2 = None, None |
|
|
text_encoder_manager.dispose_text_encoders() |
|
|
|
|
|
|
|
|
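# Optional tensor input: a .safetensors file whose "history_latents" entry
# is appended to the end of the generated video after the last section.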
uploaded_tensor = None |
|
|
if tensor_data_input is not None: |
|
|
try: |
|
|
|
|
|
if isinstance(tensor_data_input, list): |
|
|
if tensor_data_input and hasattr(tensor_data_input[0], 'name'): |
|
|
tensor_data_input = tensor_data_input[0] |
|
|
else: |
|
|
tensor_data_input = None |
|
|
|
|
|
if tensor_data_input is not None and hasattr(tensor_data_input, 'name'): |
|
|
tensor_path = tensor_data_input.name |
|
|
print(translate("テンソルデータを読み込み: {0}").format(os.path.basename(tensor_path))) |
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, translate('Loading tensor data ...'))))) |
|
|
|
|
|
|
|
|
tensor_dict = sf.load_file(tensor_path) |
|
|
|
|
|
|
|
|
print(translate("テンソルデータの内容:")) |
|
|
for key, tensor in tensor_dict.items(): |
|
|
print(translate(" - {0}: shape={1}, dtype={2}").format(key, tensor.shape, tensor.dtype)) |
|
|
|
|
|
|
|
|
if "history_latents" in tensor_dict: |
|
|
uploaded_tensor = tensor_dict["history_latents"] |
|
|
print(translate("テンソルデータ読み込み成功: shape={0}, dtype={1}").format(uploaded_tensor.shape, uploaded_tensor.dtype)) |
|
|
stream.output_queue.push(('progress', (None, translate('Tensor data loaded successfully!'), make_progress_bar_html(10, translate('Tensor data loaded successfully!'))))) |
|
|
else: |
|
|
print(translate("警告: テンソルデータに 'history_latents' キーが見つかりません")) |
|
|
except Exception as e: |
|
|
print(translate("テンソルデータ読み込みエラー: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
|
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, translate("Image processing ..."))))) |
|
|
|
|
|
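# Normalize any input (path, numpy array or tensor) to a bucketed,
# center-cropped image: returns the uint8 array, a [-1, 1] BCTHW tensor,
# and the bucket height/width.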
def preprocess_image(img_path_or_array, resolution=640): |
|
|
"""Pathまたは画像配列を処理して適切なサイズに変換する""" |
|
|
if img_path_or_array is None: |
|
|
|
|
|
img = np.zeros((resolution, resolution, 3), dtype=np.uint8) |
|
|
height = width = resolution |
|
|
return img, img, height, width |
|
|
|
|
|
|
|
|
if isinstance(img_path_or_array, torch.Tensor): |
|
|
img_path_or_array = img_path_or_array.cpu().numpy() |
|
|
|
|
|
|
|
|
if isinstance(img_path_or_array, str) and os.path.exists(img_path_or_array): |
|
|
img = np.array(Image.open(img_path_or_array).convert('RGB')) |
|
|
else: |
|
|
|
|
|
img = img_path_or_array |
|
|
|
|
|
H, W, C = img.shape |
|
|
|
|
|
height, width = find_nearest_bucket(H, W, resolution=resolution) |
|
|
img_np = resize_and_center_crop(img, target_width=width, target_height=height) |
|
|
img_pt = torch.from_numpy(img_np).float() / 127.5 - 1 |
|
|
img_pt = img_pt.permute(2, 0, 1)[None, :, None] |
|
|
return img_np, img_pt, height, width |
|
|
|
|
|
|
|
|
|
|
|
input_image_np, input_image_pt, height, width = preprocess_image(input_image, resolution=resolution) |
|
|
|
|
|
|
|
|
|
|
|
initial_image_path = os.path.join(outputs_folder, f'{job_id}.png') |
|
|
Image.fromarray(input_image_np).save(initial_image_path) |
|
|
|
|
|
|
|
|
metadata = { |
|
|
PROMPT_KEY: prompt, |
|
|
SEED_KEY: seed |
|
|
} |
|
|
embed_metadata_to_png(initial_image_path, metadata) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, translate("VAE encoding ..."))))) |
|
|
|
|
|
if not high_vram: |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
|
|
|
|
|
|
if uploaded_tensor is not None: |
|
|
print(translate("アップロードされたテンソルデータを検出: 動画生成後に後方に結合します")) |
|
|
|
|
|
if input_image is None: |
|
|
try: |
|
|
|
|
|
preview_latent = uploaded_tensor[:, :, 0:1, :, :].clone() |
|
|
if preview_latent.device != torch.device('cpu'): |
|
|
preview_latent = preview_latent.cpu() |
|
|
if preview_latent.dtype != torch.float16: |
|
|
preview_latent = preview_latent.to(dtype=torch.float16) |
|
|
|
|
|
decoded_image = vae_decode(preview_latent, vae) |
|
|
decoded_image = (decoded_image[0, :, 0] * 127.5 + 127.5).permute(1, 2, 0).cpu().numpy().clip(0, 255).astype(np.uint8) |
|
|
|
|
|
Image.fromarray(decoded_image).save(os.path.join(outputs_folder, f'{job_id}_tensor_preview.png')) |
|
|
|
|
|
input_image = decoded_image |
|
|
|
|
|
input_image_np, input_image_pt, height, width = preprocess_image(input_image, resolution=resolution)
|
|
print(translate("テンソルからデコードした画像を生成しました: {0}x{1}").format(height, width)) |
|
|
except Exception as e: |
|
|
print(translate("テンソルからのデコード中にエラーが発生しました: {0}").format(e)) |
|
|
|
|
|
|
|
|
|
|
|
tensor_info = translate("テンソルデータ ({0}フレーム) を検出しました。動画生成後に後方に結合します。").format(uploaded_tensor.shape[2]) |
|
|
stream.output_queue.push(('progress', (None, tensor_info, make_progress_bar_html(10, translate('テンソルデータを後方に結合'))))) |
|
|
|
|
|
|
|
|
start_latent = vae_encode(input_image_pt, vae) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, translate("CLIP Vision encoding ..."))))) |
|
|
|
|
|
if not high_vram: |
|
|
load_model_as_complete(image_encoder, target_device=gpu) |
|
|
|
|
|
image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder) |
|
|
image_encoder_last_hidden_state = image_encoder_output.last_hidden_state |
|
|
|
|
|
|
|
|
|
|
|
llama_vec = llama_vec.to(transformer.dtype) |
|
|
llama_vec_n = llama_vec_n.to(transformer.dtype) |
|
|
clip_l_pooler = clip_l_pooler.to(transformer.dtype) |
|
|
clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype) |
|
|
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, translate("Start sampling ..."))))) |
|
|
|
|
|
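# Seeded CPU generator for reproducible sampling; 0.5-second mode
# (latent_window_size == 4.5) generates exactly 17 frames per section.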
rnd = torch.Generator("cpu").manual_seed(seed) |
|
|
|
|
|
if latent_window_size == 4.5: |
|
|
num_frames = 17 |
|
|
else: |
|
|
num_frames = int(latent_window_size * 4 - 3) |
|
|
|
|
|
|
|
|
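# Latent history buffer. The 16 + 2 + 1 zero frames provide the 4x/2x/1x
# context expected by the sampler; the encoded start frame is appended next.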
history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu() |
|
|
history_pixels = None |
|
|
|
|
|
|
|
|
history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2) |
|
|
total_generated_latent_frames = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
original_use_lora = use_lora |
|
|
|
|
|
|
|
|
if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ: |
|
|
old_env = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "") |
|
|
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" |
|
|
print(translate("CUDA環境変数設定: PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True (元の値: {0})").format(old_env)) |
|
|
|
|
|
|
|
|
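# LoRA selection: collect paths either from the directory dropdowns or from
# uploaded files, parse the comma-separated scales (defaulting to 0.8), and
# hand everything to transformer_manager, which applies the LoRAs when the
# transformer is (re)loaded.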
current_lora_paths = [] |
|
|
current_lora_scales = [] |
|
|
|
|
|
if use_lora and has_lora_support: |
|
|
|
|
|
if lora_mode == translate("ディレクトリから選択"): |
|
|
print(translate("ディレクトリから選択モードでLoRAを処理します")) |
|
|
|
|
|
for dropdown in [lora_dropdown1, lora_dropdown2, lora_dropdown3]: |
|
|
if dropdown is not None and dropdown != translate("なし") and dropdown != 0: |
|
|
|
|
|
lora_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lora') |
|
|
lora_path = os.path.join(lora_dir, dropdown) |
|
|
if os.path.exists(lora_path): |
|
|
current_lora_paths.append(lora_path) |
|
|
print(translate("LoRAファイルを追加: {0}").format(lora_path)) |
|
|
else: |
|
|
print(translate("LoRAファイルが見つかりません: {0}").format(lora_path)) |
|
|
|
|
|
|
|
|
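# Gradio may deliver the dropdown's numeric index (0) instead of its label;
# this branch only logs diagnostics for that case. scan_lora_directory() is
# assumed to be defined elsewhere in this module (it is not imported above).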
if lora_dropdown2 == 0: |
|
|
try: |
|
|
|
|
|
choices = scan_lora_directory() |
|
|
if choices and len(choices) > 0: |
|
|
if choices[0] != translate("なし"): |
|
|
print(translate("予期しない選択肢リスト: 最初の要素が「なし」ではありません: {0}").format(choices[0])) |
|
|
except Exception as e: |
|
|
print(translate("ドロップダウン2の特別処理でエラー: {0}").format(e)) |
|
|
else: |
|
|
|
|
|
print(translate("ファイルアップロードモードでLoRAを処理します")) |
|
|
|
|
|
if lora_files is not None: |
|
|
if isinstance(lora_files, list): |
|
|
|
|
|
current_lora_paths.extend([file.name for file in lora_files]) |
|
|
else: |
|
|
|
|
|
current_lora_paths.append(lora_files.name) |
|
|
|
|
|
|
|
|
if lora_files2 is not None: |
|
|
if isinstance(lora_files2, list): |
|
|
|
|
|
current_lora_paths.extend([file.name for file in lora_files2]) |
|
|
else: |
|
|
|
|
|
current_lora_paths.append(lora_files2.name) |
|
|
|
|
|
|
|
|
if lora_files3 is not None: |
|
|
if isinstance(lora_files3, list): |
|
|
current_lora_paths.extend([file.name for file in lora_files3]) |
|
|
else: |
|
|
current_lora_paths.append(lora_files3.name) |
|
|
|
|
|
|
|
|
if current_lora_paths: |
|
|
try: |
|
|
scales_text = lora_scales_text.strip() |
|
|
if scales_text: |
|
|
|
|
|
scales = [float(scale.strip()) for scale in scales_text.split(',')] |
|
|
current_lora_scales = scales |
|
|
|
|
|
|
|
|
if len(scales) < len(current_lora_paths): |
|
|
current_lora_scales.extend([0.8] * (len(current_lora_paths) - len(scales))) |
|
|
else: |
|
|
|
|
|
current_lora_scales = [0.8] * len(current_lora_paths) |
|
|
except Exception as e: |
|
|
print(translate("LoRAスケール解析エラー: {0}").format(e)) |
|
|
print(translate("デフォルトスケール 0.8 を使用します")) |
|
|
current_lora_scales = [0.8] * len(current_lora_paths) |
|
|
|
|
|
|
|
|
if len(current_lora_scales) < len(current_lora_paths): |
|
|
|
|
|
current_lora_scales.extend([0.8] * (len(current_lora_paths) - len(current_lora_scales))) |
|
|
elif len(current_lora_scales) > len(current_lora_paths): |
|
|
|
|
|
current_lora_scales = current_lora_scales[:len(current_lora_paths)] |
|
|
|
|
|
|
|
|
if original_use_lora: |
|
|
use_lora = True |
|
|
print(translate("UIでLoRA使用が有効化されているため、LoRA使用を有効にします")) |
|
|
|
|
|
|
|
|
transformer_manager.set_next_settings( |
|
|
lora_paths=current_lora_paths, |
|
|
lora_scales=current_lora_scales, |
|
|
fp8_enabled=fp8_optimization, |
|
|
high_vram_mode=high_vram, |
|
|
force_dict_split=True |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print(translate("セクション処理開始前のtransformer状態チェック...")) |
|
|
try: |
|
|
|
|
|
if not transformer_manager.ensure_transformer_state(): |
|
|
raise Exception(translate("transformer状態の確認に失敗しました")) |
|
|
|
|
|
|
|
|
transformer = transformer_manager.get_transformer() |
|
|
print(translate("transformer状態チェック完了")) |
|
|
except Exception as e: |
|
|
print(translate("transformer状態チェックエラー: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
raise e |
|
|
|
|
|
|
|
|
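# Main F1 section loop: sections are generated forward in time and each new
# chunk of latents is appended to the history.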
for i_section in range(total_sections): |
|
|
|
|
|
is_first_section = i_section == 0 |
|
|
|
|
|
|
|
|
is_last_section = i_section == total_sections - 1 |
|
|
|
|
|
|
|
|
|
|
|
latent_padding = 1 |
|
|
|
|
|
latent_padding_size = int(latent_padding * latent_window_size) |
|
|
|
|
|
|
|
|
padding_info = translate("パディング値: {0} (F1モードでは影響なし)").format(latent_padding) |
|
|
print(translate("■ セクション{0}の処理開始 ({1})").format(i_section, padding_info)) |
|
|
print(translate(" - 現在の生成フレーム数: {0}フレーム").format(total_generated_latent_frames * 4 - 3)) |
|
|
print(translate(" - 生成予定フレーム数: {0}フレーム").format(num_frames)) |
|
|
print(translate(" - 最初のセクション?: {0}").format(is_first_section)) |
|
|
print(translate(" - 最後のセクション?: {0}").format(is_last_section)) |
|
|
|
|
|
|
|
|
current_latent = start_latent |
|
|
|
|
|
|
|
|
if stream.input_queue.top() == 'end': |
|
|
stream.output_queue.push(('end', None)) |
|
|
return |
|
|
|
|
|
|
|
|
current_llama_vec, current_clip_l_pooler, current_llama_attention_mask = llama_vec, clip_l_pooler, llama_attention_mask |
|
|
|
|
|
print(translate('latent_padding_size = {0}, is_last_section = {1}').format(latent_padding_size, is_last_section)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
effective_window_size = 5 if latent_window_size == 4.5 else int(latent_window_size) |
|
|
|
|
|
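# Build the index layout for the sampler: [start latent | 16 4x-context |
# 2 2x-context | 1 1x-context | new frames]. The matching latents are sliced
# from the tail of history_latents below.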
indices = torch.arange(0, sum([1, 16, 2, 1, effective_window_size])).unsqueeze(0) |
|
|
clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, effective_window_size], dim=1) |
|
|
clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1) |
|
|
|
|
|
clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2) |
|
|
clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2) |
|
|
|
|
|
if not high_vram: |
|
|
unload_complete_models() |
|
|
|
|
|
preserved_memory = float(gpu_memory_preservation) if gpu_memory_preservation is not None else 6.0 |
|
|
print(translate('Setting transformer memory preservation to: {0} GB').format(preserved_memory)) |
|
|
move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=preserved_memory) |
|
|
|
|
|
if use_teacache: |
|
|
transformer.initialize_teacache(enable_teacache=True, num_steps=steps) |
|
|
else: |
|
|
transformer.initialize_teacache(enable_teacache=False) |
|
|
|
|
|
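# Per-step sampler callback: renders a quick preview via vae_decode_fake,
# honours the user's cancel request, and pushes progress to the UI.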
def callback(d): |
|
|
preview = d['denoised'] |
|
|
preview = vae_decode_fake(preview) |
|
|
|
|
|
preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8) |
|
|
preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c') |
|
|
|
|
|
if stream.input_queue.top() == 'end': |
|
|
stream.output_queue.push(('end', None)) |
|
|
raise KeyboardInterrupt('User ends the task.') |
|
|
|
|
|
current_step = d['i'] + 1 |
|
|
percentage = int(100.0 * current_step / steps) |
|
|
hint = translate('Sampling {0}/{1}').format(current_step, steps) |
|
|
|
|
|
section_info = translate('セクション: {0}/{1}').format(i_section+1, total_sections) |
|
|
desc = f"{section_info} " + translate('生成フレーム数: {total_generated_latent_frames}, 動画長: {video_length:.2f} 秒 (FPS-30). 動画が生成中です ...').format(section_info=section_info, total_generated_latent_frames=int(max(0, total_generated_latent_frames * 4 - 3)), video_length=max(0, (total_generated_latent_frames * 4 - 3) / 30)) |
|
|
stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint)))) |
|
|
return |
|
|
|
|
|
|
|
|
|
|
|
strength_value = max(0.01, 1.0 / image_strength) |
|
|
print(translate('Image影響度: UI値={0:.2f}({1:.0f}%)→計算値={2:.4f}(値が小さいほど始点の影響が強い)').format( |
|
|
image_strength, image_strength * 100, strength_value)) |
|
|
|
|
|
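# Run the sampler for this section. strength_value (inverse of the UI image
# influence) controls how strongly the start image constrains the result.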
generated_latents = sample_hunyuan( |
|
|
transformer=transformer, |
|
|
sampler='unipc', |
|
|
width=width, |
|
|
height=height, |
|
|
frames=num_frames, |
|
|
real_guidance_scale=cfg, |
|
|
distilled_guidance_scale=gs, |
|
|
guidance_rescale=rs, |
|
|
|
|
|
num_inference_steps=steps, |
|
|
generator=rnd, |
|
|
prompt_embeds=current_llama_vec, |
|
|
prompt_embeds_mask=current_llama_attention_mask, |
|
|
prompt_poolers=current_clip_l_pooler, |
|
|
negative_prompt_embeds=llama_vec_n, |
|
|
negative_prompt_embeds_mask=llama_attention_mask_n, |
|
|
negative_prompt_poolers=clip_l_pooler_n, |
|
|
device=gpu, |
|
|
dtype=torch.bfloat16, |
|
|
image_embeddings=image_encoder_last_hidden_state, |
|
|
latent_indices=latent_indices, |
|
|
clean_latents=clean_latents, |
|
|
clean_latent_indices=clean_latent_indices, |
|
|
clean_latents_2x=clean_latents_2x, |
|
|
clean_latent_2x_indices=clean_latent_2x_indices, |
|
|
clean_latents_4x=clean_latents_4x, |
|
|
clean_latent_4x_indices=clean_latent_4x_indices, |
|
|
initial_latent=current_latent, |
|
|
strength=strength_value, |
|
|
callback=callback, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
total_generated_latent_frames += int(generated_latents.shape[2]) |
|
|
|
|
|
history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2) |
|
|
|
|
|
if not high_vram: |
|
|
|
|
|
preserved_memory_offload = 8.0 |
|
|
print(translate('Offloading transformer with memory preservation: {0} GB').format(preserved_memory_offload)) |
|
|
offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=preserved_memory_offload) |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
|
|
|
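# Slice off the zero-initialized context frames: only the frames generated
# so far (plus the start latent) are decoded and saved.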
real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if history_pixels is None: |
|
|
history_pixels = vae_decode(real_history_latents, vae).cpu() |
|
|
else: |
|
|
|
|
|
if latent_window_size == 4.5: |
|
|
section_latent_frames = 11 if is_last_section else 10 |
|
|
overlapped_frames = 17 |
|
|
else: |
|
|
|
|
|
section_latent_frames = int(latent_window_size * 2)  # both branches of the original conditional were identical
|
|
overlapped_frames = int(latent_window_size * 4 - 3) |
|
|
|
|
|
|
|
|
current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu() |
|
|
|
|
|
|
|
|
# history_pixels is guaranteed non-None in this branch, so append directly.
history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
|
|
|
|
|
|
|
|
if save_section_frames and history_pixels is not None: |
|
|
try: |
|
|
if i_section == 0 or current_pixels is None: |
|
|
|
|
|
last_frame = history_pixels[0, :, -1, :, :] |
|
|
else: |
|
|
|
|
|
last_frame = current_pixels[0, :, -1, :, :] |
|
|
last_frame = einops.rearrange(last_frame, 'c h w -> h w c') |
|
|
last_frame = last_frame.cpu().numpy() |
|
|
last_frame = np.clip((last_frame * 127.5 + 127.5), 0, 255).astype(np.uint8) |
|
|
last_frame = resize_and_center_crop(last_frame, target_width=width, target_height=height) |
|
|
|
|
|
|
|
|
section_metadata = { |
|
|
PROMPT_KEY: prompt, |
|
|
SEED_KEY: seed, |
|
|
SECTION_NUMBER_KEY: i_section |
|
|
} |
|
|
|
|
|
|
|
|
if section_map and i_section in section_map: |
|
|
_, section_prompt = section_map[i_section] |
|
|
if section_prompt and section_prompt.strip(): |
|
|
section_metadata[SECTION_PROMPT_KEY] = section_prompt |
|
|
|
|
|
|
|
|
if is_first_section: |
|
|
frame_path = os.path.join(outputs_folder, f'{job_id}_{i_section}_end.png') |
|
|
Image.fromarray(last_frame).save(frame_path) |
|
|
embed_metadata_to_png(frame_path, section_metadata) |
|
|
else: |
|
|
frame_path = os.path.join(outputs_folder, f'{job_id}_{i_section}.png') |
|
|
Image.fromarray(last_frame).save(frame_path) |
|
|
embed_metadata_to_png(frame_path, section_metadata) |
|
|
|
|
|
print(translate("セクション{0}のフレーム画像をメタデータ付きで保存しました").format(i_section)) |
|
|
except Exception as e: |
|
|
print(translate("セクション{0}最終フレーム画像保存時にエラー: {1}").format(i_section, e)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
is_save_all_frames = bool(save_latent_frames) |
|
|
is_save_last_frame_only = bool(save_last_section_frames) |
|
|
|
|
|
if is_save_all_frames: |
|
|
should_save_frames = True |
|
|
elif is_save_last_frame_only and is_last_section: |
|
|
should_save_frames = True |
|
|
else: |
|
|
should_save_frames = False |
|
|
|
|
|
if should_save_frames: |
|
|
try: |
|
|
|
|
|
source_pixels = None |
|
|
|
|
|
|
|
|
|
|
|
if history_pixels is not None: |
|
|
source_pixels = history_pixels |
|
|
print(translate("フレーム画像保存: history_pixelsを使用します")) |
|
|
elif 'current_pixels' in locals() and current_pixels is not None: |
|
|
source_pixels = current_pixels |
|
|
print(translate("フレーム画像保存: current_pixelsを使用します")) |
|
|
else: |
|
|
print(translate("フレーム画像保存: 有効なピクセルデータがありません")) |
|
|
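# NOTE: this return aborts the whole worker without pushing 'end'; it only
# triggers when no decoded pixels are available, which should not happen.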
return |
|
|
|
|
|
|
|
|
latent_frame_count = source_pixels.shape[2] |
|
|
|
|
|
|
|
|
|
|
|
if is_save_all_frames: |
|
|
print(translate("全フレーム画像保存: セクション{0}の{1}フレームを保存します").format(i_section, latent_frame_count)) |
|
|
elif is_save_last_frame_only and is_last_section: |
|
|
|
|
|
print(translate("最終セクションのみ全フレーム画像保存: セクション{0}/{1}の{2}フレームを保存します (最終セクション)").format( |
|
|
i_section, total_sections-1, latent_frame_count)) |
|
|
else: |
|
|
print(translate("フレーム画像保存: セクション{0}の{1}フレームを保存します").format(i_section, latent_frame_count)) |
|
|
|
|
|
|
|
|
frames_folder = os.path.join(outputs_folder, f'{job_id}_frames_section{i_section}') |
|
|
os.makedirs(frames_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
for frame_idx in range(latent_frame_count): |
|
|
|
|
|
frame = source_pixels[0, :, frame_idx, :, :] |
|
|
frame = einops.rearrange(frame, 'c h w -> h w c') |
|
|
frame = frame.cpu().numpy() |
|
|
frame = np.clip((frame * 127.5 + 127.5), 0, 255).astype(np.uint8) |
|
|
frame = resize_and_center_crop(frame, target_width=width, target_height=height) |
|
|
|
|
|
|
|
|
frame_metadata = { |
|
|
PROMPT_KEY: prompt, |
|
|
SEED_KEY: seed, |
|
|
SECTION_NUMBER_KEY: i_section, |
|
|
"FRAME_NUMBER": frame_idx |
|
|
} |
|
|
|
|
|
|
|
|
frame_path = os.path.join(frames_folder, f'frame_{frame_idx:03d}.png') |
|
|
Image.fromarray(frame).save(frame_path) |
|
|
embed_metadata_to_png(frame_path, frame_metadata) |
|
|
|
|
|
|
|
|
|
|
|
if is_save_all_frames: |
|
|
print(translate("全フレーム画像保存: セクション{0}の{1}個のフレーム画像を保存しました: {2}").format( |
|
|
i_section, latent_frame_count, frames_folder)) |
|
|
elif is_save_last_frame_only and is_last_section: |
|
|
print(translate("最終セクションのみ全フレーム画像保存: セクション{0}/{1}の{2}個のフレーム画像を保存しました (最終セクション): {3}").format( |
|
|
i_section, total_sections-1, latent_frame_count, frames_folder)) |
|
|
else: |
|
|
print(translate("セクション{0}の{1}個のフレーム画像を保存しました: {2}").format( |
|
|
i_section, latent_frame_count, frames_folder)) |
|
|
except Exception as e: |
|
|
print(translate("セクション{0}のフレーム画像保存中にエラー: {1}").format(i_section, e)) |
|
|
traceback.print_exc() |
|
|
|
|
|
if not high_vram: |
|
|
unload_complete_models() |
|
|
|
|
|
|
|
|
|
|
|
output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4') |
|
|
|
|
|
|
|
|
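# Clamp to the valid [-1, 1] range before MP4 export; soft appends can
# produce slightly out-of-range values.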
if history_pixels.min() < -1.0 or history_pixels.max() > 1.0: |
|
|
history_pixels = torch.clamp(history_pixels, -1.0, 1.0) |
|
|
|
|
|
|
|
|
save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf) |
|
|
|
|
|
print(translate('Decoded. Current latent shape {0}; pixel shape {1}').format(real_history_latents.shape, history_pixels.shape)) |
|
|
|
|
|
print(translate("■ セクション{0}の処理完了").format(i_section)) |
|
|
print(translate(" - 現在の累計フレーム数: {0}フレーム").format(int(max(0, total_generated_latent_frames * 4 - 3)))) |
|
|
print(translate(" - レンダリング時間: {0}秒").format(f"{max(0, (total_generated_latent_frames * 4 - 3) / 30):.2f}")) |
|
|
print(translate(" - 出力ファイル: {0}").format(output_filename)) |
|
|
|
|
|
stream.output_queue.push(('file', output_filename)) |
|
|
|
|
|
if is_last_section: |
|
|
combined_output_filename = None |
|
|
|
|
|
if uploaded_tensor is not None: |
|
|
try: |
|
|
original_frames = real_history_latents.shape[2] |
|
|
uploaded_frames = uploaded_tensor.shape[2] |
|
|
|
|
|
print(translate("テンソルデータを後方に結合します: アップロードされたフレーム数 = {uploaded_frames}").format(uploaded_frames=uploaded_frames)) |
|
|
|
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータ({uploaded_frames}フレーム)の結合を開始します...").format(uploaded_frames=uploaded_frames), make_progress_bar_html(80, translate('テンソルデータ結合準備'))))) |
|
|
|
|
|
|
|
|
|
|
|
if uploaded_tensor.shape[3] != real_history_latents.shape[3] or uploaded_tensor.shape[4] != real_history_latents.shape[4]: |
|
|
print(translate("警告: テンソルサイズが異なります: アップロード={0}, 現在の生成={1}").format(uploaded_tensor.shape, real_history_latents.shape)) |
|
|
print(translate("テンソルサイズの不一致のため、前方結合をスキップします")) |
|
|
stream.output_queue.push(('progress', (None, translate("テンソルサイズの不一致のため、前方結合をスキップしました"), make_progress_bar_html(85, translate('互換性エラー'))))) |
|
|
else: |
|
|
|
|
|
processed_tensor = uploaded_tensor.clone() |
|
|
if processed_tensor.device != real_history_latents.device: |
|
|
processed_tensor = processed_tensor.to(real_history_latents.device) |
|
|
if processed_tensor.dtype != real_history_latents.dtype: |
|
|
processed_tensor = processed_tensor.to(dtype=real_history_latents.dtype) |
|
|
|
|
|
|
|
|
original_output_filename = os.path.join(outputs_folder, f'{job_id}_original.mp4') |
|
|
save_bcthw_as_mp4(history_pixels, original_output_filename, fps=30, crf=mp4_crf) |
|
|
print(translate("元の動画を保存しました: {original_output_filename}").format(original_output_filename=original_output_filename)) |
|
|
|
|
|
|
|
|
combined_history_latents = real_history_latents.clone() |
|
|
combined_history_pixels = history_pixels.clone() if history_pixels is not None else None |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
torch.cuda.synchronize() |
|
|
torch.cuda.empty_cache() |
|
|
import gc |
|
|
gc.collect() |
|
|
print(translate("GPUメモリ確保状態: {memory:.2f}GB").format(memory=torch.cuda.memory_allocated()/1024**3)) |
|
|
|
|
|
|
|
|
if not high_vram and vae.device != torch.device('cuda'): |
|
|
print(translate("VAEをGPUに移動: {0} → cuda").format(vae.device)) |
|
|
vae.to('cuda') |
|
|
|
|
|
|
|
|
|
|
|
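# Decode the uploaded tensor in chunks of 5 latent frames to bound VRAM
# usage; decoded chunks are accumulated on the CPU.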
chunk_size = min(5, uploaded_frames) |
|
|
|
|
|
|
|
|
num_chunks = (uploaded_frames + chunk_size - 1) // chunk_size |
|
|
|
|
|
|
|
|
for chunk_idx in range(num_chunks): |
|
|
chunk_start = chunk_idx * chunk_size |
|
|
chunk_end = min(chunk_start + chunk_size, uploaded_frames) |
|
|
chunk_frames = chunk_end - chunk_start |
|
|
|
|
|
|
|
|
chunk_progress = (chunk_idx + 1) / num_chunks * 100 |
|
|
progress_message = translate("テンソルデータ結合中: チャンク {0}/{1} (フレーム {2}-{3}/{4})").format(chunk_idx+1, num_chunks, chunk_start+1, chunk_end, uploaded_frames) |
|
|
stream.output_queue.push(('progress', (None, progress_message, make_progress_bar_html(int(80 + chunk_progress * 0.1), translate('テンソルデータ処理中'))))) |
|
|
|
|
|
|
|
|
current_chunk = processed_tensor[:, :, chunk_start:chunk_end, :, :] |
|
|
print(translate("チャンク{0}/{1}処理中: フレーム {2}-{3}/{4}").format(chunk_idx+1, num_chunks, chunk_start+1, chunk_end, uploaded_frames)) |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
print(translate("チャンク{0}処理前のGPUメモリ: {1:.2f}GB/{2:.2f}GB").format(chunk_idx+1, torch.cuda.memory_allocated()/1024**3, torch.cuda.get_device_properties(0).total_memory/1024**3)) |
|
|
|
|
|
torch.cuda.empty_cache() |
|
|
|
|
|
try: |
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
torch.cuda.synchronize() |
|
|
torch.cuda.empty_cache() |
|
|
import gc |
|
|
gc.collect() |
|
|
|
|
|
|
|
|
print(translate("チャンク{0}のVAEデコード開始...").format(chunk_idx+1)) |
|
|
stream.output_queue.push(('progress', (None, translate("チャンク{0}/{1}のVAEデコード中...").format(chunk_idx+1, num_chunks), make_progress_bar_html(int(80 + chunk_progress * 0.1), translate('デコード処理'))))) |
|
|
|
|
|
|
|
|
if current_chunk.device != vae.device: |
|
|
print(translate(" - デバイスをVAEと同じに変更: {0} → {1}").format(current_chunk.device, vae.device)) |
|
|
current_chunk = current_chunk.to(vae.device) |
|
|
|
|
|
|
|
|
if current_chunk.dtype != torch.float16: |
|
|
print(translate(" - データ型をfloat16に変更: {0} → torch.float16").format(current_chunk.dtype)) |
|
|
current_chunk = current_chunk.to(dtype=torch.float16) |
|
|
|
|
|
|
|
|
chunk_pixels = vae_decode(current_chunk, vae).cpu() |
|
|
print(translate("チャンク{0}のVAEデコード完了 (フレーム数: {1})").format(chunk_idx+1, chunk_frames)) |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
print(translate("チャンク{0}デコード後のGPUメモリ: {1:.2f}GB").format(chunk_idx+1, torch.cuda.memory_allocated()/1024**3)) |
|
|
|
|
|
|
|
|
if combined_history_pixels is None: |
|
|
|
|
|
combined_history_pixels = chunk_pixels |
|
|
else: |
|
|
|
|
|
if combined_history_pixels.dtype != chunk_pixels.dtype: |
|
|
print(translate(" - データ型の不一致を修正: {0} → {1}").format(combined_history_pixels.dtype, chunk_pixels.dtype)) |
|
|
combined_history_pixels = combined_history_pixels.to(dtype=chunk_pixels.dtype) |
|
|
|
|
|
|
|
|
if combined_history_pixels.device != torch.device('cpu'): |
|
|
combined_history_pixels = combined_history_pixels.cpu() |
|
|
if chunk_pixels.device != torch.device('cpu'): |
|
|
chunk_pixels = chunk_pixels.cpu() |
|
|
|
|
|
|
|
|
combined_history_pixels = torch.cat([combined_history_pixels, chunk_pixels], dim=2) |
|
|
|
|
|
|
|
|
current_total_frames = combined_history_pixels.shape[2] |
|
|
print(translate("チャンク{0}の結合完了: 現在の組み込みフレーム数 = {1}").format(chunk_idx+1, current_total_frames)) |
|
|
|
|
|
|
|
|
if chunk_idx == num_chunks - 1 or (chunk_idx > 0 and (chunk_idx + 1) % 5 == 0): |
|
|
|
|
|
interim_output_filename = os.path.join(outputs_folder, f'{job_id}_combined_interim_{chunk_idx+1}.mp4') |
|
|
print(translate("中間結果を保存中: チャンク{0}/{1}").format(chunk_idx+1, num_chunks)) |
|
|
stream.output_queue.push(('progress', (None, translate("中間結果のMP4変換中... (チャンク{0}/{1})").format(chunk_idx+1, num_chunks), make_progress_bar_html(int(85 + chunk_progress * 0.1), translate('MP4保存中'))))) |
|
|
|
|
|
|
|
|
save_bcthw_as_mp4(combined_history_pixels, interim_output_filename, fps=30, crf=mp4_crf) |
|
|
print(translate("中間結果を保存しました: {0}").format(interim_output_filename)) |
|
|
|
|
|
|
|
|
stream.output_queue.push(('file', interim_output_filename)) |
|
|
except Exception as e: |
|
|
print(translate("チャンク{0}の処理中にエラーが発生しました: {1}").format(chunk_idx+1, e)) |
|
|
traceback.print_exc() |
|
|
|
|
|
|
|
|
print(translate("エラー情報:")) |
|
|
print(translate(" - チャンク情報: {0}/{1}, フレーム {2}-{3}/{4}").format(chunk_idx+1, num_chunks, chunk_start+1, chunk_end, uploaded_frames)) |
|
|
if 'current_chunk' in locals(): |
|
|
print(translate(" - current_chunk: shape={0}, dtype={1}, device={2}").format(current_chunk.shape, current_chunk.dtype, current_chunk.device)) |
|
|
if 'vae' in globals(): |
|
|
print(translate(" - VAE情報: device={0}, dtype={1}").format(vae.device, next(vae.parameters()).dtype)) |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
print(translate(" - GPU使用量: {0:.2f}GB/{1:.2f}GB").format(torch.cuda.memory_allocated()/1024**3, torch.cuda.get_device_properties(0).total_memory/1024**3)) |
|
|
|
|
|
stream.output_queue.push(('progress', (None, translate("エラー: チャンク{0}の処理に失敗しました - {1}").format(chunk_idx+1, str(e)), make_progress_bar_html(90, translate('エラー'))))) |
|
|
break |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
torch.cuda.synchronize() |
|
|
torch.cuda.empty_cache() |
|
|
import gc |
|
|
gc.collect() |
|
|
print(translate("チャンク処理後のGPUメモリ確保状態: {0:.2f}GB").format(torch.cuda.memory_allocated()/1024**3)) |
|
|
|
|
|
|
|
|
if combined_history_pixels is not None: |
|
|
|
|
|
print(translate("最終結果を保存中: 全{0}チャンク完了").format(num_chunks)) |
|
|
stream.output_queue.push(('progress', (None, translate("結合した動画をMP4に変換中..."), make_progress_bar_html(95, translate('最終MP4変換処理'))))) |
|
|
|
|
|
|
|
|
combined_output_filename = os.path.join(outputs_folder, f'{job_id}_combined.mp4') |
|
|
|
|
|
|
|
|
save_bcthw_as_mp4(combined_history_pixels, combined_output_filename, fps=30, crf=mp4_crf) |
|
|
print(translate("最終結果を保存しました: {0}").format(combined_output_filename)) |
|
|
print(translate("結合動画の保存場所: {0}").format(os.path.abspath(combined_output_filename))) |
|
|
|
|
|
|
|
|
print(translate("中間ファイルの削除を開始します...")) |
|
|
deleted_files = [] |
|
|
try: |
|
|
|
|
|
import re |
|
|
interim_pattern = re.compile(rf'{re.escape(job_id)}_combined_interim_\d+\.mp4')
|
|
|
|
|
for filename in os.listdir(outputs_folder): |
|
|
if interim_pattern.match(filename): |
|
|
interim_path = os.path.join(outputs_folder, filename) |
|
|
try: |
|
|
os.remove(interim_path) |
|
|
deleted_files.append(filename) |
|
|
print(translate(" - 中間ファイルを削除しました: {0}").format(filename)) |
|
|
except Exception as e: |
|
|
print(translate(" - ファイル削除エラー ({0}): {1}").format(filename, e)) |
|
|
|
|
|
if deleted_files: |
|
|
print(translate("合計 {0} 個の中間ファイルを削除しました").format(len(deleted_files))) |
|
|
|
|
|
files_str = ', '.join(deleted_files) |
|
|
stream.output_queue.push(('progress', (None, translate("中間ファイルを削除しました: {0}").format(files_str), make_progress_bar_html(97, translate('クリーンアップ完了'))))) |
|
|
else: |
|
|
print(translate("削除対象の中間ファイルは見つかりませんでした")) |
|
|
except Exception as e: |
|
|
print(translate("中間ファイル削除中にエラーが発生しました: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
|
|
|
|
|
|
stream.output_queue.push(('file', combined_output_filename)) |
|
|
|
|
|
|
|
|
combined_frames = combined_history_pixels.shape[2] |
|
|
combined_size_mb = (combined_history_pixels.element_size() * combined_history_pixels.nelement()) / (1024 * 1024) |
|
|
print(translate("結合完了情報: テンソルデータ({0}フレーム) + 新規動画({1}フレーム) = 合計{2}フレーム").format(uploaded_frames, original_frames, combined_frames)) |
|
|
print(translate("結合動画の再生時間: {0:.2f}秒").format(combined_frames / 30)) |
|
|
print(translate("データサイズ: {0:.2f} MB(制限無し)").format(combined_size_mb)) |
|
|
|
|
|
|
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータ({0}フレーム)と動画({1}フレーム)の結合が完了しました。\n合計フレーム数: {2}フレーム ({3:.2f}秒) - サイズ制限なし").format(uploaded_frames, original_frames, combined_frames, combined_frames / 30), make_progress_bar_html(100, translate('結合完了'))))) |
|
|
else: |
|
|
print(translate("テンソルデータの結合に失敗しました。")) |
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータの結合に失敗しました。"), make_progress_bar_html(100, translate('エラー'))))) |
|
|
|
|
|
|
|
|
|
|
|
real_history_latents = combined_history_latents |
|
|
history_pixels = combined_history_pixels |
|
|
|
|
|
|
|
|
stream.output_queue.push(('file', combined_output_filename)) |
|
|
|
|
|
|
|
|
output_filename = combined_output_filename |
|
|
|
|
|
|
|
|
combined_frames = combined_history_pixels.shape[2] |
|
|
combined_size_mb = (combined_history_pixels.element_size() * combined_history_pixels.nelement()) / (1024 * 1024) |
|
|
print(translate("結合完了情報: テンソルデータ({0}フレーム) + 新規動画({1}フレーム) = 合計{2}フレーム").format(uploaded_frames, original_frames, combined_frames)) |
|
|
print(translate("結合動画の再生時間: {0:.2f}秒").format(combined_frames / 30)) |
|
|
print(translate("データサイズ: {0:.2f} MB(制限無し)").format(combined_size_mb)) |
|
|
|
|
|
|
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータ({0}フレーム)と動画({1}フレーム)の結合が完了しました。\n合計フレーム数: {2}フレーム ({3:.2f}秒)").format(uploaded_frames, original_frames, combined_frames, combined_frames / 30), make_progress_bar_html(100, translate('結合完了'))))) |
|
|
except Exception as e: |
|
|
print(translate("テンソルデータ結合中にエラーが発生しました: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
stream.output_queue.push(('progress', (None, translate("エラー: テンソルデータ結合に失敗しました - {0}").format(str(e)), make_progress_bar_html(100, translate('エラー'))))) |
|
|
|
|
|
|
|
|
|
|
|
should_play_alarm = False |
|
|
|
|
|
|
|
|
if isinstance(alarm_on_completion, bool): |
|
|
should_play_alarm = alarm_on_completion |
|
|
elif hasattr(alarm_on_completion, 'value') and isinstance(alarm_on_completion.value, bool): |
|
|
should_play_alarm = alarm_on_completion.value |
|
|
else: |
|
|
|
|
|
try: |
|
|
from eichi_utils.settings_manager import load_app_settings_f1 |
|
|
app_settings = load_app_settings_f1() |
|
|
if app_settings and "alarm_on_completion" in app_settings: |
|
|
should_play_alarm = app_settings["alarm_on_completion"] |
|
|
except Exception:
    # If the settings cannot be read, fall back to no alarm.
    should_play_alarm = False
|
|
|
|
|
if should_play_alarm: |
|
|
if HAS_WINSOUND: |
|
|
winsound.PlaySound("SystemExclamation", winsound.SND_ALIAS) |
|
|
else: |
|
|
print(translate("処理が完了しました")) |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
torch.cuda.synchronize() |
|
|
torch.cuda.empty_cache() |
|
|
import gc |
|
|
gc.collect() |
|
|
print(translate("処理完了後のメモリクリア: {memory:.2f}GB/{total_memory:.2f}GB").format(memory=torch.cuda.memory_allocated()/1024**3, total_memory=torch.cuda.get_device_properties(0).total_memory/1024**3)) |
|
|
|
|
|
|
|
|
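# Optionally persist the generated latents as .safetensors, together with a
# small metadata tensor [height, width, frame_count], so they can be fed
# back in via the tensor input above.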
if save_tensor_data: |
|
|
try: |
|
|
|
|
|
tensor_file_path = os.path.join(outputs_folder, f'{job_id}.safetensors') |
|
|
|
|
|
|
|
|
print(translate("=== テンソルデータ保存処理開始 ===")) |
|
|
print(translate("保存対象フレーム数: {frames}").format(frames=real_history_latents.shape[2])) |
|
|
|
|
|
|
|
|
tensor_to_save = real_history_latents.clone().cpu() |
|
|
|
|
|
|
|
|
tensor_size_mb = (tensor_to_save.element_size() * tensor_to_save.nelement()) / (1024 * 1024) |
|
|
|
|
|
print(translate("テンソルデータを保存中... shape: {shape}, フレーム数: {frames}, サイズ: {size:.2f} MB").format(shape=tensor_to_save.shape, frames=tensor_to_save.shape[2], size=tensor_size_mb)) |
|
|
stream.output_queue.push(('progress', (None, translate('テンソルデータを保存中... ({frames}フレーム)').format(frames=tensor_to_save.shape[2]), make_progress_bar_html(95, translate('テンソルデータの保存'))))) |
|
|
|
|
|
|
|
|
metadata = torch.tensor([height, width, tensor_to_save.shape[2]], dtype=torch.int32) |
|
|
|
|
|
|
|
|
tensor_dict = { |
|
|
"history_latents": tensor_to_save, |
|
|
"metadata": metadata |
|
|
} |
|
|
sf.save_file(tensor_dict, tensor_file_path) |
|
|
|
|
|
print(translate("テンソルデータを保存しました: {path}").format(path=tensor_file_path)) |
|
|
print(translate("保存済みテンソルデータ情報: {frames}フレーム, {size:.2f} MB").format(frames=tensor_to_save.shape[2], size=tensor_size_mb)) |
|
|
print(translate("=== テンソルデータ保存処理完了 ===")) |
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータが保存されました: {path} ({frames}フレーム, {size:.2f} MB)").format(path=os.path.basename(tensor_file_path), frames=tensor_to_save.shape[2], size=tensor_size_mb), make_progress_bar_html(100, translate('処理完了'))))) |
|
|
|
|
|
|
|
|
if tensor_data_input is not None and uploaded_tensor is not None: |
|
|
try: |
|
|
|
|
|
uploaded_tensor_filename = os.path.basename(tensor_data_input.name) |
|
|
tensor_combined_path = os.path.join(outputs_folder, f'{job_id}_combined_tensors.safetensors') |
|
|
|
|
|
print(translate("=== テンソルデータ結合処理開始 ===")) |
|
|
print(translate("生成テンソルと入力テンソルを結合して保存します")) |
|
|
print(translate("生成テンソル: {frames}フレーム").format(frames=tensor_to_save.shape[2])) |
|
|
print(translate("入力テンソル: {frames}フレーム").format(frames=uploaded_tensor.shape[2])) |
|
|
|
|
|
|
|
|
if uploaded_tensor.dtype != tensor_to_save.dtype: |
|
|
uploaded_tensor = uploaded_tensor.to(dtype=tensor_to_save.dtype) |
|
|
if uploaded_tensor.device != tensor_to_save.device: |
|
|
uploaded_tensor = uploaded_tensor.to(device=tensor_to_save.device) |
|
|
|
|
|
|
|
|
if uploaded_tensor.shape[3] != tensor_to_save.shape[3] or uploaded_tensor.shape[4] != tensor_to_save.shape[4]: |
|
|
print(translate("警告: テンソルサイズが一致しないため結合できません: {uploaded_shape} vs {tensor_shape}").format(uploaded_shape=uploaded_tensor.shape, tensor_shape=tensor_to_save.shape)) |
|
|
else: |
|
|
|
|
|
combined_tensor = torch.cat([tensor_to_save, uploaded_tensor], dim=2) |
|
|
combined_frames = combined_tensor.shape[2] |
|
|
combined_size_mb = (combined_tensor.element_size() * combined_tensor.nelement()) / (1024 * 1024) |
|
|
|
|
|
|
|
|
combined_metadata = torch.tensor([height, width, combined_frames], dtype=torch.int32) |
|
|
|
|
|
|
|
|
combined_tensor_dict = { |
|
|
"history_latents": combined_tensor, |
|
|
"metadata": combined_metadata |
|
|
} |
|
|
sf.save_file(combined_tensor_dict, tensor_combined_path) |
|
|
|
|
|
print(translate("結合テンソルを保存しました: {path}").format(path=tensor_combined_path)) |
|
|
print(translate("結合テンソル情報: 合計{0}フレーム ({1}+{2}), {3:.2f} MB").format(frames, tensor_to_save.shape[2], uploaded_tensor.shape[2], size)) |
|
|
print(translate("=== テンソルデータ結合処理完了 ===")) |
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータ結合が保存されました: 合計{frames}フレーム").format(frames=combined_frames), make_progress_bar_html(100, translate('結合テンソル保存完了'))))) |
|
|
except Exception as e: |
|
|
print(translate("テンソルデータ結合保存エラー: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
except Exception as e: |
|
|
print(translate("テンソルデータ保存エラー: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
stream.output_queue.push(('progress', (None, translate("テンソルデータの保存中にエラーが発生しました。"), make_progress_bar_html(100, translate('処理完了'))))) |
|
|
|
|
|
|
|
|
process_end_time = time.time() |
|
|
total_process_time = process_end_time - process_start_time |
|
|
hours, remainder = divmod(total_process_time, 3600) |
|
|
minutes, seconds = divmod(remainder, 60) |
|
|
time_str = "" |
|
|
if hours > 0: |
|
|
time_str = translate("{0}時間 {1}分 {2}秒").format(int(hours), int(minutes), f"{seconds:.1f}") |
|
|
elif minutes > 0: |
|
|
time_str = translate("{0}分 {1}秒").format(int(minutes), f"{seconds:.1f}") |
|
|
else: |
|
|
time_str = translate("{0:.1f}秒").format(seconds) |
|
|
print(translate("全体の処理時間: {0}").format(time_str)) |
|
|
|
|
|
|
|
|
if combined_output_filename is not None: |
|
|
|
|
|
combined_filename_only = os.path.basename(combined_output_filename) |
|
|
completion_message = translate("すべてのセクション({sections}/{total_sections})が完了しました。テンソルデータとの後方結合も完了しました。結合ファイル名: {filename}\n全体の処理時間: {time}").format(sections=sections, total_sections=total_sections, filename=combined_filename_only, time=time_str) |
|
|
|
|
|
output_filename = combined_output_filename |
|
|
else: |
|
|
|
|
|
completion_message = translate("すべてのセクション({sections}/{total_sections})が完了しました。全体の処理時間: {time}").format(sections=total_sections, total_sections=total_sections, time=time_str) |
|
|
|
|
|
stream.output_queue.push(('progress', (None, completion_message, make_progress_bar_html(100, translate('処理完了'))))) |
|
|
|
|
|
|
|
|
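# Unless keep_section_videos is set, delete the intermediate per-section MP4s, keeping the final video and any combined video.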
if not keep_section_videos: |
|
|
|
|
|
final_video_path = output_filename |
|
|
final_video_name = os.path.basename(final_video_path) |
|
|
|
|
|
job_id_part = job_id |
|
|
|
|
|
|
|
|
files = os.listdir(outputs_folder) |
|
|
deleted_count = 0 |
|
|
|
|
|
for file in files: |
|
|
|
|
|
|
|
|
if (file.startswith(job_id_part) and file.endswith('.mp4')
and file != final_video_name
and 'combined' not in file):
|
|
file_path = os.path.join(outputs_folder, file) |
|
|
try: |
|
|
os.remove(file_path) |
|
|
deleted_count += 1 |
|
|
print(translate("中間ファイル: {0}").format(file)) |
|
|
except Exception as e: |
|
|
print(translate("ファイル削除時のエラー {0}: {1}").format(file, e)) |
|
|
|
|
|
if deleted_count > 0: |
|
|
print(translate("{0}個の中間ファイルを削除しました。最終ファイルは保存されています: {1}").format(deleted_count, final_video_name)) |
|
|
final_message = translate("中間ファイルを削除しました。最終動画と結合動画は保存されています。") |
|
|
stream.output_queue.push(('progress', (None, final_message, make_progress_bar_html(100, translate('処理完了'))))) |
|
|
|
|
|
break |
|
|
except Exception:
|
|
traceback.print_exc() |
|
|
|
|
|
if not high_vram: |
|
|
unload_complete_models( |
|
|
text_encoder, text_encoder_2, image_encoder, vae, transformer |
|
|
) |
|
|
|
|
|
stream.output_queue.push(('end', None)) |
|
|
return |
|
|
|
|
|
|
|
|
def validate_images(input_image, section_settings, length_radio=None, frame_size_radio=None): |
|
|
"""入力画像または画面に表示されている最後のキーフレーム画像のいずれかが有効かを確認する""" |
|
|
|
|
|
if input_image is not None: |
|
|
return True, "" |
|
|
|
|
|
|
|
|
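# Estimate how many sections are actually displayed for the selected video length and frame size, so hidden sections can be ignored.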
total_display_sections = None |
|
|
if length_radio is not None and frame_size_radio is not None: |
|
|
try: |
|
|
|
|
|
seconds = get_video_seconds(length_radio.value) |
|
|
|
|
|
|
|
|
latent_window_size = 4.5 if frame_size_radio.value == translate("0.5秒 (17フレーム)") else 9 |
|
|
frame_count = 17 if latent_window_size == 4.5 else int(latent_window_size * 4 - 3)
|
|
|
|
|
|
|
|
total_frames = int(seconds * 30) |
|
|
total_display_sections = int(max(round(total_frames / frame_count), 1)) |
|
|
except Exception as e: |
|
|
print(translate("セクション数計算エラー: {0}").format(e)) |
|
|
|
|
|
|
|
|
last_visible_section_image = None |
|
|
last_visible_section_num = -1 |
|
|
|
|
|
if section_settings is not None and not isinstance(section_settings, bool): |
|
|
|
|
|
valid_sections = [] |
|
|
try: |
|
|
for section in section_settings: |
|
|
if section and len(section) > 1 and section[0] is not None: |
|
|
try: |
|
|
section_num = int(section[0]) |
|
|
|
|
|
if total_display_sections is None or section_num < total_display_sections: |
|
|
valid_sections.append((section_num, section[1])) |
|
|
except (ValueError, TypeError): |
|
|
continue |
|
|
except (TypeError, ValueError): |
|
|
|
|
|
valid_sections = [] |
|
|
|
|
|
|
|
|
if valid_sections: |
|
|
|
|
|
valid_sections.sort(key=lambda x: x[0]) |
|
|
|
|
|
last_visible_section_num, last_visible_section_image = valid_sections[-1] |
|
|
|
|
|
|
|
|
if last_visible_section_image is not None: |
|
|
return True, "" |
|
|
|
|
|
|
|
|
error_html = f""" |
|
|
<div style="padding: 15px; border-radius: 10px; background-color: #ffebee; border: 1px solid #f44336; margin: 10px 0;"> |
|
|
<h3 style="color: #d32f2f; margin: 0 0 10px 0;">{translate('画像が選択されていません')}</h3> |
|
|
<p>{translate('生成を開始する前に「Image」欄または表示されている最後のキーフレーム画像に画像をアップロードしてください。これは叡智の始発点となる重要な画像です。')}</p> |
|
|
</div> |
|
|
""" |
|
|
error_bar = make_progress_bar_html(100, translate('画像がありません')) |
|
|
return False, error_html + error_bar |
|
|
|
|
|
def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, use_random_seed, mp4_crf=16, all_padding_value=1.0, image_strength=1.0, frame_size_setting="1秒 (33フレーム)", keep_section_videos=False, lora_files=None, lora_files2=None, lora_files3=None, lora_scales_text="0.8,0.8,0.8", output_dir=None, save_section_frames=False, use_all_padding=False, use_lora=False, lora_mode=None, lora_dropdown1=None, lora_dropdown2=None, lora_dropdown3=None, save_tensor_data=False, section_settings=None, tensor_data_input=None, fp8_optimization=False, resolution=640, batch_count=1, frame_save_mode=translate("保存しない"), use_queue=False, prompt_queue_file=None, save_settings_on_start=False, alarm_on_completion=False): |
|
|
|
|
|
|
|
|
if section_settings is not None and not isinstance(section_settings, list): |
|
|
print(translate("section_settingsがリスト型ではありません:{0}. 初期化します。").format(type(section_settings).__name__)) |
|
|
section_settings = [[None, None, ""] for _ in range(50)] |
|
|
|
|
|
global stream |
|
|
global batch_stopped |
|
|
global queue_enabled, queue_type, prompt_queue_file_path, image_queue_files |
|
|
|
|
|
|
|
|
batch_stopped = False |
|
|
|
|
|
|
|
|
|
|
|
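# Map the UI frame-size label to latent_window_size: 0.5秒 → 4.5, 1秒 → 9.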
if frame_size_setting == "0.5秒 (17フレーム)": |
|
|
|
|
|
latent_window_size = 4.5 |
|
|
print(translate('フレームサイズを0.5秒モードに設定: latent_window_size = {0}').format(latent_window_size)) |
|
|
else: |
|
|
|
|
|
latent_window_size = 9 |
|
|
print(translate('フレームサイズを1秒モードに設定: latent_window_size = {0}').format(latent_window_size)) |
|
|
|
|
|
|
|
|
batch_count = max(1, min(int(batch_count), 100)) |
|
|
print(translate("バッチ処理回数: {0}回").format(batch_count)) |
|
|
|
|
|
|
|
|
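# Snap the requested resolution to the nearest entry in SAFE_RESOLUTIONS to avoid unsupported bucket sizes.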
from diffusers_helper.bucket_tools import SAFE_RESOLUTIONS |
|
|
|
|
|
|
|
|
print(translate("UIから受け取った解像度値: {0}(型: {1})").format(resolution, type(resolution).__name__)) |
|
|
|
|
|
|
|
|
if resolution not in SAFE_RESOLUTIONS: |
|
|
closest_resolution = min(SAFE_RESOLUTIONS, key=lambda x: abs(x - resolution)) |
|
|
print(translate('安全な解像度値ではないため、{0}から{1}に自動調整しました').format(resolution, closest_resolution)) |
|
|
resolution = closest_resolution |
|
|
|
|
|
|
|
|
print(translate('解像度を設定: {0}').format(resolution)) |
|
|
|
|
|
|
|
|
|
|
|
if latent_window_size == 4.5: |
|
|
frame_count = 17 |
|
|
else: |
|
|
frame_count = int(latent_window_size * 4 - 3) |
|
|
total_latent_sections = int(max(round((total_second_length * 30) / frame_count), 1)) |
|
|
|
|
|
|
|
|
mode_name = translate("通常モード") |
|
|
|
|
|
print(translate("==== 動画生成開始 =====")) |
|
|
print(translate("生成モード: {0}").format(mode_name)) |
|
|
print(translate("動画長: {0}秒").format(total_second_length)) |
|
|
|
|
|
|
|
|
if save_settings_on_start: |
|
|
try: |
|
|
from eichi_utils.settings_manager import save_app_settings_f1 |
|
|
current_settings = { |
|
|
"resolution": resolution, |
|
|
"mp4_crf": mp4_crf, |
|
|
"steps": steps, |
|
|
"cfg": cfg, |
|
|
"use_teacache": use_teacache, |
|
|
"gpu_memory_preservation": gpu_memory_preservation, |
|
|
"gs": gs, |
|
|
"image_strength": image_strength, |
|
|
"keep_section_videos": keep_section_videos, |
|
|
"save_section_frames": save_section_frames, |
|
|
"save_tensor_data": save_tensor_data, |
|
|
"frame_save_mode": frame_save_mode, |
|
|
"save_settings_on_start": save_settings_on_start, |
|
|
"alarm_on_completion": alarm_on_completion |
|
|
} |
|
|
save_app_settings_f1(current_settings) |
|
|
print(translate("自動保存が完了しました")) |
|
|
except Exception as e: |
|
|
print(translate("自動保存中にエラーが発生しました: {0}").format(str(e))) |
|
|
print(translate("フレームサイズ: {0}").format(frame_size_setting)) |
|
|
print(translate("生成セクション数: {0}回").format(total_latent_sections)) |
|
|
print(translate("サンプリングステップ数: {0}").format(steps)) |
|
|
print(translate("TeaCache使用: {0}").format(use_teacache)) |
|
|
|
|
|
print(translate("使用SEED値: {0}").format(seed)) |
|
|
print(translate("LoRA使用: {0}").format(use_lora)) |
|
|
|
|
|
|
|
|
print(translate("FP8最適化: {0}").format(fp8_optimization)) |
|
|
|
|
|
|
|
|
print(translate("オールパディング: F1モードでは無効化されています")) |
|
|
|
|
|
|
|
|
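# Resolve LoRA file paths either from the ./lora directory dropdowns or from uploaded files, then parse per-LoRA scales.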
if use_lora and has_lora_support: |
|
|
all_lora_files = [] |
|
|
lora_paths = [] |
|
|
|
|
|
|
|
|
if lora_mode == translate("ディレクトリから選択"): |
|
|
|
|
|
print(translate("ディレクトリから選択モードでLoRAを処理")) |
|
|
lora_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lora') |
|
|
|
|
|
|
|
|
if isinstance(lora_dropdown2, int) and lora_dropdown2 == 0: |
|
|
|
|
|
|
|
|
|
|
|
dropdown_direct_value = translate("なし") |
|
|
|
|
|
|
|
|
if isinstance(lora_dropdown2, str) and lora_dropdown2 != "0" and lora_dropdown2 != translate("なし"): |
|
|
dropdown_direct_value = lora_dropdown2 |
|
|
|
|
|
|
|
|
for dropdown, label in zip([lora_dropdown1, lora_dropdown2, lora_dropdown3], ["LoRA1", "LoRA2", "LoRA3"]): |
|
|
if dropdown is not None and dropdown != translate("なし") and dropdown != 0: |
|
|
|
|
|
file_path = os.path.join(lora_dir, dropdown) |
|
|
if os.path.exists(file_path): |
|
|
lora_paths.append(file_path) |
|
|
print(translate("{0}選択: {1}").format(label, dropdown)) |
|
|
else: |
|
|
print(translate("選択された{0}ファイルが見つかりません: {1}").format(label, file_path)) |
|
|
else: |
|
|
|
|
|
print(translate("ファイルアップロードモードでLoRAを処理")) |
|
|
|
|
|
|
|
|
if lora_files is not None: |
|
|
if isinstance(lora_files, list): |
|
|
all_lora_files.extend(lora_files) |
|
|
else: |
|
|
all_lora_files.append(lora_files) |
|
|
|
|
|
|
|
|
if lora_files2 is not None: |
|
|
if isinstance(lora_files2, list): |
|
|
all_lora_files.extend(lora_files2) |
|
|
else: |
|
|
all_lora_files.append(lora_files2) |
|
|
|
|
|
|
|
|
if lora_files3 is not None: |
|
|
if isinstance(lora_files3, list): |
|
|
all_lora_files.extend(lora_files3) |
|
|
else: |
|
|
all_lora_files.append(lora_files3) |
|
|
|
|
|
|
|
|
for lora_file in all_lora_files: |
|
|
if hasattr(lora_file, 'name'): |
|
|
lora_paths.append(lora_file.name) |
|
|
|
|
|
|
|
|
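# Parse the comma-separated scales; fall back to 0.8 per LoRA on malformed input, then pad or truncate to match the path count.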
try: |
|
|
scales = [float(s.strip()) for s in lora_scales_text.split(',')] |
|
|
except (ValueError, AttributeError):
|
|
|
|
|
scales = [0.8] * len(lora_paths) |
|
|
|
|
|
|
|
|
if len(scales) < len(lora_paths): |
|
|
scales.extend([0.8] * (len(lora_paths) - len(scales))) |
|
|
elif len(scales) > len(lora_paths): |
|
|
scales = scales[:len(lora_paths)] |
|
|
|
|
|
|
|
|
if len(lora_paths) == 1: |
|
|
|
|
|
print(translate("LoRAファイル: {0}").format(os.path.basename(lora_paths[0]))) |
|
|
print(translate("LoRA適用強度: {0}").format(scales[0])) |
|
|
elif len(lora_paths) > 1: |
|
|
|
|
|
print(translate("LoRAファイル (複数):")) |
|
|
for i, path in enumerate(lora_paths): |
|
|
print(translate(" - {0} (スケール: {1})").format(os.path.basename(path), scales[i] if i < len(scales) else 0.8)) |
|
|
else: |
|
|
|
|
|
print(translate("LoRA: 使用しない")) |
|
|
|
|
|
|
|
|
valid_sections = [] |
|
|
if section_settings is not None: |
|
|
|
|
|
if not isinstance(section_settings, list): |
|
|
print(translate("section_settingsがリスト型ではありません。空のリストとして扱います。")) |
|
|
section_settings = [] |
|
|
|
|
|
for i, sec_data in enumerate(section_settings): |
|
|
if sec_data and isinstance(sec_data, list) and len(sec_data) > 1 and sec_data[1] is not None: |
|
|
valid_sections.append(sec_data[0]) |
|
|
|
|
|
if valid_sections: |
|
|
print(translate("使用するキーフレーム画像: セクション{0}").format(', '.join(map(str, valid_sections)))) |
|
|
else: |
|
|
print(translate("キーフレーム画像: デフォルト設定のみ使用")) |
|
|
|
|
|
print("=============================") |
|
|
|
|
|
|
|
|
batch_stopped = False |
|
|
|
|
|
|
|
|
original_seed = seed |
|
|
|
|
|
|
|
|
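# The random-seed flag may arrive as a bool or a string depending on the caller, so coerce it explicitly.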
use_random = False |
|
|
if isinstance(use_random_seed, bool): |
|
|
use_random = use_random_seed |
|
|
elif isinstance(use_random_seed, str): |
|
|
use_random = use_random_seed.lower() in ["true", "yes", "1", "on"] |
|
|
|
|
|
if use_random: |
|
|
|
|
|
previous_seed = seed |
|
|
|
|
|
seed = random.randint(0, 2**32 - 1) |
|
|
|
|
|
print(translate("ランダムシード機能が有効なため、指定されたSEED値 {0} の代わりに新しいSEED値 {1} を使用します。").format(previous_seed, seed)) |
|
|
|
|
|
yield gr.skip(), None, '', '', gr.update(interactive=False), gr.update(interactive=True), gr.update(value=seed) |
|
|
|
|
|
original_seed = seed |
|
|
else: |
|
|
print(translate("指定されたSEED値 {0} を使用します。").format(seed)) |
|
|
yield gr.skip(), None, '', '', gr.update(interactive=False), gr.update(interactive=True), gr.update() |
|
|
|
|
|
stream = AsyncStream() |
|
|
|
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("バッチ処理が中断されました(バッチ開始前)")) |
|
|
yield ( |
|
|
gr.skip(), |
|
|
gr.update(visible=False), |
|
|
translate("バッチ処理が中断されました"), |
|
|
'', |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update() |
|
|
) |
|
|
return |
|
|
|
|
|
|
|
|
if queue_enabled: |
|
|
if queue_type == "image": |
|
|
print(translate("バッチ処理情報: 合計{0}回").format(batch_count)) |
|
|
print(translate("イメージキュー: 有効, 入力画像1枚 + 画像ファイル{0}枚").format(len(image_queue_files))) |
|
|
print(translate("処理順序: 1回目=入力画像, 2回目以降=入力フォルダの画像ファイル")) |
|
|
|
|
|
for i in range(batch_count): |
|
|
if i == 0: |
|
|
img_src = "入力画像" |
|
|
else: |
|
|
img_idx = i - 1 |
|
|
if img_idx < len(image_queue_files): |
|
|
img_src = os.path.basename(image_queue_files[img_idx]) |
|
|
else: |
|
|
img_src = "入力画像(キュー画像不足)" |
|
|
print(translate(" └ バッチ{0}: {1}").format(i+1, img_src)) |
|
|
else: |
|
|
queue_lines_count = 0 |
|
|
if prompt_queue_file_path and os.path.exists(prompt_queue_file_path): |
|
|
try: |
|
|
with open(prompt_queue_file_path, 'r', encoding='utf-8') as f: |
|
|
queue_lines = [line.strip() for line in f.readlines() if line.strip()] |
|
|
queue_lines_count = len(queue_lines) |
|
|
|
|
|
for i in range(min(batch_count, queue_lines_count)): |
|
|
prompt_preview = queue_lines[i][:50] + "..." if len(queue_lines[i]) > 50 else queue_lines[i] |
|
|
print(translate(" └ バッチ{0}: {1}").format(i+1, prompt_preview)) |
|
|
except Exception:
|
|
pass |
|
|
print(translate("バッチ処理情報: 合計{0}回").format(batch_count)) |
|
|
print(translate("プロンプトキュー: 有効, プロンプト行数={0}行").format(queue_lines_count)) |
|
|
else: |
|
|
print(translate("バッチ処理情報: 合計{0}回").format(batch_count)) |
|
|
print(translate("キュー機能: 無効")) |
|
|
for batch_index in range(batch_count): |
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("バッチ処理がユーザーによって中止されました")) |
|
|
yield ( |
|
|
gr.skip(), |
|
|
gr.update(visible=False), |
|
|
translate("バッチ処理が中止されました。"), |
|
|
'', |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update() |
|
|
) |
|
|
break |
|
|
|
|
|
|
|
|
if batch_count > 1: |
|
|
batch_info = translate("バッチ処理: {0}/{1}").format(batch_index + 1, batch_count) |
|
|
print(f"{batch_info}") |
|
|
|
|
|
yield gr.skip(), gr.update(visible=False), batch_info, "", gr.update(interactive=False), gr.update(interactive=True), gr.update() |
|
|
|
|
|
|
|
|
current_prompt = prompt |
|
|
current_image = input_image |
|
|
|
|
|
|
|
|
using_custom_prompt = False |
|
|
if queue_enabled and queue_type == "image" and batch_index > 0: |
|
|
if batch_index - 1 < len(image_queue_files): |
|
|
queue_img_path = image_queue_files[batch_index - 1] |
|
|
img_basename = os.path.splitext(queue_img_path)[0] |
|
|
txt_path = f"{img_basename}.txt" |
|
|
if os.path.exists(txt_path): |
|
|
img_name = os.path.basename(queue_img_path) |
|
|
using_custom_prompt = True |
|
|
print(translate("セクション{0}はイメージキュー画像「{1}」の専用プロンプトを使用します").format("全て", img_name)) |
|
|
|
|
|
|
|
|
if queue_enabled: |
|
|
if queue_type == "prompt" and prompt_queue_file_path is not None: |
|
|
|
|
|
if os.path.exists(prompt_queue_file_path): |
|
|
try: |
|
|
with open(prompt_queue_file_path, 'r', encoding='utf-8') as f: |
|
|
lines = [line.strip() for line in f.readlines() if line.strip()] |
|
|
if batch_index < len(lines): |
|
|
|
|
|
current_prompt = lines[batch_index] |
|
|
print(translate("プロンプトキュー実行中: バッチ {0}/{1}").format(batch_index+1, batch_count)) |
|
|
print(translate(" └ プロンプト: 「{0}...」").format(current_prompt[:50])) |
|
|
else: |
|
|
print(translate("プロンプトキュー実行中: バッチ {0}/{1} はプロンプト行数を超えているため元のプロンプトを使用").format(batch_index+1, batch_count)) |
|
|
except Exception as e: |
|
|
print(translate("プロンプトキューファイル読み込みエラー: {0}").format(str(e))) |
|
|
|
|
|
elif queue_type == "image" and len(image_queue_files) > 0: |
|
|
|
|
|
|
|
|
if batch_index == 0: |
|
|
print(translate("イメージキュー実行中: バッチ {0}/{1} は入力画像を使用").format(batch_index+1, batch_count)) |
|
|
elif batch_index > 0: |
|
|
|
|
|
image_index = batch_index - 1 |
|
|
|
|
|
if image_index < len(image_queue_files): |
|
|
current_image = image_queue_files[image_index] |
|
|
image_filename = os.path.basename(current_image) |
|
|
print(translate("イメージキュー実行中: バッチ {0}/{1} の画像「{2}」").format(batch_index+1, batch_count, image_filename)) |
|
|
print(translate(" └ 画像ファイルパス: {0}").format(current_image)) |
|
|
|
|
|
|
|
|
img_basename = os.path.splitext(current_image)[0] |
|
|
txt_path = f"{img_basename}.txt" |
|
|
if os.path.exists(txt_path): |
|
|
try: |
|
|
with open(txt_path, 'r', encoding='utf-8') as f: |
|
|
custom_prompt = f.read().strip() |
|
|
if custom_prompt: |
|
|
print(translate("イメージキュー: 画像「{0}」用のテキストファイルを読み込みました").format(image_filename)) |
|
|
print(translate("カスタムプロンプト: {0}").format(custom_prompt[:50] + "..." if len(custom_prompt) > 50 else custom_prompt)) |
|
|
|
|
|
current_prompt = custom_prompt |
|
|
except Exception as e: |
|
|
print(translate("イメージキュー: テキストファイル読み込みエラー: {0}").format(e)) |
|
|
else: |
|
|
|
|
|
print(translate("イメージキュー実行中: バッチ {0}/{1} は画像数を超えているため入力画像を使用").format(batch_index+1, batch_count)) |
|
|
|
|
|
|
|
|
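# Each batch run derives its seed from the original seed plus the batch index.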
current_seed = original_seed + batch_index |
|
|
if batch_count > 1: |
|
|
print(translate("初期SEED値: {0}").format(current_seed)) |
|
|
|
|
|
seed = current_seed |
|
|
|
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("バッチ処理が中断されました。worker関数の実行をキャンセルします。")) |
|
|
|
|
|
yield (gr.skip(), |
|
|
gr.update(visible=False), |
|
|
translate("バッチ処理が中断されました({0}/{1})").format(batch_index, batch_count), |
|
|
'', |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update()) |
|
|
break |
|
|
|
|
|
|
|
|
gpu_memory_value = float(gpu_memory_preservation) if gpu_memory_preservation is not None else 6.0 |
|
|
print(translate('Using GPU memory preservation setting: {0} GB').format(gpu_memory_value)) |
|
|
|
|
|
|
|
|
if not output_dir or not output_dir.strip(): |
|
|
output_dir = "outputs" |
|
|
print(translate('Output directory: {0}').format(output_dir)) |
|
|
|
|
|
|
|
|
if hasattr(frame_save_mode, 'value'): |
|
|
frame_save_mode_actual = frame_save_mode.value |
|
|
else: |
|
|
frame_save_mode_actual = frame_save_mode |
|
|
|
|
|
print(translate("現在のバッチ: {0}/{1}, 画像: {2}").format( |
|
|
batch_index + 1, |
|
|
batch_count, |
|
|
os.path.basename(current_image) if isinstance(current_image, str) else translate("入力画像")
|
|
)) |
|
|
|
|
|
|
|
|
async_run( |
|
|
worker, |
|
|
current_image, |
|
|
current_prompt, |
|
|
n_prompt, |
|
|
seed, |
|
|
total_second_length, |
|
|
latent_window_size, |
|
|
steps, |
|
|
cfg, |
|
|
gs, |
|
|
rs, |
|
|
gpu_memory_value, |
|
|
use_teacache, |
|
|
mp4_crf, |
|
|
all_padding_value, |
|
|
image_strength, |
|
|
keep_section_videos, |
|
|
lora_files, |
|
|
lora_files2, |
|
|
lora_files3, |
|
|
lora_scales_text, |
|
|
output_dir, |
|
|
save_section_frames, |
|
|
use_all_padding, |
|
|
use_lora, |
|
|
lora_mode, |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
save_tensor_data, |
|
|
tensor_data_input, |
|
|
fp8_optimization, |
|
|
resolution, |
|
|
batch_index, |
|
|
frame_save_mode_actual |
|
|
) |
|
|
|
|
|
|
|
|
batch_output_filename = None |
|
|
|
|
|
|
|
|
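# Pump the worker's output queue: 'file' delivers a finished video, 'progress' updates the UI, 'end' closes out the batch.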
while True: |
|
|
flag, data = stream.output_queue.next() |
|
|
|
|
|
if flag == 'file': |
|
|
batch_output_filename = data |
|
|
|
|
|
yield ( |
|
|
batch_output_filename if batch_output_filename is not None else gr.skip(), |
|
|
gr.update(value=None, visible=False), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(), |
|
|
) |
|
|
|
|
|
if flag == 'progress': |
|
|
preview, desc, html = data |
|
|
|
|
|
if batch_count > 1: |
|
|
batch_info = translate("バッチ処理: {0}/{1} - ").format(batch_index + 1, batch_count) |
|
|
desc = batch_info + desc |
|
|
|
|
|
yield gr.skip(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True), gr.update() |
|
|
|
|
|
if flag == 'end': |
|
|
|
|
|
if batch_index == batch_count - 1 or batch_stopped: |
|
|
|
|
|
completion_message = "" |
|
|
if batch_stopped: |
|
|
completion_message = translate("バッチ処理が中止されました({0}/{1})").format(batch_index + 1, batch_count) |
|
|
else: |
|
|
completion_message = translate("バッチ処理が完了しました({0}/{1})").format(batch_count, batch_count) |
|
|
yield ( |
|
|
batch_output_filename if batch_output_filename is not None else gr.skip(), |
|
|
gr.update(value=None, visible=False), |
|
|
completion_message, |
|
|
'', |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update() |
|
|
) |
|
|
|
|
|
print(translate("バッチシーケンス完了: 全 {0} バッチの処理を終了").format(batch_count)) |
|
|
else: |
|
|
|
|
|
next_batch_message = translate("バッチ処理: {0}/{1} 完了、次のバッチに進みます...").format(batch_index + 1, batch_count) |
|
|
print(translate("バッチ {0}/{1} 完了 - 次のバッチに進みます").format(batch_index + 1, batch_count)) |
|
|
yield ( |
|
|
batch_output_filename if batch_output_filename is not None else gr.skip(), |
|
|
gr.update(value=None, visible=False), |
|
|
next_batch_message, |
|
|
'', |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update() |
|
|
) |
|
|
|
|
|
continue_next_batch = True |
|
|
break |
|
|
|
|
|
|
|
|
output_filename = batch_output_filename |
|
|
|
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("バッチ処理ループを中断します")) |
|
|
break |
|
|
|
|
|
|
|
|
def end_process(): |
|
|
global stream |
|
|
global batch_stopped |
|
|
|
|
|
|
|
|
batch_stopped = True |
|
|
print(translate("停止ボタンが押されました。バッチ処理を停止します...")) |
|
|
|
|
|
stream.input_queue.push('end') |
|
|
|
|
|
|
|
|
return gr.update(value=translate("停止処理中...")) |
|
|
|
|
|
|
|
|
quick_prompts = [ |
|
|
'A character doing some simple body movements.', |
|
|
'A character uses expressive hand gestures and body language.', |
|
|
'A character walks leisurely with relaxed movements.', |
|
|
'A character performs dynamic movements with energy and flowing motion.', |
|
|
'A character moves in unexpected ways, with surprising transitions poses.', |
|
|
] |
|
|
quick_prompts = [[x] for x in quick_prompts] |
|
|
|
|
|
css = get_app_css() |
|
|
block = gr.Blocks(css=css).queue() |
|
|
with block: |
|
|
gr.HTML('<h1>FramePack<span class="title-suffix">-<s>eichi</s> F1</span></h1>') |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
with gr.Column(scale=1): |
|
|
|
|
|
mode_radio = gr.Radio(choices=[MODE_TYPE_NORMAL], value=MODE_TYPE_NORMAL, label=translate("生成モード"), info=translate("F1モードでは通常のみ利用可能")) |
|
|
with gr.Column(scale=1): |
|
|
|
|
|
frame_size_radio = gr.Radio( |
|
|
choices=[translate("1秒 (33フレーム)"), translate("0.5秒 (17フレーム)")], |
|
|
value=translate("1秒 (33フレーム)"), |
|
|
label=translate("セクションフレームサイズ"), |
|
|
info=translate("1秒 = 高品質・通常速度 / 0.5秒 = よりなめらかな動き(実験的機能)") |
|
|
) |
|
|
with gr.Column(scale=1): |
|
|
|
|
|
use_all_padding = gr.Checkbox( |
|
|
label=translate("オールパディング"), |
|
|
value=False, |
|
|
info=translate("F1モードでは使用できません。無印モードでのみ有効です。"), |
|
|
elem_id="all_padding_checkbox", |
|
|
interactive=False |
|
|
) |
|
|
all_padding_value = gr.Slider( |
|
|
label=translate("パディング値"), |
|
|
minimum=0.2, |
|
|
maximum=3, |
|
|
value=1, |
|
|
step=0.1, |
|
|
info=translate("F1モードでは使用できません"), |
|
|
visible=False, |
|
|
interactive=False |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_all_padding_visibility(use_all_padding): |
|
|
return gr.update(visible=use_all_padding) |
|
|
|
|
|
use_all_padding.change( |
|
|
fn=toggle_all_padding_visibility, |
|
|
inputs=[use_all_padding], |
|
|
outputs=[all_padding_value] |
|
|
) |
|
|
with gr.Column(scale=1): |
|
|
|
|
|
length_radio = gr.Radio(choices=get_video_modes(), value=translate("1秒"), label=translate("動画長"), info=translate("動画の長さを設定。F1モードでは右下の「動画の総長(秒)」で20秒より長い動画長を設定可能です")) |
|
|
|
|
|
with gr.Row(): |
|
|
with gr.Column(): |
|
|
input_image = gr.Image(sources=['upload', 'clipboard'], type="filepath", label="Image", height=320) |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown(f"### " + translate("テンソルデータ設定")) |
|
|
|
|
|
|
|
|
use_tensor_data = gr.Checkbox(label=translate("テンソルデータを使用する"), value=False, info=translate("チェックをオンにするとテンソルデータをアップロードできます")) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as tensor_data_group: |
|
|
tensor_data_input = gr.File( |
|
|
label=translate("テンソルデータアップロード (.safetensors) - 生成動画の後方(末尾)に結合されます"), |
|
|
file_types=[".safetensors"] |
|
|
) |
|
|
|
|
|
gr.Markdown(translate("※ テンソルデータをアップロードすると通常の動画生成後に、その動画の後方(末尾)に結合されます。\n結合した動画は「元のファイル名_combined.mp4」として保存されます。\n※ テンソルデータの保存機能を有効にすると、生成とアップロードのテンソルを結合したデータも保存されます。\n※ テンソルデータの結合は別ツール `python eichi_utils/tensor_combiner.py --ui` でもできます。")) |
|
|
|
|
|
|
|
|
def toggle_tensor_data_settings(use_tensor): |
|
|
return gr.update(visible=use_tensor) |
|
|
|
|
|
|
|
|
use_tensor_data.change( |
|
|
fn=toggle_tensor_data_settings, |
|
|
inputs=[use_tensor_data], |
|
|
outputs=[tensor_data_group] |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_queue_settings(use_queue_val): |
|
|
|
|
|
global queue_enabled, queue_type |
|
|
|
|
|
|
|
|
is_enabled = False |
|
|
|
|
|
|
|
|
if hasattr(use_queue_val, 'value'): |
|
|
is_enabled = bool(use_queue_val.value) |
|
|
|
|
|
elif isinstance(use_queue_val, bool): |
|
|
is_enabled = use_queue_val |
|
|
|
|
|
elif isinstance(use_queue_val, str) and use_queue_val.lower() in ('true', 'false', 't', 'f', 'yes', 'no', 'y', 'n', '1', '0'): |
|
|
is_enabled = use_queue_val.lower() in ('true', 't', 'yes', 'y', '1') |
|
|
|
|
|
|
|
|
queue_enabled = is_enabled |
|
|
|
|
|
print(translate("トグル関数: チェックボックスの型={0}, 値={1}").format(type(use_queue_val).__name__, use_queue_val)) |
|
|
print(translate("キュー設定の表示状態を変更: {0} (グローバル変数に保存: queue_enabled={1})").format(is_enabled, queue_enabled)) |
|
|
|
|
|
|
|
|
if is_enabled: |
|
|
if queue_type == "prompt": |
|
|
return [gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)] |
|
|
else: |
|
|
return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)] |
|
|
else: |
|
|
|
|
|
return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)] |
|
|
|
|
|
|
|
|
def toggle_queue_type(queue_type_val): |
|
|
global queue_type |
|
|
|
|
|
|
|
|
if queue_type_val == translate("プロンプトキュー"): |
|
|
queue_type = "prompt" |
|
|
return [gr.update(visible=True), gr.update(visible=False)] |
|
|
else: |
|
|
queue_type = "image" |
|
|
|
|
|
get_image_queue_files() |
|
|
return [gr.update(visible=False), gr.update(visible=True)] |
|
|
|
|
|
|
|
|
def handle_file_upload(file_obj): |
|
|
global prompt_queue_file_path |
|
|
|
|
|
if file_obj is not None: |
|
|
print(translate("ファイルアップロード検出: 型={0}").format(type(file_obj).__name__)) |
|
|
|
|
|
if hasattr(file_obj, 'name'): |
|
|
prompt_queue_file_path = file_obj.name |
|
|
print(translate("アップロードファイルパス保存: {0}").format(prompt_queue_file_path)) |
|
|
else: |
|
|
prompt_queue_file_path = file_obj |
|
|
print(translate("アップロードファイルデータ保存: {0}").format(file_obj)) |
|
|
else: |
|
|
prompt_queue_file_path = None |
|
|
print("ファイルアップロード解除") |
|
|
|
|
|
return file_obj |
|
|
|
|
|
|
|
|
def handle_input_folder_change(folder_name): |
|
|
"""入力フォルダ名が変更されたときの処理(グローバル変数に保存するだけ)""" |
|
|
global input_folder_name_value |
|
|
|
|
|
|
|
|
folder_name = folder_name.strip() |
|
|
|
|
|
|
|
|
if not folder_name: |
|
|
folder_name = "inputs" |
|
|
|
|
|
|
|
|
folder_name = ''.join(c for c in folder_name if c.isalnum() or c in ('_', '-')) |
|
|
|
|
|
|
|
|
input_folder_name_value = folder_name |
|
|
print(translate("入力フォルダ名をメモリに保存: {0}(保存及び入力フォルダを開くボタンを押すと保存されます)").format(folder_name)) |
|
|
|
|
|
|
|
|
return gr.update(value=folder_name) |
|
|
|
|
|
|
|
|
def open_input_folder(): |
|
|
"""入力フォルダを開く処理(保存も実行)""" |
|
|
global input_folder_name_value |
|
|
|
|
|
|
|
|
settings = load_settings() |
|
|
settings['input_folder'] = input_folder_name_value |
|
|
save_settings(settings) |
|
|
print(translate("入力フォルダ設定を保存しました: {0}").format(input_folder_name_value)) |
|
|
|
|
|
|
|
|
input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), input_folder_name_value) |
|
|
|
|
|
|
|
|
if not os.path.exists(input_dir): |
|
|
os.makedirs(input_dir, exist_ok=True) |
|
|
print(translate("入力ディレクトリを作成しました: {0}").format(input_dir)) |
|
|
|
|
|
|
|
|
get_image_queue_files() |
|
|
|
|
|
|
|
|
try: |
|
|
if os.name == 'nt': |
|
|
os.startfile(input_dir) |
|
|
elif os.name == 'posix': |
|
|
if sys.platform == 'darwin': |
|
|
subprocess.Popen(['open', input_dir]) |
|
|
else: |
|
|
subprocess.Popen(['xdg-open', input_dir]) |
|
|
print(translate("入力フォルダを開きました: {0}").format(input_dir)) |
|
|
return translate("設定を保存し、入力フォルダを開きました") |
|
|
except Exception as e: |
|
|
error_msg = translate("フォルダを開けませんでした: {0}").format(str(e)) |
|
|
print(error_msg) |
|
|
return error_msg |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
with gr.Row(): |
|
|
with gr.Column(scale=2): |
|
|
resolution = gr.Dropdown( |
|
|
label=translate("解像度"), |
|
|
choices=[512, 640, 768, 960, 1080], |
|
|
value=saved_app_settings.get("resolution", 640) if saved_app_settings else 640, |
|
|
info=translate("出力動画の基準解像度。640推奨。960/1080は高負荷・高メモリ消費"), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
with gr.Column(scale=1): |
|
|
batch_count = gr.Slider( |
|
|
label=translate("バッチ処理回数"), |
|
|
minimum=1, |
|
|
maximum=100, |
|
|
value=1, |
|
|
step=1, |
|
|
info=translate("同じ設定で連続生成する回数。SEEDは各回で+1されます") |
|
|
) |
|
|
|
|
|
|
|
|
use_queue = gr.Checkbox( |
|
|
label=translate("キューを使用"), |
|
|
value=False, |
|
|
info=translate("チェックをオンにするとプロンプトまたは画像の連続処理ができます。") |
|
|
) |
|
|
|
|
|
|
|
|
queue_type_selector = gr.Radio( |
|
|
choices=[translate("プロンプトキュー"), translate("イメージキュー")], |
|
|
value=translate("プロンプトキュー"), |
|
|
label=translate("キュータイプ"), |
|
|
visible=False, |
|
|
interactive=True |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as prompt_queue_group: |
|
|
prompt_queue_file = gr.File( |
|
|
label=translate("プロンプトキューファイル (.txt) - 1行に1つのプロンプトが記載されたテキストファイル"), |
|
|
file_types=[".txt"] |
|
|
) |
|
|
gr.Markdown(translate("※ ファイル内の各行が別々のプロンプトとして処理されます。\n※ チェックボックスがオフの場合は無効。\n※ バッチ処理回数より行数が多い場合は行数分処理されます。\n※ バッチ処理回数が1でもキュー回数が優先されます。")) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as image_queue_group: |
|
|
gr.Markdown(translate("※ 1回目はImage画像を使用し、2回目以降は入力フォルダの画像ファイルを名前順に使用します。\n※ 画像と同名のテキストファイル(例:image1.jpg → image1.txt)があれば、その内容を自動的にプロンプトとして使用します。\n※ バッチ回数が全画像数を超える場合、残りはImage画像で処理されます。\n※ バッチ処理回数が1でもキュー回数が優先されます。")) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
input_folder_name = gr.Textbox( |
|
|
label=translate("入力フォルダ名"), |
|
|
value=input_folder_name_value, |
|
|
info=translate("画像ファイルを格納するフォルダ名") |
|
|
) |
|
|
open_input_folder_btn = gr.Button(value="📂 " + translate("保存及び入力フォルダを開く"), size="md") |
|
|
|
|
|
|
|
|
use_queue.change( |
|
|
fn=toggle_queue_settings, |
|
|
inputs=[use_queue], |
|
|
outputs=[queue_type_selector, prompt_queue_group, image_queue_group] |
|
|
) |
|
|
|
|
|
|
|
|
queue_type_selector.change( |
|
|
fn=toggle_queue_type, |
|
|
inputs=[queue_type_selector], |
|
|
outputs=[prompt_queue_group, image_queue_group] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
prompt_queue_file.change( |
|
|
fn=handle_file_upload, |
|
|
inputs=[prompt_queue_file], |
|
|
outputs=[prompt_queue_file] |
|
|
) |
|
|
|
|
|
|
|
|
input_folder_name.change( |
|
|
fn=handle_input_folder_change, |
|
|
inputs=[input_folder_name], |
|
|
outputs=[input_folder_name] |
|
|
) |
|
|
|
|
|
|
|
|
open_input_folder_btn.click( |
|
|
fn=open_input_folder, |
|
|
inputs=[], |
|
|
outputs=[gr.Textbox(visible=False)] |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
start_button = gr.Button(value=translate("Start Generation")) |
|
|
end_button = gr.Button(value=translate("End Generation"), interactive=False) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
fp8_optimization = gr.Checkbox( |
|
|
label=translate("FP8 最適化"), |
|
|
value=True, |
|
|
info=translate("メモリ使用量を削減し速度を改善(PyTorch 2.1以上が必要)") |
|
|
) |
|
|
|
|
|
|
|
|
section_number_inputs = [] |
|
|
section_image_inputs = [] |
|
|
section_prompt_inputs = [] |
|
|
section_row_groups = [] |
|
|
|
|
|
|
|
|
max_keyframes = get_max_keyframes_count() |
|
|
|
|
|
|
|
|
def get_current_sections_count(): |
|
|
mode_value = length_radio.value |
|
|
if mode_value in VIDEO_MODE_SETTINGS: |
|
|
|
|
|
return VIDEO_MODE_SETTINGS[mode_value]["sections"] |
|
|
return max_keyframes |
|
|
|
|
|
|
|
|
initial_sections_count = get_current_sections_count() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
global copy_metadata |
|
|
copy_metadata = gr.Checkbox( |
|
|
label=translate("埋め込みプロンプトおよびシードを複写する"), |
|
|
value=False, |
|
|
info=translate("チェックをオンにすると、画像のメタデータからプロンプトとシードを自動的に取得します"), |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
section_image_inputs = [] |
|
|
section_number_inputs = [] |
|
|
section_prompt_inputs = [] |
|
|
section_row_groups = [] |
|
|
|
|
|
|
|
|
|
|
|
def update_from_image_metadata(image_path, copy_enabled=False): |
|
|
"""Imageアップロード時にメタデータを抽出してUIに反映する |
|
|
F1モードではキーフレームコピー機能を削除済みのため、単純化 |
|
|
""" |
|
|
|
|
|
if not copy_enabled: |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
if image_path is None: |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
try: |
|
|
|
|
|
metadata = extract_metadata_from_png(image_path) |
|
|
|
|
|
if not metadata: |
|
|
print(translate("アップロードされた画像にメタデータが含まれていません")) |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
print(translate("画像からメタデータを抽出しました: {0}").format(metadata)) |
|
|
|
|
|
|
|
|
prompt_update = gr.update() |
|
|
seed_update = gr.update() |
|
|
|
|
|
if PROMPT_KEY in metadata and metadata[PROMPT_KEY]: |
|
|
prompt_update = gr.update(value=metadata[PROMPT_KEY]) |
|
|
|
|
|
if SEED_KEY in metadata and metadata[SEED_KEY]: |
|
|
|
|
|
try: |
|
|
seed_value = int(metadata[SEED_KEY]) |
|
|
seed_update = gr.update(value=seed_value) |
|
|
except (ValueError, TypeError): |
|
|
print(translate("SEED値の変換エラー: {0}").format(metadata[SEED_KEY])) |
|
|
|
|
|
return [prompt_update, seed_update] |
|
|
except Exception as e: |
|
|
print(translate("メタデータ抽出処理中のエラー: {0}").format(e)) |
|
|
traceback.print_exc() |
|
|
print(translate("メタデータ抽出エラー: {0}").format(e)) |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Group(visible=has_lora_support) as lora_settings_group: |
|
|
gr.Markdown(f"### " + translate("LoRA設定")) |
|
|
|
|
|
|
|
|
|
|
|
use_lora = gr.Checkbox(label=translate("LoRAを使用する"), value=False, info=translate("チェックをオンにするとLoRAを使用します(要16GB VRAM以上)")) |
|
|
|
|
|
|
|
|
def scan_lora_directory(): |
|
|
"""./loraディレクトリからLoRAモデルファイルを検索する関数""" |
|
|
lora_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lora') |
|
|
choices = [] |
|
|
|
|
|
|
|
|
if not os.path.exists(lora_dir): |
|
|
os.makedirs(lora_dir, exist_ok=True) |
|
|
print(translate("LoRAディレクトリが存在しなかったため作成しました: {0}").format(lora_dir)) |
|
|
|
|
|
|
|
|
for filename in os.listdir(lora_dir): |
|
|
if filename.endswith(('.safetensors', '.pt', '.bin')): |
|
|
choices.append(filename) |
|
|
|
|
|
|
|
|
choices = sorted(choices) |
|
|
|
|
|
|
|
|
none_choice = translate("なし") |
|
|
choices.insert(0, none_choice) |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
|
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
print(translate("LoRAディレクトリから{0}個のモデルを検出しました").format(len(choices) - 1)) |
|
|
|
|
|
return choices |
|
|
|
|
|
|
|
|
lora_mode = gr.Radio( |
|
|
choices=[translate("ディレクトリから選択"), translate("ファイルアップロード")], |
|
|
value=translate("ディレクトリから選択"), |
|
|
label=translate("LoRA読み込み方式"), |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as lora_upload_group: |
|
|
|
|
|
lora_files = gr.File( |
|
|
label=translate("LoRAファイル (.safetensors, .pt, .bin)"), |
|
|
file_types=[".safetensors", ".pt", ".bin"], |
|
|
visible=True |
|
|
) |
|
|
|
|
|
lora_files2 = gr.File( |
|
|
label=translate("LoRAファイル2 (.safetensors, .pt, .bin)"), |
|
|
file_types=[".safetensors", ".pt", ".bin"], |
|
|
visible=True |
|
|
) |
|
|
|
|
|
lora_files3 = gr.File( |
|
|
label=translate("LoRAファイル3 (.safetensors, .pt, .bin)"), |
|
|
file_types=[".safetensors", ".pt", ".bin"], |
|
|
visible=True |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as lora_dropdown_group: |
|
|
|
|
|
lora_dropdown1 = gr.Dropdown( |
|
|
label=translate("LoRAモデル選択 1"), |
|
|
choices=[], |
|
|
value=None, |
|
|
allow_custom_value=False |
|
|
) |
|
|
lora_dropdown2 = gr.Dropdown( |
|
|
label=translate("LoRAモデル選択 2"), |
|
|
choices=[], |
|
|
value=None, |
|
|
allow_custom_value=False |
|
|
) |
|
|
lora_dropdown3 = gr.Dropdown( |
|
|
label=translate("LoRAモデル選択 3"), |
|
|
choices=[], |
|
|
value=None, |
|
|
allow_custom_value=False |
|
|
) |
|
|
|
|
|
lora_scan_button = gr.Button(translate("LoRAディレクトリを再スキャン"), variant="secondary") |
|
|
|
|
|
|
|
|
lora_scales_text = gr.Textbox( |
|
|
label=translate("LoRA適用強度 (カンマ区切り)"), |
|
|
value="0.8,0.8,0.8", |
|
|
info=translate("各LoRAのスケール値をカンマ区切りで入力 (例: 0.8,0.5,0.3)"), |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_lora_settings(use_lora): |
|
|
if use_lora: |
|
|
|
|
|
choices = scan_lora_directory() |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
preset_visible = True |
|
|
|
|
|
|
|
|
return [ |
|
|
gr.update(visible=True), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=True), |
|
|
gr.update(visible=True), |
|
|
] |
|
|
else: |
|
|
|
|
|
return [ |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
] |
|
|
|
|
|
|
|
|
def toggle_lora_mode(mode): |
|
|
if mode == translate("ディレクトリから選択"): |
|
|
|
|
|
|
|
|
choices = scan_lora_directory() |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
first_choice = choices[0] |
|
|
|
|
|
|
|
|
return [ |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=True), |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]) |
|
|
] |
|
|
else: |
|
|
|
|
|
return [ |
|
|
gr.update(visible=True), |
|
|
gr.update(visible=False), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
gr.update() |
|
|
] |
|
|
|
|
|
|
|
|
def update_lora_dropdowns(): |
|
|
choices = scan_lora_directory() |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
return [ |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
] |
|
|
|
|
|
|
|
|
previous_lora_mode = translate("ディレクトリから選択") |
|
|
|
|
|
|
|
|
def toggle_lora_full_update(use_lora_val): |
|
|
|
|
|
global previous_lora_mode |
|
|
|
|
|
|
|
|
|
|
|
if not use_lora_val: |
|
|
|
|
|
current_mode = getattr(lora_mode, 'value', translate("ディレクトリから選択")) |
|
|
if current_mode: |
|
|
previous_lora_mode = current_mode |
|
|
|
|
|
|
|
|
settings_updates = toggle_lora_settings(use_lora_val) |
|
|
|
|
|
|
|
|
if use_lora_val: |
|
|
|
|
|
|
|
|
if previous_lora_mode == translate("ファイルアップロード"): |
|
|
|
|
|
|
|
|
settings_updates[0] = gr.update(visible=True, value=translate("ファイルアップロード")) |
|
|
settings_updates[1] = gr.update(visible=True) |
|
|
settings_updates[2] = gr.update(visible=False) |
|
|
|
|
|
|
|
|
return settings_updates + [gr.update(), gr.update(), gr.update()] |
|
|
else: |
|
|
|
|
|
choices = scan_lora_directory() |
|
|
|
|
|
|
|
|
dropdown_updates = [ |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]) |
|
|
] |
|
|
|
|
|
|
|
|
settings_updates[0] = gr.update(visible=True, value=translate("ディレクトリから選択")) |
|
|
return settings_updates + dropdown_updates |
|
|
|
|
|
|
|
|
return settings_updates + [gr.update(), gr.update(), gr.update()] |
|
|
|
|
|
|
|
|
def toggle_lora_mode_with_memory(mode_value): |
|
|
|
|
|
global previous_lora_mode |
|
|
previous_lora_mode = mode_value |
|
|
|
|
|
|
|
|
return toggle_lora_mode(mode_value) |
|
|
|
|
|
|
|
|
use_lora.change( |
|
|
fn=toggle_lora_full_update, |
|
|
inputs=[use_lora], |
|
|
outputs=[lora_mode, lora_upload_group, lora_dropdown_group, lora_scales_text, |
|
|
lora_dropdown1, lora_dropdown2, lora_dropdown3] |
|
|
) |
|
|
|
|
|
|
|
|
lora_mode.change( |
|
|
fn=toggle_lora_mode_with_memory, |
|
|
inputs=[lora_mode], |
|
|
outputs=[lora_upload_group, lora_dropdown_group, lora_dropdown1, lora_dropdown2, lora_dropdown3] |
|
|
) |
|
|
|
|
|
|
|
|
lora_scan_button.click( |
|
|
fn=update_lora_dropdowns, |
|
|
inputs=[], |
|
|
outputs=[lora_dropdown1, lora_dropdown2, lora_dropdown3] |
|
|
) |
|
|
|
|
|
|
|
|
def lora_ready_init(): |
|
|
"""LoRAドロップダウンの初期化を行う関数""" |
|
|
|
|
|
|
|
|
use_lora_value = getattr(use_lora, 'value', False) |
|
|
lora_mode_value = getattr(lora_mode, 'value', translate("ディレクトリから選択")) |
|
|
|
|
|
|
|
|
global previous_lora_mode |
|
|
previous_lora_mode = lora_mode_value |
|
|
|
|
|
if use_lora_value: |
|
|
|
|
|
if lora_mode_value == translate("ディレクトリから選択"): |
|
|
|
|
|
choices = scan_lora_directory() |
|
|
return [ |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]), |
|
|
gr.update(choices=choices, value=choices[0]) |
|
|
] |
|
|
else: |
|
|
|
|
|
return [gr.update(), gr.update(), gr.update()] |
|
|
|
|
|
|
|
|
return [gr.update(), gr.update(), gr.update()] |
|
|
|
|
|
|
|
|
lora_init_btn = gr.Button(visible=False, elem_id="lora_init_btn_f1") |
|
|
lora_init_btn.click( |
|
|
fn=lora_ready_init, |
|
|
inputs=[], |
|
|
outputs=[lora_dropdown1, lora_dropdown2, lora_dropdown3] |
|
|
) |
|
|
|
|
|
|
|
|
js_init_code = """ |
|
|
function initLoraDropdowns() { |
|
|
// UIロード後、少し待ってからボタンをクリック |
|
|
setTimeout(function() { |
|
|
// 非表示ボタンを探して自動クリック |
|
|
var initBtn = document.getElementById('lora_init_btn_f1'); |
|
|
if (initBtn) { |
|
|
console.log('LoRAドロップダウン初期化ボタンを自動実行します'); |
|
|
initBtn.click(); |
|
|
} else { |
|
|
console.log('LoRAドロップダウン初期化ボタンが見つかりません'); |
|
|
} |
|
|
}, 1000); // 1秒待ってから実行 |
|
|
} |
|
|
|
|
|
// ページロード時に初期化関数を呼び出し |
|
|
window.addEventListener('load', initLoraDropdowns); |
|
|
""" |
|
|
|
|
|
|
|
|
gr.HTML(f"<script>{js_init_code}</script>") |
|
|
|
|
|
|
|
|
lora_preset_group = None |
|
|
|
|
|
|
|
|
if has_lora_support: |
|
|
from eichi_utils.lora_preset_manager import save_lora_preset, load_lora_preset |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as lora_preset_group: |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
preset_buttons = [] |
|
|
for i in range(1, 6): |
|
|
preset_buttons.append( |
|
|
gr.Button( |
|
|
translate("設定{0}").format(i), |
|
|
variant="secondary", |
|
|
scale=1 |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(scale=1): |
|
|
load_btn = gr.Button(translate("Load"), variant="primary", scale=1) |
|
|
save_btn = gr.Button(translate("Save"), variant="secondary", scale=1) |
|
|
|
|
|
lora_preset_mode = gr.Radio( |
|
|
choices=[translate("Load"), translate("Save")], |
|
|
value=translate("Load"), |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
lora_preset_status = gr.Textbox( |
|
|
label=translate("プリセット状態"), |
|
|
value="", |
|
|
interactive=False, |
|
|
lines=1 |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_lora_and_preset(use_lora_val, lora_mode_val): |
|
|
|
|
|
preset_visible = use_lora_val and lora_mode_val == translate("ディレクトリから選択") |
|
|
return gr.update(visible=preset_visible) |
|
|
|
|
|
|
|
|
def handle_lora_preset_button(button_index, mode, lora1, lora2, lora3, scales): |
|
|
"""LoRAプリセットボタンのクリックを処理する""" |
|
|
if mode == translate("Load"): |
|
|
|
|
|
loaded_values = load_lora_preset(button_index) |
|
|
if loaded_values: |
|
|
return ( |
|
|
gr.update(value=loaded_values[0]), |
|
|
gr.update(value=loaded_values[1]), |
|
|
gr.update(value=loaded_values[2]), |
|
|
gr.update(value=loaded_values[3]), |
|
|
translate("設定{0}を読み込みました").format(button_index + 1) |
|
|
) |
|
|
else: |
|
|
return ( |
|
|
gr.update(), gr.update(), gr.update(), gr.update(), |
|
|
translate("設定{0}の読み込みに失敗しました").format(button_index + 1) |
|
|
) |
|
|
else: |
|
|
|
|
|
success, message = save_lora_preset(button_index, lora1, lora2, lora3, scales) |
|
|
return ( |
|
|
gr.update(), gr.update(), gr.update(), gr.update(), |
|
|
message |
|
|
) |
|
|
|
|
|
|
|
|
def set_load_mode(): |
|
|
return ( |
|
|
gr.update(value=translate("Load")), |
|
|
gr.update(variant="primary"), |
|
|
gr.update(variant="secondary") |
|
|
) |
|
|
|
|
|
def set_save_mode(): |
|
|
return ( |
|
|
gr.update(value=translate("Save")), |
|
|
gr.update(variant="secondary"), |
|
|
gr.update(variant="primary") |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
for i, btn in enumerate(preset_buttons): |
|
|
btn.click( |
|
|
fn=lambda mode, lora1, lora2, lora3, scales, idx=i: handle_lora_preset_button( |
|
|
idx, mode, lora1, lora2, lora3, scales |
|
|
), |
|
|
inputs=[lora_preset_mode, lora_dropdown1, lora_dropdown2, lora_dropdown3, lora_scales_text], |
|
|
outputs=[lora_dropdown1, lora_dropdown2, lora_dropdown3, lora_scales_text, lora_preset_status] |
|
|
) |
|
|
|
|
|
|
|
|
load_btn.click( |
|
|
set_load_mode, |
|
|
outputs=[lora_preset_mode, load_btn, save_btn] |
|
|
) |
|
|
|
|
|
save_btn.click( |
|
|
set_save_mode, |
|
|
outputs=[lora_preset_mode, load_btn, save_btn] |
|
|
) |
|
|
|
|
|
|
|
|
use_lora.change( |
|
|
toggle_lora_and_preset, |
|
|
inputs=[use_lora, lora_mode], |
|
|
outputs=[lora_preset_group] |
|
|
) |
|
|
|
|
|
lora_mode.change( |
|
|
toggle_lora_and_preset, |
|
|
inputs=[use_lora, lora_mode], |
|
|
outputs=[lora_preset_group] |
|
|
) |
|
|
else: |
|
|
|
|
|
lora_preset_group = gr.Group(visible=False) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
copy_metadata_visible = gr.Checkbox( |
|
|
label=translate("埋め込みプロンプトおよびシードを複写する"), |
|
|
value=False, |
|
|
info=translate("チェックをオンにすると、画像のメタデータからプロンプトとシードを自動的に取得します") |
|
|
) |
|
|
|
|
|
|
|
|
copy_metadata_visible.change( |
|
|
fn=lambda x: x, |
|
|
inputs=[copy_metadata_visible], |
|
|
outputs=[copy_metadata] |
|
|
) |
|
|
|
|
|
|
|
|
copy_metadata.change( |
|
|
fn=lambda x: x, |
|
|
inputs=[copy_metadata], |
|
|
outputs=[copy_metadata_visible], |
|
|
queue=False |
|
|
) |
|
|
|
|
|
|
|
|
prompt = gr.Textbox(label=translate("Prompt"), value=get_default_startup_prompt(), lines=6) |
|
|
|
|
|
|
|
|
with gr.Group(visible=True) as prompt_management: |
|
|
gr.Markdown(f"### " + translate("プロンプト管理")) |
|
|
|
|
|
|
|
|
with gr.Group(visible=True): |
|
|
|
|
|
default_prompt = "" |
|
|
default_name = "" |
|
|
for preset in load_presets()["presets"]: |
|
|
if preset.get("is_startup_default", False): |
|
|
default_prompt = preset["prompt"] |
|
|
default_name = preset["name"] |
|
|
break |
|
|
|
|
|
with gr.Row(): |
|
|
edit_name = gr.Textbox(label=translate("プリセット名"), placeholder=translate("名前を入力..."), value=default_name) |
|
|
|
|
|
edit_prompt = gr.Textbox(label=translate("プロンプト"), lines=5, value=default_prompt) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
default_preset = translate("起動時デフォルト") |
|
|
|
|
|
presets_data = load_presets() |
|
|
choices = [preset["name"] for preset in presets_data["presets"]] |
|
|
default_presets = [name for name in choices if any(p["name"] == name and p.get("is_default", False) for p in presets_data["presets"])] |
|
|
user_presets = [name for name in choices if name not in default_presets] |
|
|
sorted_choices = [(name, name) for name in sorted(default_presets) + sorted(user_presets)] |
|
|
preset_dropdown = gr.Dropdown(label=translate("プリセット"), choices=sorted_choices, value=default_preset, type="value") |
|
|
|
|
|
with gr.Row(): |
|
|
save_btn = gr.Button(value=translate("保存"), variant="primary") |
|
|
apply_preset_btn = gr.Button(value=translate("反映"), variant="primary") |
|
|
clear_btn = gr.Button(value=translate("クリア")) |
|
|
delete_preset_btn = gr.Button(value=translate("削除")) |
|
|
|
|
|
|
|
|
result_message = gr.Markdown("") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(visible=False): |
|
|
example_quick_prompts = gr.Dataset(samples=quick_prompts, label=translate("Quick List"), samples_per_page=1000, components=[prompt]) |
|
|
example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
section_settings = gr.State([[None, None, ""] for _ in range(max_keyframes)]) |
|
|
section_inputs = [] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def update_section_calculation(frame_size, mode, length): |
|
|
"""フレームサイズ変更時にセクション数を再計算して表示を更新""" |
|
|
|
|
|
seconds = get_video_seconds(length) |
|
|
|
|
|
|
|
|
latent_window_size = 4.5 if frame_size == translate("0.5秒 (17フレーム)") else 9 |
|
|
frame_count = 17 if latent_window_size == 4.5 else int(latent_window_size * 4 - 3)
|
|
|
|
|
|
|
|
total_frames = int(seconds * 30) |
|
|
total_sections = int(max(round(total_frames / frame_count), 1)) |
|
|
|
|
|
|
|
|
html = f"""<div style='padding: 10px; background-color: #f5f5f5; border-radius: 5px; font-size: 14px;'> |
|
|
{translate('<strong>計算詳細</strong>: フレームサイズ={0}, 総フレーム数={1}, セクションあたり={2}フレーム, 必要セクション数={3}').format(frame_size, total_frames, frame_count, total_sections)} |
|
|
<br> |
|
|
{translate('動画モード {0} とフレームサイズ {1} で必要なセクション数: <strong>{2}</strong>').format(length, frame_size, total_sections)} |
|
|
</div>""" |
|
|
|
|
|
|
|
|
print(translate("計算結果: モード=通常, フレームサイズ={0}, latent_window_size={1}, 総フレーム数={2}, 必要セクション数={3}").format(frame_size, latent_window_size, total_frames, total_sections)) |
|
|
|
|
|
return html |
|
|
|
|
|
|
|
|
initial_html = update_section_calculation(frame_size_radio.value, mode_radio.value, length_radio.value) |
|
|
section_calc_display = gr.HTML(value=initial_html, label="") |
|
|
|
|
|
|
|
|
frame_size_radio.change( |
|
|
fn=update_section_calculation, |
|
|
inputs=[frame_size_radio, mode_radio, length_radio], |
|
|
outputs=[section_calc_display] |
|
|
) |
|
|
|
|
|
|
|
|
def update_section_visibility(mode, length, frame_size=None): |
|
|
"""F1モードではシンプル化された関数""" |
|
|
|
|
|
seconds = get_video_seconds(length) |
|
|
print(translate("F1モード:シンプル設定(不要な機能を削除済み)")) |
|
|
|
|
|
|
|
|
return [gr.update()] * 2 + [gr.update(value=seconds)]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
length_radio.change( |
|
|
fn=update_section_calculation, |
|
|
inputs=[frame_size_radio, mode_radio, length_radio], |
|
|
outputs=[section_calc_display] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
mode_radio.change( |
|
|
fn=update_section_calculation, |
|
|
inputs=[frame_size_radio, mode_radio, length_radio], |
|
|
outputs=[section_calc_display] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Column(): |
|
|
result_video = gr.Video( |
|
|
label=translate("Finished Frames"), |
|
|
key="result_video", |
|
|
autoplay=True, |
|
|
show_share_button=False, |
|
|
height=512, |
|
|
loop=True, |
|
|
format="mp4", |
|
|
interactive=False, |
|
|
) |
|
|
progress_desc = gr.Markdown('', elem_classes='no-generating-animation') |
|
|
progress_bar = gr.HTML('', elem_classes='no-generating-animation') |
|
|
preview_image = gr.Image(label="Next Latents", height=200, visible=False) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
section_calc_display = gr.HTML("", label="") |
|
|
|
|
|
use_teacache = gr.Checkbox( |
|
|
label=translate('Use TeaCache'), |
|
|
value=saved_app_settings.get("use_teacache", True) if saved_app_settings else True, |
|
|
info=translate('Faster speed, but often makes hands and fingers slightly worse.'), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
|
|
|
|
|
|
use_random_seed_default = True |
|
|
seed_default = random.randint(0, 2**32 - 1) if use_random_seed_default else 1 |
|
|
|
|
|
use_random_seed = gr.Checkbox(label=translate("Use Random Seed"), value=use_random_seed_default) |
|
|
|
|
|
n_prompt = gr.Textbox(label=translate("Negative Prompt"), value="", visible=False) |
|
|
seed = gr.Number(label=translate("Seed"), value=seed_default, precision=0) |
|
|
|
|
|
|
|
|
|
|
|
input_image.change( |
|
|
fn=update_from_image_metadata, |
|
|
inputs=[input_image, copy_metadata], |
|
|
outputs=[prompt, seed] |
|
|
) |
|
|
|
|
|
|
|
|
def check_metadata_on_checkbox_change(copy_enabled, image_path): |
|
|
if not copy_enabled or image_path is None: |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
return update_from_image_metadata(image_path, copy_enabled) |
|
|
|
|
|
|
|
|
|
|
|
copy_metadata.change( |
|
|
fn=check_metadata_on_checkbox_change, |
|
|
inputs=[copy_metadata, input_image], |
|
|
outputs=[prompt, seed] |
|
|
) |
|
|
|
|
|
|
|
|
def set_random_seed(is_checked): |
|
|
if is_checked: |
|
|
return random.randint(0, 2**32 - 1) |
|
|
else: |
|
|
return gr.update() |
|
|
use_random_seed.change(fn=set_random_seed, inputs=use_random_seed, outputs=seed) |
|
|
|
|
|
total_second_length = gr.Slider(label=translate("Total Video Length (Seconds)"), minimum=1, maximum=120, value=1, step=1) |
|
|
latent_window_size = gr.Slider(label=translate("Latent Window Size"), minimum=1, maximum=33, value=9, step=1, visible=False) |
|
|
steps = gr.Slider( |
|
|
label=translate("Steps"), |
|
|
minimum=1, |
|
|
maximum=100, |
|
|
value=saved_app_settings.get("steps", 25) if saved_app_settings else 25, |
|
|
step=1, |
|
|
info=translate('Changing this value is not recommended.'), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
|
|
|
cfg = gr.Slider( |
|
|
label=translate("CFG Scale"), |
|
|
minimum=1.0, |
|
|
maximum=32.0, |
|
|
value=saved_app_settings.get("cfg", 1.0) if saved_app_settings else 1.0, |
|
|
step=0.01, |
|
|
visible=False, |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
gs = gr.Slider( |
|
|
label=translate("Distilled CFG Scale"), |
|
|
minimum=1.0, |
|
|
maximum=32.0, |
|
|
value=saved_app_settings.get("gs", 10) if saved_app_settings else 10, |
|
|
step=0.01, |
|
|
info=translate('Changing this value is not recommended.'), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
rs = gr.Slider(label=translate("CFG Re-Scale"), minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) |
|
|
|
|
|
available_cuda_memory_gb = round(torch.cuda.get_device_properties(0).total_memory / (1024**3)) |
|
|
default_gpu_memory_preservation_gb = 6 if available_cuda_memory_gb >= 20 else (8 if available_cuda_memory_gb > 16 else 10) |
|
|
gpu_memory_preservation = gr.Slider(label=translate("GPU Memory to Preserve (GB) (smaller = more VRAM usage)"), minimum=6, maximum=128, value=saved_app_settings.get("gpu_memory_preservation", default_gpu_memory_preservation_gb) if saved_app_settings else default_gpu_memory_preservation_gb, step=0.1, info=translate("空けておくGPUメモリ量を指定。小さい値=より多くのVRAMを使用可能=高速、大きい値=より少ないVRAMを使用=安全"), elem_classes="saveable-setting") |
|
|
|
|
|
|
|
|
mp4_crf = gr.Slider( |
|
|
label=translate("MP4 Compression"), |
|
|
minimum=0, |
|
|
maximum=100, |
|
|
value=saved_app_settings.get("mp4_crf", 16) if saved_app_settings else 16, |
|
|
step=1, |
|
|
info=translate("数値が小さいほど高品質になります。0は無圧縮。黒画面が出る場合は16に設定してください。"), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
|
|
|
|
|
|
keep_section_videos = gr.Checkbox(label=translate("完了時にセクションごとの動画を残す - チェックがない場合は最終動画のみ保存されます(デフォルトOFF)"), value=saved_app_settings.get("keep_section_videos", False) if saved_app_settings else False, elem_classes="saveable-setting") |
|
|
|
|
|
|
|
|
save_tensor_data = gr.Checkbox( |
|
|
label=translate("完了時にテンソルデータ(.safetensors)も保存 - このデータを別の動画の後に結合可能"), |
|
|
value=saved_app_settings.get("save_tensor_data", False) if saved_app_settings else False, |
|
|
info=translate("チェックすると、生成されたテンソルデータを保存します。アップロードされたテンソルがあれば、結合したテンソルデータも保存されます。"), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
|
|
|
|
|
|
save_section_frames = gr.Checkbox(label=translate("Save Section Frames"), value=saved_app_settings.get("save_section_frames", False) if saved_app_settings else False, info=translate("各セクションの最終フレームを静止画として保存します(デフォルトOFF)"), elem_classes="saveable-setting") |
|
|
|
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown(f"### " + translate("フレーム画像保存設定")) |
|
|
frame_save_mode = gr.Radio( |
|
|
label=translate("フレーム画像保存モード"), |
|
|
choices=[ |
|
|
translate("保存しない"), |
|
|
translate("全フレーム画像保存"), |
|
|
translate("最終セクションのみ全フレーム画像保存") |
|
|
], |
|
|
value=saved_app_settings.get("frame_save_mode", translate("保存しない")) if saved_app_settings else translate("保存しない"), |
|
|
info=translate("フレーム画像の保存方法を選択します。過去セクション分も含めて保存します。全セクションか最終セクションのみか選択できます。"), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
# Re-evaluate input visibility and total video length whenever the mode,
# frame size, or video length selection changes; all three radios share the
# same handler, inputs, and outputs.
for radio in (mode_radio, frame_size_radio, length_radio):
    radio.change(
        fn=update_section_visibility,
        inputs=[mode_radio, length_radio, frame_size_radio],
        outputs=[input_image, input_image, total_second_length]
    )
|
|
|
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown("### " + translate("Image影響度調整")) |
|
|
image_strength = gr.Slider( |
|
|
label=translate("Image影響度"), |
|
|
minimum=1.00, |
|
|
maximum=1.02, |
|
|
value=saved_app_settings.get("image_strength", 1.00) if saved_app_settings else 1.00, |
|
|
step=0.001, |
|
|
info=translate("開始フレーム(Image)が動画に与える影響の強さを調整します。1.00が通常の動作(100%)です。値を大きくすると始点の影響が強まり、変化が少なくなります。100%-102%の範囲で0.1%刻みの微調整が可能です。"), |
|
|
elem_classes="saveable-setting" |
|
|
) |
|
|
|
|
|
|
|
|
gr.Markdown(translate("※ 出力先は `webui` 配下に限定されます")) |
|
|
with gr.Row(equal_height=True): |
|
|
with gr.Column(scale=4): |
|
|
|
|
|
output_dir = gr.Textbox( |
|
|
label=translate("出力フォルダ名"), |
|
|
value=output_folder_name, |
|
|
info=translate("動画やキーフレーム画像の保存先フォルダ名"), |
|
|
placeholder="outputs" |
|
|
) |
|
|
with gr.Column(scale=1, min_width=100): |
|
|
open_folder_btn = gr.Button(value=translate("📂 保存および出力フォルダを開く"), size="sm") |
|
|
|
|
|
|
|
|
with gr.Row(visible=False): |
|
|
path_display = gr.Textbox( |
|
|
label=translate("出力フォルダの完全パス"), |
|
|
value=os.path.join(base_path, output_folder_name), |
|
|
interactive=False |
|
|
) |
|
|
|
|
|
|
|
|
def handle_open_folder_btn(folder_name): |
|
|
"""フォルダ名を保存し、そのフォルダを開く""" |
|
|
if not folder_name or not folder_name.strip(): |
|
|
folder_name = "outputs" |
|
|
|
|
|
|
|
|
folder_path = get_output_folder_path(folder_name) |
|
|
|
|
|
|
|
|
settings = load_settings() |
|
|
old_folder_name = settings.get('output_folder') |
|
|
|
|
|
if old_folder_name != folder_name: |
|
|
settings['output_folder'] = folder_name |
|
|
save_result = save_settings(settings) |
|
|
if save_result: |
|
|
|
|
|
global output_folder_name, outputs_folder |
|
|
output_folder_name = folder_name |
|
|
outputs_folder = folder_path |
|
|
print(translate("出力フォルダ設定を保存しました: {folder_name}").format(folder_name=folder_name)) |
|
|
|
|
|
|
|
|
open_output_folder(folder_path) |
|
|
|
|
|
|
|
|
return gr.update(value=folder_name), gr.update(value=folder_path) |
|
|
|
|
|
open_folder_btn.click(fn=handle_open_folder_btn, inputs=[output_dir], outputs=[output_dir, path_display]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown(f"### " + translate("アプリケーション設定")) |
|
|
with gr.Row(): |
|
|
with gr.Column(scale=1): |
|
|
save_current_settings_btn = gr.Button(value=translate("💾 現在の設定を保存"), size="sm") |
|
|
with gr.Column(scale=1): |
|
|
reset_settings_btn = gr.Button(value=translate("🔄 設定をリセット"), size="sm") |
|
|
|
|
|
|
|
|
save_settings_default_value = saved_app_settings.get("save_settings_on_start", False) if saved_app_settings else False |
|
|
save_settings_on_start = gr.Checkbox( |
|
|
label=translate("生成開始時に自動保存"), |
|
|
value=save_settings_default_value, |
|
|
info=translate("チェックをオンにすると、生成開始時に現在の設定が自動的に保存されます。設定は再起動時に反映されます。"), |
|
|
elem_classes="saveable-setting", |
|
|
interactive=True |
|
|
) |
|
|
|
|
|
|
|
|
alarm_default_value = saved_app_settings.get("alarm_on_completion", True) if saved_app_settings else True |
|
|
alarm_on_completion = gr.Checkbox( |
|
|
label=translate("完了時にアラームを鳴らす(Windows)"), |
|
|
value=alarm_default_value, |
|
|
info=translate("チェックをオンにすると、生成完了時にアラーム音を鳴らします(Windows)"), |
|
|
elem_classes="saveable-setting", |
|
|
interactive=True |
|
|
) |
|
|
|
|
|
|
|
|
gr.Markdown("### " + translate("ログ設定")) |
|
|
|
|
|
|
|
|
all_settings = load_settings() |
|
|
log_settings = all_settings.get('log_settings', {'log_enabled': False, 'log_folder': 'logs'}) |
|
|
|
|
|
|
|
|
log_enabled = gr.Checkbox( |
|
|
label=translate("コンソールログを出力する"), |
|
|
value=log_settings.get('log_enabled', False), |
|
|
info=translate("チェックをオンにすると、コンソール出力をログファイルにも保存します"), |
|
|
elem_classes="saveable-setting", |
|
|
interactive=True |
|
|
) |
|
|
|
|
|
|
|
|
log_folder = gr.Textbox( |
|
|
label=translate("ログ出力先"), |
|
|
value=log_settings.get('log_folder', 'logs'), |
|
|
info=translate("ログファイルの保存先フォルダを指定します"), |
|
|
elem_classes="saveable-setting", |
|
|
interactive=True |
|
|
) |
|
|
|
|
|
|
|
|
open_log_folder_btn = gr.Button(value=translate("📂 ログフォルダを開く"), size="sm") |
|
|
|
|
|
|
|
|
open_log_folder_btn.click(fn=open_log_folder) |
|
|
|
|
|
|
|
|
settings_status = gr.Markdown("") |
|
|
|
|
|
|
|
|
def save_app_settings_handler( |
|
|
|
|
|
resolution_val, |
|
|
mp4_crf_val, |
|
|
steps_val, |
|
|
cfg_val, |
|
|
|
|
|
use_teacache_val, |
|
|
gpu_memory_preservation_val, |
|
|
|
|
|
gs_val, |
|
|
|
|
|
image_strength_val, |
|
|
|
|
|
keep_section_videos_val, |
|
|
save_section_frames_val, |
|
|
save_tensor_data_val, |
|
|
frame_save_mode_val, |
|
|
|
|
|
save_settings_on_start_val, |
|
|
alarm_on_completion_val, |
|
|
|
|
|
log_enabled_val, |
|
|
log_folder_val |
|
|
): |
|
|
"""現在の設定を保存""" |
|
|
from eichi_utils.settings_manager import save_app_settings_f1 |
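
# Collect every saveable UI value; these keys are the ones that
# load_app_settings_f1 reads back at startup.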
|
|
|
|
|
|
|
|
current_settings = { |
|
|
|
|
|
"resolution": resolution_val, |
|
|
"mp4_crf": mp4_crf_val, |
|
|
"steps": steps_val, |
|
|
"cfg": cfg_val, |
|
|
|
|
|
"use_teacache": use_teacache_val, |
|
|
"gpu_memory_preservation": gpu_memory_preservation_val, |
|
|
|
|
|
"gs": gs_val, |
|
|
|
|
|
"image_strength": image_strength_val, |
|
|
|
|
|
"keep_section_videos": keep_section_videos_val, |
|
|
"save_section_frames": save_section_frames_val, |
|
|
"save_tensor_data": save_tensor_data_val, |
|
|
"frame_save_mode": frame_save_mode_val, |
|
|
|
|
|
"save_settings_on_start": save_settings_on_start_val, |
|
|
"alarm_on_completion": alarm_on_completion_val |
|
|
} |
|
|
|
|
|
|
|
|
try: |
|
|
app_success = save_app_settings_f1(current_settings) |
|
|
except Exception as e: |
|
|
return translate("設定の保存に失敗しました: {0}").format(str(e)) |
|
|
|
|
|
|
|
|
|
|
|
is_log_enabled = False |
|
|
if isinstance(log_enabled_val, bool): |
|
|
is_log_enabled = log_enabled_val |
|
|
elif hasattr(log_enabled_val, 'value'): |
|
|
is_log_enabled = bool(log_enabled_val.value) |
|
|
|
|
|
|
|
|
log_folder_path = "logs" |
|
|
if log_folder_val and isinstance(log_folder_val, str): |
|
|
log_folder_path = log_folder_val |
|
|
elif hasattr(log_folder_val, 'value') and log_folder_val.value: |
|
|
log_folder_path = str(log_folder_val.value) |
|
|
|
|
|
log_settings = { |
|
|
"log_enabled": is_log_enabled, |
|
|
"log_folder": log_folder_path |
|
|
} |
|
|
|
|
|
|
|
|
all_settings = load_settings() |
|
|
all_settings['log_settings'] = log_settings |
|
|
log_success = save_settings(all_settings) |
|
|
|
|
|
|
|
|
if log_success: |
|
|
|
|
|
disable_logging() |
|
|
|
|
|
apply_log_settings(log_settings, source_name="endframe_ichi_f1") |
|
|
print(translate("ログ設定を更新しました: 有効={0}, フォルダ={1}").format( |
|
|
log_enabled_val, log_folder_val)) |
|
|
|
|
|
if app_success and log_success: |
|
|
return translate("設定を保存しました") |
|
|
else: |
|
|
return translate("設定の一部保存に失敗しました") |
|
|
|
|
|
def reset_app_settings_handler(): |
|
|
"""設定をデフォルトに戻す""" |
|
|
from eichi_utils.settings_manager import get_default_app_settings_f1 |
|
|
from locales import i18n |
|
|
|
|
|
|
|
|
current_lang = i18n.lang |
|
|
|
|
|
|
|
|
default_settings = get_default_app_settings_f1(current_lang) |
|
|
updates = [] |
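# NOTE: the order of the appends below must match the `outputs` list wired to
# reset_settings_btn.click.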
|
|
|
|
|
|
|
|
updates.append(gr.update(value=default_settings.get("resolution", 640))) |
|
|
updates.append(gr.update(value=default_settings.get("mp4_crf", 16))) |
|
|
updates.append(gr.update(value=default_settings.get("steps", 25))) |
|
|
updates.append(gr.update(value=default_settings.get("cfg", 1.0))) |
|
|
updates.append(gr.update(value=default_settings.get("use_teacache", True))) |
|
|
updates.append(gr.update(value=default_settings.get("gpu_memory_preservation", 6))) |
|
|
updates.append(gr.update(value=default_settings.get("gs", 10))) |
|
|
|
|
|
updates.append(gr.update(value=default_settings.get("image_strength", 1.0))) |
|
|
updates.append(gr.update(value=default_settings.get("keep_section_videos", False))) |
|
|
updates.append(gr.update(value=default_settings.get("save_section_frames", False))) |
|
|
updates.append(gr.update(value=default_settings.get("save_tensor_data", False))) |
|
|
updates.append(gr.update(value=default_settings.get("frame_save_mode", translate("保存しない")))) |
|
|
updates.append(gr.update(value=default_settings.get("save_settings_on_start", False))) |
|
|
updates.append(gr.update(value=default_settings.get("alarm_on_completion", True))) |
|
|
|
|
|
|
|
|
|
|
|
updates.append(gr.update(value=False)) |
|
|
updates.append(gr.update(value="logs")) |
|
|
|
|
|
|
|
|
default_log_settings = { |
|
|
"log_enabled": False, |
|
|
"log_folder": "logs" |
|
|
} |
|
|
|
|
|
|
|
|
all_settings = load_settings() |
|
|
all_settings['log_settings'] = default_log_settings |
|
|
save_settings(all_settings) |
|
|
|
|
|
|
|
|
disable_logging() |
|
|
|
|
|
|
|
|
updates.append(translate("設定をデフォルトに戻しました")) |
|
|
|
|
|
return updates |
|
|
|
|
|
|
|
|
def validate_and_process(*args): |
|
|
"""入力画像または最後のキーフレーム画像のいずれかが有効かどうかを確認し、問題がなければ処理を実行する""" |
|
|
|
|
|
global batch_stopped, queue_enabled, queue_type, prompt_queue_file_path, image_queue_files |
|
|
|
|
|
input_img = args[0] |
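
# The remaining positional args follow the order of the `ips` list wired to
# start_button.click below; index defensively in case fewer are passed.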
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
output_dir = args[22] if len(args) > 22 else None |
|
|
save_section_frames = args[23] if len(args) > 23 else False |
|
|
use_all_padding = args[24] if len(args) > 24 else False |
|
|
use_lora = args[25] if len(args) > 25 else False |
|
|
lora_mode = args[26] if len(args) > 26 else translate("ディレクトリから選択") |
|
|
lora_dropdown1 = args[27] if len(args) > 27 else None |
|
|
lora_dropdown2 = args[28] if len(args) > 28 else None |
|
|
lora_dropdown3 = args[29] if len(args) > 29 else None |
|
|
save_tensor_data = args[30] if len(args) > 30 else False |
|
|
|
|
|
|
|
|
|
|
|
section_settings = [[None, None, ""] for _ in range(50)] |
|
|
if len(args) > 31 and args[31] is not None: |
|
|
|
|
|
if isinstance(args[31], list): |
|
|
section_settings = args[31] |
|
|
tensor_data_input = args[32] if len(args) > 32 else None |
|
|
fp8_optimization = args[33] if len(args) > 33 else True |
|
|
resolution_value = args[34] if len(args) > 34 else 640 |
|
|
batch_count = args[35] if len(args) > 35 else 1 |
|
|
frame_save_mode = args[36] if len(args) > 36 else translate("保存しない") |
|
|
|
|
|
use_queue_ui = args[37] if len(args) > 37 else False |
|
|
prompt_queue_file_ui = args[38] if len(args) > 38 else None |
|
|
|
|
|
|
|
|
save_settings_on_start_ui = args[39] if len(args) > 39 else False |
|
|
alarm_on_completion_ui = args[40] if len(args) > 40 else False |
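
# Gradio may pass raw values or component-like objects depending on how the
# event was wired; unwrap .value defensively before using these flags.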
|
|
|
|
|
|
|
|
actual_save_settings_value = save_settings_on_start_ui |
|
|
if hasattr(save_settings_on_start_ui, 'value'): |
|
|
actual_save_settings_value = save_settings_on_start_ui.value |
|
|
|
|
|
|
|
|
actual_alarm_value = False |
|
|
|
|
|
|
|
|
if isinstance(alarm_on_completion_ui, bool): |
|
|
|
|
|
actual_alarm_value = alarm_on_completion_ui |
|
|
elif hasattr(alarm_on_completion_ui, 'value'): |
|
|
|
|
|
if isinstance(alarm_on_completion_ui.value, bool): |
|
|
actual_alarm_value = alarm_on_completion_ui.value |
|
|
|
|
|
|
|
|
print(translate("キュータイプ: {0}").format(queue_type)) |
|
|
|
|
|
|
|
|
queue_enabled = use_queue_ui |
|
|
|
|
|
|
|
|
if len(args) > 31 and args[31] is not None and not isinstance(args[31], list): |
|
|
print(translate("section_settingsが正しい型ではありません: {0}. 初期化します。").format(type(args[31]).__name__)) |
|
|
section_settings = [[None, None, ""] for _ in range(50)] |
|
|
|
|
|
|
|
|
batch_count = max(1, min(int(batch_count), 100)) |
|
|
|
|
|
|
|
|
if queue_enabled and queue_type == "image": |
|
|
|
|
|
get_image_queue_files() |
|
|
image_queue_count = len(image_queue_files) |
|
|
print(translate("イメージキュー使用: 入力フォルダの画像 {0} 個を使用します").format(image_queue_count)) |
|
|
|
|
|
|
|
|
if image_queue_count > 0: |
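# One batch for the original input image, plus one per queued image.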
|
|
|
|
|
total_needed_batches = 1 + image_queue_count |
|
|
|
|
|
|
|
|
if total_needed_batches > batch_count: |
|
|
print(translate("画像キュー数+1に合わせてバッチ数を自動調整: {0} → {1}").format(batch_count, total_needed_batches)) |
|
|
batch_count = total_needed_batches |
|
|
|
|
|
|
|
|
if queue_enabled and queue_type == "prompt": |
|
|
|
|
|
if prompt_queue_file_path is not None: |
|
|
queue_file_path = prompt_queue_file_path |
|
|
print(translate("プロンプトキューファイル: {0}").format(queue_file_path)) |
|
|
|
|
|
|
|
|
if os.path.exists(queue_file_path): |
|
|
print(translate("プロンプトキューファイルの内容を読み込みます: {0}").format(queue_file_path)) |
|
|
try: |
|
|
with open(queue_file_path, 'r', encoding='utf-8') as f: |
|
|
lines = [line.strip() for line in f.readlines() if line.strip()] |
|
|
queue_prompts_count = len(lines) |
|
|
print(translate("有効なプロンプト行数: {0}").format(queue_prompts_count)) |
|
|
|
|
|
if queue_prompts_count > 0: |
|
|
|
|
|
sample_lines = lines[:min(3, queue_prompts_count)] |
|
|
print(translate("プロンプトサンプル: {0}").format(sample_lines)) |
|
|
|
|
|
|
|
|
if queue_prompts_count > batch_count: |
|
|
print(translate("プロンプト数に合わせてバッチ数を自動調整: {0} → {1}").format(batch_count, queue_prompts_count)) |
|
|
batch_count = queue_prompts_count |
|
|
else: |
|
|
print(translate("プロンプトキューファイルに有効なプロンプトがありません")) |
|
|
except Exception as e: |
|
|
print(translate("プロンプトキューファイル読み込みエラー: {0}").format(str(e))) |
|
|
else: |
|
|
print(translate("プロンプトキューファイルが存在しないか無効です: {0}").format(queue_file_path)) |
|
|
else: |
|
|
print(translate("プロンプトキュー無効: ファイルが正しくアップロードされていません")) |
|
|
|
|
|
|
|
|
if hasattr(frame_save_mode, 'value'): |
|
|
|
|
|
frame_save_mode_value = frame_save_mode.value |
|
|
else: |
|
|
|
|
|
frame_save_mode_value = frame_save_mode |
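
# Re-clamp: the queue handling above may have pushed batch_count past the UI
# limit of 100.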
|
|
|
|
|
|
|
|
|
|
|
|
|
|
batch_count = max(1, min(int(batch_count), 100)) |
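
# Section settings are always reset to empty placeholder rows here (the F1 UI
# does not use them) before validation.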
|
|
|
|
|
|
|
|
section_settings = [[None, None, ""] for _ in range(50)] |
|
|
|
|
|
|
|
|
is_valid, error_message = validate_images(input_img, section_settings, length_radio, frame_size_radio) |
|
|
|
|
|
if not is_valid: |
|
|
|
|
|
yield None, gr.update(visible=False), translate("エラー: 画像が選択されていません"), error_message, gr.update(interactive=True), gr.update(interactive=False), gr.update() |
|
|
return |
|
|
|
|
|
|
|
|
|
|
|
new_args = list(args) |
|
|
|
|
|
|
|
|
if len(new_args) > 25: |
|
|
new_args[25] = use_lora |
|
|
if len(new_args) > 26: |
|
|
new_args[26] = lora_mode |
|
|
if len(new_args) > 27: |
|
|
new_args[27] = lora_dropdown1 |
|
|
if len(new_args) > 28: |
|
|
new_args[28] = lora_dropdown2 |
|
|
if len(new_args) > 29: |
|
|
new_args[29] = lora_dropdown3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if len(new_args) > 30: |
|
|
new_args[30] = save_tensor_data |
|
|
|
|
|
|
|
|
if len(new_args) > 31: |
|
|
new_args[31] = section_settings |
|
|
|
|
|
|
|
|
# Pad new_args so the trailing option slots (indices 34-40: resolution,
# batch count, frame save mode, queue settings, auto-save and alarm flags)
# always exist, then overwrite them with the values resolved above.
if len(new_args) <= 40:
    new_args.extend([None] * (41 - len(new_args)))

new_args[34] = resolution_value
new_args[35] = batch_count
new_args[36] = frame_save_mode_value  # normalized value, not the raw component
new_args[37] = use_queue_ui
new_args[38] = prompt_queue_file_ui
new_args[39] = actual_save_settings_value
new_args[40] = actual_alarm_value
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
section_settings_index = 31 |
|
|
if len(new_args) > section_settings_index: |
|
|
if not isinstance(new_args[section_settings_index], list): |
|
|
print(translate("section_settingsがリストではありません。修正します。")) |
|
|
new_args[section_settings_index] = [[None, None, ""] for _ in range(50)] |
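
# Hand off to the generation pipeline; process() is a generator that streams
# preview images and progress updates back to the UI.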
|
|
|
|
|
|
|
|
yield from process(*new_args) |
|
|
|
|
|
|
|
|
save_current_settings_btn.click( |
|
|
fn=save_app_settings_handler, |
|
|
inputs=[ |
|
|
resolution, |
|
|
mp4_crf, |
|
|
steps, |
|
|
cfg, |
|
|
use_teacache, |
|
|
gpu_memory_preservation, |
|
|
gs, |
|
|
image_strength, |
|
|
keep_section_videos, |
|
|
save_section_frames, |
|
|
save_tensor_data, |
|
|
frame_save_mode, |
|
|
save_settings_on_start, |
|
|
alarm_on_completion, |
|
|
|
|
|
log_enabled, |
|
|
log_folder |
|
|
], |
|
|
outputs=[settings_status] |
|
|
) |
|
|
|
|
|
|
|
|
reset_settings_btn.click( |
|
|
fn=reset_app_settings_handler, |
|
|
inputs=[], |
|
|
outputs=[ |
|
|
resolution, |
|
|
mp4_crf, |
|
|
steps, |
|
|
cfg, |
|
|
use_teacache, |
|
|
gpu_memory_preservation, |
|
|
gs, |
|
|
image_strength, |
|
|
keep_section_videos, |
|
|
save_section_frames, |
|
|
save_tensor_data, |
|
|
frame_save_mode, |
|
|
save_settings_on_start, |
|
|
alarm_on_completion, |
|
|
log_enabled, |
|
|
log_folder, |
|
|
settings_status |
|
|
] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
ips = [input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, use_random_seed, mp4_crf, all_padding_value, image_strength, frame_size_radio, keep_section_videos, lora_files, lora_files2, lora_files3, lora_scales_text, output_dir, save_section_frames, use_all_padding, use_lora, lora_mode, lora_dropdown1, lora_dropdown2, lora_dropdown3, save_tensor_data, section_settings, tensor_data_input, fp8_optimization, resolution, batch_count, frame_save_mode, use_queue, prompt_queue_file, save_settings_on_start, alarm_on_completion] |
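
# NOTE: the position of each component in `ips` defines the *args index that
# validate_and_process unpacks; keep the two in sync when adding components.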
|
|
|
|
|
start_button.click(fn=validate_and_process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button, seed]) |
|
|
end_button.click(fn=end_process, outputs=[end_button]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def save_button_click_handler(name, prompt_text): |
|
|
"""保存ボタンクリック時のハンドラ関数""" |
|
|
|
|
|
|
|
|
if "A character" in prompt_text and prompt_text.count("A character") > 1: |
|
|
sentences = prompt_text.split(".") |
|
|
if len(sentences) > 0: |
|
|
prompt_text = sentences[0].strip() + "." |
|
|
|
|
|
|
|
|
|
|
|
result_msg = save_preset(name, prompt_text) |
|
|
|
|
|
|
|
|
presets_data = load_presets() |
|
|
choices = [preset["name"] for preset in presets_data["presets"]] |
|
|
default_presets = [n for n in choices if any(p["name"] == n and p.get("is_default", False) for p in presets_data["presets"])] |
|
|
user_presets = [n for n in choices if n not in default_presets] |
|
|
sorted_choices = [(n, n) for n in sorted(default_presets) + sorted(user_presets)] |
|
|
|
|
|
|
|
|
return result_msg, gr.update(choices=sorted_choices), gr.update() |
|
|
|
|
|
|
|
|
save_btn.click( |
|
|
fn=save_button_click_handler, |
|
|
inputs=[edit_name, edit_prompt], |
|
|
outputs=[result_message, preset_dropdown, prompt] |
|
|
) |
|
|
|
|
|
|
|
|
def clear_fields(): |
|
|
return gr.update(value=""), gr.update(value="") |
|
|
|
|
|
clear_btn.click( |
|
|
fn=clear_fields, |
|
|
inputs=[], |
|
|
outputs=[edit_name, edit_prompt] |
|
|
) |
|
|
|
|
|
|
|
|
def load_preset_handler(preset_name): |
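"""Look up a preset by name and fill the edit fields with it."""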
|
|
|
|
|
for preset in load_presets()["presets"]: |
|
|
if preset["name"] == preset_name: |
|
|
return gr.update(value=preset_name), gr.update(value=preset["prompt"]) |
|
|
return gr.update(), gr.update() |
|
|
|
|
|
|
|
|
def load_preset_handler_wrapper(preset_name): |
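# Dropdown values may arrive as (label, value) tuples; unwrap before lookup.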
|
|
|
|
|
if isinstance(preset_name, tuple) and len(preset_name) == 2: |
|
|
preset_name = preset_name[1] |
|
|
return load_preset_handler(preset_name) |
|
|
|
|
|
preset_dropdown.change( |
|
|
fn=load_preset_handler_wrapper, |
|
|
inputs=[preset_dropdown], |
|
|
outputs=[edit_name, edit_prompt] |
|
|
) |
|
|
|
|
|
|
|
|
def apply_to_prompt(edit_text): |
|
|
"""編集画面の内容をメインプロンプトに反映する関数""" |
|
|
|
|
|
return gr.update(value=edit_text) |
|
|
|
|
|
|
|
|
def delete_preset_handler(preset_name): |
|
|
|
|
|
if isinstance(preset_name, tuple) and len(preset_name) == 2: |
|
|
preset_name = preset_name[1] |
|
|
|
|
|
result = delete_preset(preset_name) |
|
|
|
|
|
|
|
|
presets_data = load_presets() |
|
|
choices = [preset["name"] for preset in presets_data["presets"]] |
|
|
default_presets = [name for name in choices if any(p["name"] == name and p.get("is_default", False) for p in presets_data["presets"])] |
|
|
user_presets = [name for name in choices if name not in default_presets] |
|
|
sorted_names = sorted(default_presets) + sorted(user_presets) |
|
|
updated_choices = [(name, name) for name in sorted_names] |
|
|
|
|
|
return result, gr.update(choices=updated_choices) |
|
|
|
|
|
apply_preset_btn.click( |
|
|
fn=apply_to_prompt, |
|
|
inputs=[edit_prompt], |
|
|
outputs=[prompt] |
|
|
) |
|
|
|
|
|
delete_preset_btn.click( |
|
|
fn=delete_preset_handler, |
|
|
inputs=[preset_dropdown], |
|
|
outputs=[result_message, preset_dropdown] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
# Restrict Gradio's file serving to the repository's local ./outputs folder.
allowed_paths = [os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './outputs')))]
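
# Launch the UI. A port collision almost always means another instance is
# already running; the except branch below reports that case.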
|
|
|
|
|
|
|
|
try: |
|
|
block.launch( |
|
|
server_name=args.server, |
|
|
server_port=args.port, |
|
|
share=args.share, |
|
|
allowed_paths=allowed_paths, |
|
|
inbrowser=args.inbrowser, |
|
|
) |
|
|
except OSError as e: |
|
|
if "Cannot find empty port" in str(e): |
|
|
print("======================================================") |
|
|
print(translate("エラー: FramePack-eichiは既に起動しています。")) |
|
|
print(translate("同時に複数のインスタンスを実行することはできません。")) |
|
|
print(translate("現在実行中のアプリケーションを先に終了してください。")) |
|
|
print("======================================================") |
|
|
input(translate("続行するには何かキーを押してください...")) |
|
|
else: |
|
|
|
|
|
print(translate("エラーが発生しました: {e}").format(e=e)) |
|
|
input(translate("続行するには何かキーを押してください...")) |
|
|
|