import os

os.environ["FORCE_TORCH_LAYERNORM"] = "1"

import sys
import torch
import gradio as gr
import numpy as np
import json
import cv2
from PIL import Image
from datetime import datetime
import tempfile
import os.path as osp

# Assumes the model code is in the same directory or otherwise on the Python path
from src.condition import Condition
from src.SubjectGeniusTransformer2DModel import SubjectGeniusTransformer2DModel
from src.SubjectGeniusPipeline import SubjectGeniusPipeline
from accelerate.utils import set_seed

# Global state
weight_dtype = torch.bfloat16
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
transformer = None
pipe = None
TEMP_DIR = tempfile.mkdtemp()

# Default settings, kept consistent with the original inference script
DEFAULT_CONFIG = {
    "pretrained_model_name_or_path": "/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell",
    "transformer": "/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell/transformer",
    "condition_types": ["fill", "subject"],
    "denoising_lora": "/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Denoising_LoRA/subject_fill_union",
    "denoising_lora_weight": 1.0,
    "condition_lora_dir": "/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Condition_LoRA",
    "resolution": 512,
    "num_inference_steps": 8,
    "max_sequence_length": 512,
}


def load_model():
    """Load the transformer, its condition LoRAs, and the generation pipeline."""
    global transformer, pipe

    print("Loading transformer model...")
    transformer = SubjectGeniusTransformer2DModel.from_pretrained(
        pretrained_model_name_or_path=DEFAULT_CONFIG["transformer"],
    ).to(device=device, dtype=weight_dtype)
    print("Transformer model loaded")

    print("Loading condition LoRAs...")
    for condition_type in DEFAULT_CONFIG["condition_types"]:
        print(f"Loading {condition_type} LoRA...")
        transformer.load_lora_adapter(
            f"{DEFAULT_CONFIG['condition_lora_dir']}/{condition_type}.safetensors",
            adapter_name=condition_type,
        )
    print("All condition LoRAs loaded")

    print("Creating pipeline...")
    pipe = SubjectGeniusPipeline.from_pretrained(
        DEFAULT_CONFIG["pretrained_model_name_or_path"],
        torch_dtype=weight_dtype,
        transformer=None,
    )
    print("Pipeline created")

    print("Attaching transformer...")
    pipe.transformer = transformer

    print("Activating adapters...")
    pipe.transformer.set_adapters(list(DEFAULT_CONFIG["condition_types"]))
    pipe = pipe.to(device)
    print("Model fully loaded!")
    return "Model loaded!"
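

# Usage sketch (the helper below is hypothetical and not part of the original script;
# it only assumes a local checkpoint tree mirroring the default layout in DEFAULT_CONFIG):
# the paths can be repointed before load_model() is called, e.g. from a small driver script.
def load_model_with_local_paths(base_dir):
    """Hypothetical helper: rewrite DEFAULT_CONFIG for a local copy of the weights,
    assuming it mirrors the default directory layout, then load the model."""
    DEFAULT_CONFIG["pretrained_model_name_or_path"] = f"{base_dir}/FLUX.1-schnell"
    DEFAULT_CONFIG["transformer"] = f"{base_dir}/FLUX.1-schnell/transformer"
    DEFAULT_CONFIG["denoising_lora"] = f"{base_dir}/Subject_genuis/Denoising_LoRA/subject_fill_union"
    DEFAULT_CONFIG["condition_lora_dir"] = f"{base_dir}/Subject_genuis/Condition_LoRA"
    return load_model()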
def process_image_for_display(image_array):
    """Convert an image to RGB for display, keeping its original size."""
    if image_array is None:
        return None

    # PIL image -> numpy array
    if isinstance(image_array, Image.Image):
        image_array = np.array(image_array)

    # Ensure RGB format
    if len(image_array.shape) == 2:  # grayscale
        image_array = cv2.cvtColor(image_array, cv2.COLOR_GRAY2RGB)
    elif image_array.shape[2] == 4:  # RGBA
        image_array = image_array[:, :, :3]
    return image_array


def save_image_for_model(image_array, path):
    """Save an image to disk for use as model input."""
    if image_array is None:
        return None

    # Make sure the target directory exists
    os.makedirs(os.path.dirname(path), exist_ok=True)

    # PIL images can be saved directly
    if isinstance(image_array, Image.Image):
        image_array.save(path)
        return path

    # numpy arrays are converted to PIL images first
    Image.fromarray(process_image_for_display(image_array)).save(path)
    return path


def preserve_aspect_ratio(image, target_size=(512, 512)):
    """Resize an image to target_size while preserving its aspect ratio, padding with white."""
    if isinstance(image, np.ndarray):
        pil_image = Image.fromarray(image)
    else:
        pil_image = image

    # Aspect ratio of the source image
    width, height = pil_image.size
    aspect_ratio = width / height

    # New white canvas
    new_image = Image.new("RGB", target_size, (255, 255, 255))

    # Scale while keeping the aspect ratio
    if aspect_ratio > 1:  # wide image
        new_width = target_size[0]
        new_height = int(new_width / aspect_ratio)
    else:  # tall image
        new_height = target_size[1]
        new_width = int(new_height * aspect_ratio)

    resized_image = pil_image.resize((new_width, new_height), Image.LANCZOS)

    # Paste centered onto the canvas
    paste_position = ((target_size[0] - new_width) // 2, (target_size[1] - new_height) // 2)
    new_image.paste(resized_image, paste_position)
    return new_image


def generate_image(
    prompt,
    subject_image,
    background_image,
    x1, y1, x2, y2,
    version="training-free",
    seed=0,
    num_inference_steps=8,
):
    """Generate an image from the subject/background conditions; returns (concat, result, status)."""
    global pipe

    # Make sure the model is loaded
    if pipe is None:
        load_model()

    # Validate inputs
    if subject_image is None or background_image is None:
        return None, None, "Please upload both a subject image and a background image"

    try:
        # Convert coordinates to integers and sort them
        x1, y1, x2, y2 = int(float(x1)), int(float(y1)), int(float(x2)), int(float(y2))
        if x1 > x2:
            x1, x2 = x2, x1
        if y1 > y2:
            y1, y2 = y2, y1

        # Fixed input size expected by the model
        MODEL_SIZE = (512, 512)

        # 1. Subject image: keep the original aspect ratio, resize onto the model canvas
        subject_pil = Image.fromarray(subject_image) if isinstance(subject_image, np.ndarray) else subject_image
        # White canvas
        subject_processed = Image.new("RGB", MODEL_SIZE, (255, 255, 255))
        # Resize while keeping the aspect ratio
        subject_pil.thumbnail((MODEL_SIZE[0], MODEL_SIZE[1]), Image.LANCZOS)
        # Paste centered
        paste_pos = ((MODEL_SIZE[0] - subject_pil.width) // 2, (MODEL_SIZE[1] - subject_pil.height) // 2)
        subject_processed.paste(subject_pil, paste_pos)

        # 2. Background image: same aspect-ratio-preserving treatment
        background_pil = Image.fromarray(background_image) if isinstance(background_image, np.ndarray) else background_image
        # Remember the original size for coordinate conversion
        orig_width, orig_height = background_pil.size
        background_processed = Image.new("RGB", MODEL_SIZE, (255, 255, 255))
        background_pil.thumbnail((MODEL_SIZE[0], MODEL_SIZE[1]), Image.LANCZOS)
        bg_paste_pos = ((MODEL_SIZE[0] - background_pil.width) // 2, (MODEL_SIZE[1] - background_pil.height) // 2)
        background_processed.paste(background_pil, bg_paste_pos)
        # 3. Map the bbox from original background coordinates to the 512x512 canvas.
        #    E.g. a 1024x768 background is thumbnailed to 512x384 and pasted at (0, 64),
        #    so a point at (512, 384) maps to (256, 256).
        scale_x = background_pil.width / orig_width
        scale_y = background_pil.height / orig_height
        adjusted_x1 = int(x1 * scale_x) + bg_paste_pos[0]
        adjusted_y1 = int(y1 * scale_y) + bg_paste_pos[1]
        adjusted_x2 = int(x2 * scale_x) + bg_paste_pos[0]
        adjusted_y2 = int(y2 * scale_y) + bg_paste_pos[1]

        # Clamp coordinates to the valid range
        adjusted_x1 = max(0, min(adjusted_x1, MODEL_SIZE[0] - 1))
        adjusted_y1 = max(0, min(adjusted_y1, MODEL_SIZE[1] - 1))
        adjusted_x2 = max(0, min(adjusted_x2, MODEL_SIZE[0] - 1))
        adjusted_y2 = max(0, min(adjusted_y2, MODEL_SIZE[1] - 1))

        # Final bbox
        bbox = [adjusted_x1, adjusted_y1, adjusted_x2, adjusted_y2]

        # 4. Copy of the processed background kept for visualization (not fed to the model)
        background_display = background_processed.copy()

        # 5. Black out the selected region on the background that is fed to the model
        background_for_model = background_processed.copy()
        background_for_model_array = np.array(background_for_model)
        background_for_model_array[adjusted_y1:adjusted_y2 + 1, adjusted_x1:adjusted_x2 + 1] = (0, 0, 0)
        background_for_model = Image.fromarray(background_for_model_array)

        # 6. Build the model conditions
        subject_condition = Condition("subject", raw_img=subject_processed, no_process=True)
        # The background with the blacked-out region serves as the fill condition
        fill_condition = Condition("fill", raw_img=background_for_model, no_process=True)
        conditions = [subject_condition, fill_condition]

        # 7. Set the random seed
        if seed is not None:
            set_seed(seed)

        # 8. Extra metadata passed to the model
        json_data = {"description": prompt, "bbox": bbox}

        # 9. Select the adapter setup for the chosen version
        if version == "training-based":
            denoising_lora_name = os.path.basename(os.path.normpath(DEFAULT_CONFIG["denoising_lora"]))
            pipe.transformer.load_lora_adapter(
                DEFAULT_CONFIG["denoising_lora"],
                adapter_name=denoising_lora_name,
                use_safetensors=True,
            )
            # Condition adapters keep weight 1.0; the denoising LoRA uses the configured weight
            pipe.transformer.set_adapters(
                list(DEFAULT_CONFIG["condition_types"]) + [denoising_lora_name],
                [1.0] * len(DEFAULT_CONFIG["condition_types"]) + [DEFAULT_CONFIG["denoising_lora_weight"]],
            )
        elif version == "training-free":
            pipe.transformer.set_adapters(list(DEFAULT_CONFIG["condition_types"]))

        # 10. Run the pipeline
        result_img = pipe(
            prompt=prompt,
            conditions=conditions,
            height=MODEL_SIZE[1],
            width=MODEL_SIZE[0],
            num_inference_steps=num_inference_steps,
            max_sequence_length=DEFAULT_CONFIG["max_sequence_length"],
            model_config={"json_data": json_data},
        ).images[0]

        # 11. Build the visualization strip: subject | model-input background | result
        concat_image = Image.new("RGB", (MODEL_SIZE[0] * 3, MODEL_SIZE[1]), (255, 255, 255))
        concat_image.paste(subject_processed, (0, 0))
        concat_image.paste(background_for_model, (MODEL_SIZE[0], 0))
        concat_image.paste(result_img, (MODEL_SIZE[0] * 2, 0))

        return concat_image, result_img, "Generation succeeded!"
    except Exception as e:
        import traceback
        traceback.print_exc()
        return None, None, f"Error while generating the image: {e}"


def draw_bbox(background_image, evt: gr.SelectData):
    """Handle clicks on the background image and draw the selection rectangle."""
    # Initialize the click state stored on the function object
    if not hasattr(draw_bbox, "start_point"):
        draw_bbox.start_point = None
        draw_bbox.current_image = None

    if background_image is None:
        return background_image, "", "", "", ""

    try:
        # Image size
        h, w = background_image.shape[:2]

        # Displayed size of the image component, if the event provides it
        target = getattr(evt, "target", None)
        target_width = getattr(evt, "target_width", None) or getattr(target, "width", None) or w
        target_height = getattr(evt, "target_height", None) or getattr(target, "height", None) or h

        # Scale from displayed coordinates to image coordinates
        scale_x = w / target_width if target_width else 1.0
        scale_y = h / target_height if target_height else 1.0

        # Clicked point, clamped to the image bounds
        x = min(max(0, int(evt.index[0] * scale_x)), w - 1)
        y = min(max(0, int(evt.index[1] * scale_y)), h - 1)

        # First click: remember the starting corner
        if draw_bbox.start_point is None:
            draw_bbox.start_point = (x, y)
            draw_bbox.current_image = background_image.copy()
            return background_image, "", "", "", ""

        # Second click: complete the rectangle
        end_point = (x, y)

        # Order the coordinates
        x1 = min(draw_bbox.start_point[0], end_point[0])
        y1 = min(draw_bbox.start_point[1], end_point[1])
        x2 = max(draw_bbox.start_point[0], end_point[0])
        y2 = max(draw_bbox.start_point[1], end_point[1])

        # Draw the rectangle
        img_with_rect = draw_bbox.current_image.copy()
        cv2.rectangle(img_with_rect, (x1, y1), (x2, y2), (0, 255, 0), 2)

        # Reset the starting corner
        draw_bbox.start_point = None
        return img_with_rect, str(x1), str(y1), str(x2), str(y2)

    except Exception as e:
        print(f"Error while drawing the bounding box: {e}")
        draw_bbox.start_point = None
        return background_image, "", "", "", ""


def update_bbox_from_input(background_image, x1, y1, x2, y2):
    """Redraw the rectangle from manually entered coordinates."""
    try:
        if background_image is None:
            return background_image

        # Parse the coordinates, defaulting empty fields to 0
        x1, y1, x2, y2 = (
            int(float(x1) if x1 else 0),
            int(float(y1) if y1 else 0),
            int(float(x2) if x2 else 0),
            int(float(y2) if y2 else 0),
        )

        # Image size
        h, w = background_image.shape[:2]

        # Clamp to the image bounds
        x1 = max(0, min(x1, w - 1))
        y1 = max(0, min(y1, h - 1))
        x2 = max(0, min(x2, w - 1))
        y2 = max(0, min(y2, h - 1))

        # Ensure x1 < x2 and y1 < y2
        if x1 > x2:
            x1, x2 = x2, x1
        if y1 > y2:
            y1, y2 = y2, y1

        # Draw the rectangle
        img_with_rect = background_image.copy()
        cv2.rectangle(img_with_rect, (x1, y1), (x2, y2), (0, 255, 0), 2)
        return img_with_rect
    except Exception:
        return background_image


def reset_bbox(background_image):
    """Reset the bounding box selection and the preview image."""
    if hasattr(draw_bbox, "start_point"):
        draw_bbox.start_point = None
    if background_image is None:
        return None, "", "", "", ""
    return background_image.copy(), "", "", "", ""
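

# Headless usage sketch (the file names, bbox values, and helper name below are hypothetical,
# not part of the app): generate_image() can also be driven without the Gradio UI, e.g. as a
# quick smoke test of the pipeline. A minimal sketch, assuming two test images exist on disk:
def _smoke_test(subject_path="subject.png", background_path="background.png"):
    subject = np.array(Image.open(subject_path).convert("RGB"))
    background = np.array(Image.open(background_path).convert("RGB"))
    # Bounding box coordinates are given in pixels of the original background image
    concat, result, status = generate_image(
        "A decorative fabric topper for windows.",
        subject, background, 100, 100, 300, 300,
        version="training-free", seed=0, num_inference_steps=8,
    )
    print(status)
    if result is not None:
        result.save("result.png")
    return concat, result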
# Build the Gradio interface
def create_interface():
    with gr.Blocks(title="SubjectGenius Image Generator") as demo:
        gr.Markdown("# SubjectGenius Image Generator")
        gr.Markdown("Upload a subject image and a background image, then select a region on the background to generate a new image.")

        status_message = gr.Textbox(label="Status", interactive=False)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input parameters")
                prompt = gr.Textbox(
                    label="Image description",
                    placeholder="e.g. A decorative fabric topper for windows.",
                )
                with gr.Row():
                    subject_image = gr.Image(label="Subject image", type="numpy")
                    background_image = gr.Image(label="Background image (Fill)", type="numpy")

                gr.Markdown("### Select a region on the background (click twice to set the diagonal corners) or enter coordinates manually")
                with gr.Row():
                    x1_input = gr.Textbox(label="X1", placeholder="Top-left X coordinate")
                    y1_input = gr.Textbox(label="Y1", placeholder="Top-left Y coordinate")
                    x2_input = gr.Textbox(label="X2", placeholder="Bottom-right X coordinate")
                    y2_input = gr.Textbox(label="Y2", placeholder="Bottom-right Y coordinate")
                reset_btn = gr.Button("Reset selection")

                with gr.Accordion("Advanced options", open=False):
                    version = gr.Radio(
                        ["training-free", "training-based"],
                        label="Version",
                        value="training-free",
                    )
                    seed = gr.Slider(0, 1000, value=0, step=1, label="Random seed")
                    steps = gr.Slider(
                        4, 50, value=8, step=1,
                        label="Inference steps (more steps are slower but may improve quality)",
                    )

                generate_btn = gr.Button("Generate image", variant="primary")

            with gr.Column(scale=1):
                gr.Markdown("### Region selection preview")
                preview_image = gr.Image(label="Region preview", type="numpy", elem_id="preview_image")

                gr.Markdown("### Generation results")
                with gr.Tabs():
                    with gr.TabItem("Full result"):
                        output_image_full = gr.Image(label="Full result (including condition images)")
                    with gr.TabItem("Generated image only"):
                        output_image = gr.Image(label="Generated image")

        # Event handlers
        background_image.select(
            draw_bbox,
            inputs=[background_image],
            outputs=[preview_image, x1_input, y1_input, x2_input, y2_input],
        )

        # Keep the preview in sync with manually entered coordinates
        coord_inputs = [x1_input, y1_input, x2_input, y2_input]
        for coord in coord_inputs:
            coord.change(
                update_bbox_from_input,
                inputs=[background_image, x1_input, y1_input, x2_input, y2_input],
                outputs=[preview_image],
            )

        # Reset button
        reset_btn.click(
            reset_bbox,
            inputs=[background_image],
            outputs=[preview_image, x1_input, y1_input, x2_input, y2_input],
        )

        # Generate button
        generate_btn.click(
            generate_image,
            inputs=[prompt, subject_image, background_image,
                    x1_input, y1_input, x2_input, y2_input,
                    version, seed, steps],
            outputs=[output_image_full, output_image, status_message],
        )

    return demo


# Main entry point
if __name__ == "__main__":
    # Build the interface
    demo = create_interface()

    # Load the model
    print("Loading model...")
    load_model()

    # Start Gradio
    demo.launch(share=True)
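    # Note: share=True requests a temporary public Gradio link. To serve only on the local
    # network instead, launch() can be called with standard Gradio arguments such as
    # server_name="0.0.0.0" and server_port=7860 (the values here are illustrative).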