"""
Generate seenable_obj_dict.json for all scenes.
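
For each camera folder this writes <camera>/seenable_obj_dict.json, mapping each
annotated object_id to its RGB color whenever that color appears in the camera's
seg.png (i.e. the object is visible from that viewpoint).
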
Example:
python code/generate_seenable_object_dict.py /home/xwang378/scratch/2025/Taxonomy/Data/simulationImage/ --scene-workers 8 --camera-workers 8
"""
import os
import json
import argparse
import numpy as np
from PIL import Image
from concurrent.futures import ProcessPoolExecutor, as_completed
from multiprocessing import cpu_count
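
# Parallelism layout: one process pool over scenes (--scene-workers) and, inside
# each scene, a second pool over that scene's camera folders (--camera-workers).
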
def process_camera(save_path, camera):
"""处理单个相机的数据"""
image_dir = os.path.join(save_path, camera)
seg_file = os.path.join(image_dir, "seg.png")
obj_anno_file = os.path.join(image_dir, "object_annots.json")
if not os.path.exists(seg_file) or not os.path.exists(obj_anno_file):
return f"[Warning] Missing files in {camera}, skipped.", False
if os.path.exists(os.path.join(save_path, camera, "seenable_obj_dict.json")):
return f"[Warning] seenable_obj_dict.json already exists for {save_path.split('/')[-1]}/{camera}, skipped.", False
    # Read the segmentation image and the per-object annotations
with open(obj_anno_file, "r") as f:
obj_anno = json.load(f)
obj_annos = obj_anno.get("outputs", [])
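    # Assumed object_annots.json layout (inferred from the fields used below):
    #   {"outputs": [{"object_id": ..., "color": [r, g, b, ...]}, ...]}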
seg = np.array(Image.open(seg_file))
rgb_mask = seg[:, :, :3]
    # ⚡ Only collect the unique colors; per-color counts are not needed
unique_colors = np.unique(rgb_mask.reshape(-1, 3), axis=0)
color_set = set(map(tuple, unique_colors))
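    # Keep only the annotated objects whose color actually appears in this camera's
    # segmentation, i.e. the objects visible from this viewpoint.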
    obj_dict = {
        anno["object_id"]: tuple(anno["color"][:3])
        for anno in obj_annos
        if tuple(anno["color"][:3]) in color_set
    }
output_file = os.path.join(save_path, camera, "seenable_obj_dict.json")
with open(output_file, "w") as f:
json.dump(obj_dict, f, indent=4)
return f"[Saved] {output_file}", True
def process_scene(image_dir, scene_name, max_workers=None):
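    """Generate seenable_obj_dict.json for every camera folder in one scene."""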
save_path = os.path.join(image_dir, scene_name)
if not os.path.exists(save_path):
print(f"[Error] Scene path not found: {save_path}")
return
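    # Every non-JSON entry in the scene folder is treated as a camera directory.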
camera_list = [x for x in os.listdir(save_path) if not x.endswith(".json")]
print(f"Processing scene: {scene_name}")
print(f"Found {len(camera_list)} camera folders.")
if not camera_list:
print(f"✅ Done processing scene: {scene_name} (no cameras found)\n")
return
    # Process all cameras in this scene in parallel
success_count = 0
with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Submit one task per camera folder
futures = {
executor.submit(process_camera, save_path, camera): camera
for camera in camera_list
}
        # Collect results as they complete
for future in as_completed(futures):
camera = futures[future]
try:
message, success = future.result()
print(message)
if success:
success_count += 1
except Exception as exc:
print(f"[Error] {camera} generated an exception: {exc}")
print(f"✅ Done processing scene: {scene_name} ({success_count}/{len(camera_list)} cameras processed)\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate seenable_obj_dict.json for all scenes.")
parser.add_argument("image_dir", type=str, help="Directory containing the image folders")
parser.add_argument("--scene-workers", type=int, default=None,
help="Number of parallel workers for scene-level processing (default: CPU count)")
parser.add_argument("--camera-workers", type=int, default=None,
help="Number of parallel workers for camera-level processing (default: CPU count)")
args = parser.parse_args()
    # Gather all scenes
batch_dir = ['zehan', 'placement', 'jiawei', 'luoxin', 'additional']
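    # Each batch folder is expected to exist directly under image_dir;
    # os.listdir will raise FileNotFoundError if one is missing.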
scenes = []
for batch in batch_dir:
for scene in os.listdir(os.path.join(args.image_dir, batch)):
if os.path.isdir(os.path.join(args.image_dir, batch, scene)):
scenes.append(os.path.join(batch, scene))
print(f"Found {len(scenes)} scenes to process.")
print(f"Scene-level workers: {args.scene_workers or cpu_count()}")
print(f"Camera-level workers: {args.camera_workers or cpu_count()}\n")
    # Process all scenes in parallel
with ProcessPoolExecutor(max_workers=args.scene_workers) as executor:
futures = {
executor.submit(process_scene, args.image_dir, scene, args.camera_workers): scene
for scene in scenes
}
for future in as_completed(futures):
scene = futures[future]
try:
future.result()
except Exception as exc:
print(f"[Error] Scene {scene} generated an exception: {exc}")
print("\n🎉 All scenes processed!")