File size: 4,939 Bytes
a877b56
 
 
 
 
 
628d76c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a877b56
628d76c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a877b56
 
 
 
 
 
628d76c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
"""

Generate seenable_obj_dict.json for all scenes.

Example:

python code/generate_seenable_object_dict.py /home/xwang378/scratch/2025/Taxonomy/Data/simulationImage/ --scene-workers 8 --camera-workers 8

"""

import os
import json
import argparse
import numpy as np
from PIL import Image
from concurrent.futures import ProcessPoolExecutor, as_completed
from multiprocessing import cpu_count


def process_camera(save_path, camera):
    """Build seenable_obj_dict.json for a single camera folder.

    Reads the camera's segmentation image (seg.png) and object annotations
    (object_annots.json), keeps only the objects whose annotation color
    actually appears in the segmentation mask, and writes the resulting
    {object_id: (r, g, b)} mapping to seenable_obj_dict.json.

    Args:
        save_path: Path to the scene directory containing camera folders.
        camera: Name of the camera sub-folder to process.

    Returns:
        (message, success): a human-readable status string and a bool that
        is True only when a new seenable_obj_dict.json was written.
    """
    image_dir = os.path.join(save_path, camera)
    seg_file = os.path.join(image_dir, "seg.png")
    obj_anno_file = os.path.join(image_dir, "object_annots.json")
    output_file = os.path.join(image_dir, "seenable_obj_dict.json")

    if not os.path.exists(seg_file) or not os.path.exists(obj_anno_file):
        return f"[Warning] Missing files in {camera}, skipped.", False

    if os.path.exists(output_file):
        # basename(normpath(...)) is robust to trailing separators and to
        # non-POSIX path separators, unlike save_path.split('/')[-1].
        scene_name = os.path.basename(os.path.normpath(save_path))
        return f"[Warning] seenable_obj_dict.json already exists for {scene_name}/{camera}, skipped.", False

    # Load the per-object annotations (expects an "outputs" list).
    with open(obj_anno_file, "r") as f:
        obj_anno = json.load(f)
    obj_annos = obj_anno.get("outputs", [])

    # Load the segmentation image; only the RGB channels encode object
    # identity (an alpha channel, if present, is dropped).
    seg = np.array(Image.open(seg_file))
    rgb_mask = seg[:, :, :3]

    # Only the *set* of unique colors matters here, not per-color counts.
    unique_colors = np.unique(rgb_mask.reshape(-1, 3), axis=0)
    color_set = set(map(tuple, unique_colors))

    # Keep objects whose annotated color is visible in the segmentation.
    # A distinct loop variable avoids shadowing the loaded JSON dict above.
    obj_dict = {
        annot["object_id"]: tuple(annot["color"][0:3])
        for annot in obj_annos
        if tuple(annot["color"][0:3]) in color_set
    }

    with open(output_file, "w") as f:
        json.dump(obj_dict, f, indent=4)

    return f"[Saved] {output_file}", True


def process_scene(image_dir, scene_name, max_workers=None):
    """Generate seenable_obj_dict.json for every camera folder of one scene.

    Fans the per-camera work out to a ProcessPoolExecutor, printing one
    status line per camera and a final per-scene summary.

    Args:
        image_dir: Root directory containing the scene folders.
        scene_name: Scene folder name (may include a batch prefix).
        max_workers: Worker count for the camera-level pool
            (None lets ProcessPoolExecutor pick, i.e. the CPU count).
    """
    save_path = os.path.join(image_dir, scene_name)

    if not os.path.exists(save_path):
        print(f"[Error] Scene path not found: {save_path}")
        return

    # Only real sub-directories can be camera folders; the previous
    # name-only filter would also admit stray non-JSON files.
    camera_list = [
        x for x in os.listdir(save_path)
        if not x.endswith(".json") and os.path.isdir(os.path.join(save_path, x))
    ]

    print(f"Processing scene: {scene_name}")
    print(f"Found {len(camera_list)} camera folders.")

    if not camera_list:
        print(f"✅ Done processing scene: {scene_name} (no cameras found)\n")
        return

    # Process all cameras in parallel.
    success_count = 0
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Submit one task per camera.
        futures = {
            executor.submit(process_camera, save_path, camera): camera
            for camera in camera_list
        }

        # Collect results as they complete.
        for future in as_completed(futures):
            camera = futures[future]
            try:
                message, success = future.result()
                print(message)
                if success:
                    success_count += 1
            except Exception as exc:
                print(f"[Error] {camera} generated an exception: {exc}")

    print(f"✅ Done processing scene: {scene_name} ({success_count}/{len(camera_list)} cameras processed)\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate seenable_obj_dict.json for all scenes.")
    parser.add_argument("image_dir", type=str, help="Directory containing the image folders")
    parser.add_argument("--scene-workers", type=int, default=None,
                        help="Number of parallel workers for scene-level processing (default: CPU count)")
    parser.add_argument("--camera-workers", type=int, default=None,
                        help="Number of parallel workers for camera-level processing (default: CPU count)")
    args = parser.parse_args()

    # Collect all scenes across the known batch directories.
    batch_dir = ['zehan', 'placement', 'jiawei', 'luoxin', 'additional']
    scenes = []
    for batch in batch_dir:
        batch_path = os.path.join(args.image_dir, batch)
        if not os.path.isdir(batch_path):
            # A missing batch would previously abort the whole run with
            # FileNotFoundError from os.listdir; warn and move on instead.
            print(f"[Warning] Batch directory not found, skipped: {batch_path}")
            continue
        for scene in os.listdir(batch_path):
            if os.path.isdir(os.path.join(batch_path, scene)):
                scenes.append(os.path.join(batch, scene))

    print(f"Found {len(scenes)} scenes to process.")
    print(f"Scene-level workers: {args.scene_workers or cpu_count()}")
    print(f"Camera-level workers: {args.camera_workers or cpu_count()}\n")

    # Process all scenes in parallel; each scene in turn runs its own
    # camera-level pool (see process_scene).
    with ProcessPoolExecutor(max_workers=args.scene_workers) as executor:
        futures = {
            executor.submit(process_scene, args.image_dir, scene, args.camera_workers): scene
            for scene in scenes
        }

        for future in as_completed(futures):
            scene = futures[future]
            try:
                future.result()
            except Exception as exc:
                print(f"[Error] Scene {scene} generated an exception: {exc}")

    print("\n🎉 All scenes processed!")