Commit · d323fcc
Parent(s): fd37479
Add progress and completion messages to task 2 script for better user feedback
medvqa/competitions/gi-2025/task_2.py
CHANGED
@@ -69,6 +69,7 @@ if os.path.isfile(os.path.join(snap_dir, "requirements.txt")):
     sp.run(["python", "-m", "pip", "install", "-q", "-r",
            f"{snap_dir}/requirements.txt"], cwd=snap_dir, check=True)
 
+print("Starting your script and loading submission details...")
 sp.run(["python", f"{snap_dir}/{submission_file}"],
        cwd=snap_dir, check=True)
 print(
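For context, this file is a small runner: it installs the snapshot's pinned dependencies, then executes the participant's script. A minimal, self-contained sketch of that pattern, assuming `sp` is `subprocess` and substituting hypothetical values for `snap_dir` and `submission_file` (both are defined earlier in the real file):

import os
import subprocess as sp

snap_dir = "./snapshot"                  # hypothetical: downloaded submission snapshot
submission_file = "submission_task2.py"  # hypothetical: entry point inside the snapshot

# Install pinned dependencies quietly if the snapshot ships any;
# check=True turns a non-zero pip exit code into an exception.
if os.path.isfile(os.path.join(snap_dir, "requirements.txt")):
    sp.run(["python", "-m", "pip", "install", "-q", "-r",
            f"{snap_dir}/requirements.txt"], cwd=snap_dir, check=True)

print("Starting your script and loading submission details...")
# Run the submission from inside the snapshot so its relative paths resolve.
sp.run(["python", f"{snap_dir}/{submission_file}"], cwd=snap_dir, check=True)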
medvqa/submission_samples/gi-2025/submission_task2.py
CHANGED
@@ -60,7 +60,10 @@ SUBMISSION_INFO = {
 hf_pipe = DiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to(device)
 hf_pipe.load_lora_weights("waitwhoami/sd-kvasir-imagen-demo")
-hf_pipe.safety_checker = lambda images, clip_input: (images,
+hf_pipe.safety_checker = lambda images, clip_input: (images, None)
+hf_pipe.set_progress_bar_config(disable=True)
+
+print("Model loaded successfully. Proceeding to image generation...")
 
 # ----------------END SUBMISSION DETAILS and MODEL LOADING -----------------#
 
@@ -73,14 +76,17 @@ timestamp = time.strftime("%Y%m%d_%H%M%S")
 output_folder = f"generated_images_{timestamp}"
 # Ensure output folder exists
 os.makedirs(output_folder, exist_ok=True)
+# print full path of output folder
+print(f"Output folder: {os.path.abspath(output_folder)}")
 
 # ✏️✏️___________EDIT SECTION 2: IMAGE GENERATION___________✏️✏️#
 # 🔹 TODO: PARTICIPANTS SHOULD MODIFY THIS STEP 🔹
 # you have access to 'test_prompts' with all the prompts needed to be generated
 
-batch_size = 2  # Adjust based on your GPU memory
-
-for i in range(0, len(test_prompts), batch_size):
+batch_size = 2  # Adjust based on your GPU memory; number of prompts to generate in one go
+print(
+    f"We have {len(test_prompts)} prompts and we are generating {batch_size} prompts at once.")
+for i in tqdm(range(0, len(test_prompts), batch_size), desc="Generating images"):
     batch = test_prompts[i:i + batch_size]
     batched_prompts = [p for p in batch for _ in range(num_per_prompt)]
     images = hf_pipe(batched_prompts).images
@@ -88,6 +94,7 @@ for i in range(0, len(test_prompts), batch_size):
         p_idx = i + j // num_per_prompt + 1
         i_idx = j % num_per_prompt + 1
         img.save(f"{output_folder}/prompt{p_idx:04d}_img{i_idx:04d}.png")
+print("Image generation completed. Proceeding to feature extraction...")
 # make sure 'output_folder' with generated images is available with proper filenames
 
 # ________________ END IMAGE GENERATION ________________#
@@ -153,9 +160,11 @@ for f in generated_files:
     prompt_to_images.setdefault(prompt_idx, []).append(
         os.path.join(output_folder, f))
 
+print("Extracting features for generated images...")
 all_features = {}
 for prompt_idx, paths in tqdm(prompt_to_images.items(), desc="Extracting generated images' features"):
     all_features[prompt_idx] = extract_features_from_paths(paths)
+print("Feature extraction completed. Proceeding to scoring...")
 
 val_dataset = load_dataset("SimulaMet/Kvasir-VQA-test", split="validation")
 prompt_to_real = requests.get(
@@ -189,6 +198,7 @@ fids, agreements, diversities = [], [], []
 all_generated, all_real = [], []
 per_prompt_data = []
 
+print("Calculating metrics and preparing output data...")
 for idx_A, idx_B, A, B in tqdm(objectives, desc="Scoring"):
     sim_ab = mean_cosine_sim(A, B)
     fid_ab = fid_score(A, B)
@@ -245,6 +255,10 @@ public_scores = {
 
 
 # end calculating metrics
+print(
+    f"Metrics calculated. Fidelity: {fidelity_norm}, Agreement: {agreement_norm}, Diversity: {diversity_norm}")
+print("Saving results to 'predictions_2.json'...")
+
 output_data = {"submission_info": SUBMISSION_INFO, "public_scores": public_scores, "total_time": total_time, "time_per_item": total_time / len(val_dataset),
                "memory_used_mb": final_mem, "model_memory_mb": model_mem_used, "gpu_name": gpu_name, "predictions": all_features, "debug": {
                    "packages": json.loads(subprocess.check_output([sys.executable, "-m", "pip", "list", "--format=json"])),
@@ -258,6 +272,7 @@ output_data = {"submission_info": SUBMISSION_INFO, "public_scores": public_score
 
 with open("predictions_2.json", "w") as f:
     json.dump(output_data, f, indent=4)
+print("✅ Results saved successfully. Script execution completed.")
 print(f"Time: {total_time}s | Mem: {final_mem}MB | Model Load Mem: {model_mem_used}MB | GPU: {gpu_name}")
 print("✅ Script looks good! Generation process completed successfully. Results saved to 'predictions_2.json'.")
 print("Next Step:\n 1) Upload this submission_task2.py script file to HuggingFace model repository.")
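Two of the added lines are standard diffusers idioms: the stock Stable Diffusion safety checker returns an `(images, has_nsfw_concepts)` pair, so a lambda that returns `(images, None)` keeps every generated image, and `set_progress_bar_config(disable=True)` silences the per-step denoising bar so the new tqdm loop stays readable. A minimal sketch of the loading block, assuming a CUDA device (fp16 weights need a GPU at inference time) and the model/LoRA IDs shown in the diff:

import torch
from diffusers import DiffusionPipeline

device = "cuda"  # assumption: a GPU is available
hf_pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to(device)
hf_pipe.load_lora_weights("waitwhoami/sd-kvasir-imagen-demo")

# The pipeline calls safety_checker(images=..., clip_input=...) and expects
# (images, has_nsfw_concepts) back; returning None for the flags disables filtering.
hf_pipe.safety_checker = lambda images, clip_input: (images, None)

# Suppress the denoising progress bar printed on every pipeline call.
hf_pipe.set_progress_bar_config(disable=True)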
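The filename arithmetic in the generation loop can be sanity-checked in isolation: each batch of `batch_size` prompts is repeated `num_per_prompt` times, so the flat position `j` encodes both the prompt number and the image number. A standalone sketch with hypothetical sizes (`test_prompts` and `num_per_prompt` come from earlier in the real script):

batch_size = 2
num_per_prompt = 3
test_prompts = [f"prompt {k}" for k in range(4)]  # hypothetical prompts

for i in range(0, len(test_prompts), batch_size):
    batch = test_prompts[i:i + batch_size]
    # Repeat each prompt num_per_prompt times so a single pipeline call
    # produces every image for the whole batch.
    batched_prompts = [p for p in batch for _ in range(num_per_prompt)]
    for j, prompt in enumerate(batched_prompts):
        p_idx = i + j // num_per_prompt + 1  # 1-based prompt number
        i_idx = j % num_per_prompt + 1       # 1-based image number within the prompt
        print(f"prompt{p_idx:04d}_img{i_idx:04d}.png <- {prompt!r}")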