inference_ren.py
import os
import json
import argparse

import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed  # used by setup()/cleanup() below
import torch.multiprocessing as mp

from per_segment_anything import sam_model_registry
from segment_anything import SamPredictor
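
# Distributed SAM-HQ point-prompt inference: each spawned rank loads a
# fine-tuned checkpoint, reads clicked-point prompts from per-image JSON
# files, segments its slice of the input images with SamPredictor, and
# saves the predicted mask overlays to disk.
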
def load_sam_checkpoint(sam, checkpoint_path):
    """
    Loads the SAM checkpoint from the given path, filters out keys related to
    relative positional embeddings and loads the state dictionary into the model.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Remove keys related to relative positional embeddings
    keys_to_remove = [k for k in state_dict.keys() if "attn.rel_pos" in k]
    for key in keys_to_remove:
        print(f"[INFO] Removing key {key} from checkpoint state_dict")
        del state_dict[key]
    sam.load_state_dict(state_dict, strict=False)
    print("Checkpoint loaded with filtered keys.")
    return sam
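
# Why drop the "attn.rel_pos" keys? A plausible reason (assumption, not
# stated in this file) is that the fine-tuned checkpoint was saved at a
# different input resolution, so the relative positional embedding tensors
# no longer match the model's expected shapes; loading with strict=False
# then lets the freshly initialized embeddings stand in for them.
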
# --------------------------
# Visualization Helper Functions
# --------------------------
def show_mask(mask, ax, random_color=False):
    """
    Displays a semi-transparent mask overlay on the provided axis.
    """
    if random_color:
        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
    else:
        color = np.array([30/255, 144/255, 255/255, 0.6])  # Fixed RGBA color
    h, w = mask.shape[-2:]
    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
    ax.imshow(mask_image)
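
# Note on the overlay above: NumPy broadcasting multiplies the mask reshaped
# to (h, w, 1) by the RGBA color reshaped to (1, 1, 4), producing an
# (h, w, 4) image that is fully transparent wherever the mask is 0.
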
def show_points(coords, labels, ax, marker_size=375):
    """
    Displays positive (green) and negative (red) prompt points on the image axis.
    """
    pos_points = coords[labels == 1]
    neg_points = coords[labels == 0]
    ax.scatter(pos_points[:, 0], pos_points[:, 1],
               color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
    ax.scatter(neg_points[:, 0], neg_points[:, 1],
               color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)

def show_box(box, ax):
    """
    Draws a rectangular box (x0, y0, x1, y1) on the provided axis.
    """
    x0, y0 = box[0], box[1]
    w, h = box[2] - box[0], box[3] - box[1]
    ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2))

def show_res(masks, scores, input_point, input_label, input_box, filename, image):
    """
    Saves one visualization per mask, printing each mask's score.
    """
    for i, (mask, score) in enumerate(zip(masks, scores)):
        plt.figure(figsize=(10, 10))
        plt.imshow(image)
        show_mask(mask, plt.gca())
        if input_box is not None:
            box = input_box[i]
            show_box(box, plt.gca())
        if input_point is not None and input_label is not None:
            show_points(input_point, input_label, plt.gca())
        # Print the score for each mask
        print(f"Score: {score:.3f}")
        plt.axis('off')
        out_file = filename + f'_{i}.png'
        plt.savefig(out_file, bbox_inches='tight', pad_inches=-0.1)
        print(f"Output saved at {out_file}")
        plt.close()
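
# Minimal sketch of how the helpers above compose on a single image
# (hypothetical mask, score, and point values; real inputs come from
# predictor.predict in inference below):
#   img = cv2.cvtColor(cv2.imread("example.png"), cv2.COLOR_BGR2RGB)
#   mask = np.zeros((1, img.shape[0], img.shape[1]), dtype=bool)
#   show_res(mask, np.array([0.9]), np.array([[10.0, 20.0]]),
#            np.array([1]), None, "outputs/demo", img)
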
# --------------------------
# Inference Function (for Distributed Setup)
# --------------------------
def inference(rank, world_size, images, input_points, input_labels, result_path, ckpt_path, model_type):
    setup(rank, world_size)
    # Build the model, load the filtered checkpoint, and move it to the GPU
    # corresponding to the current rank (fall back to CPU when CUDA is unavailable)
    device = torch.device(f"cuda:{rank}" if torch.cuda.is_available() else "cpu")
    sam = sam_model_registry[model_type](checkpoint=None)
    sam = load_sam_checkpoint(sam, ckpt_path)
    sam = sam.to(device)
    model = torch.nn.parallel.DistributedDataParallel(sam, device_ids=[rank])
    predictor = SamPredictor(model.module)
    os.makedirs(result_path, exist_ok=True)
    # Shard the images evenly across ranks; the last rank also takes the remainder
    num_images = len(images)
    images_per_rank = num_images // world_size
    start_index = rank * images_per_rank
    end_index = num_images if rank == world_size - 1 else start_index + images_per_rank
    # Run inference on this rank's subset of images
    for i in range(start_index, end_index):
        image = cv2.imread(images[i])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        predictor.set_image(image)
        input_box = None
        # Fall back to a dummy two-point prompt when no clicks were recorded
        input_point = np.array(input_points[i], dtype=float) if input_points[i] is not None else np.array([[0, 0], [0, 0]], dtype=float)
        input_label = input_labels[i] if input_labels[i] is not None else np.ones(input_point.shape[0])  # Default label 1 (positive)
        hq_token_only = False
        print(f"Rank: {rank}, Image Index: {i}, Input Points: {input_point}, Input Labels: {input_label}")
        masks, scores, logits = predictor.predict(
            point_coords=input_point,
            point_labels=input_label,
            box=input_box,
            multimask_output=False,
            hq_token_only=hq_token_only,
        )
        filename = os.path.join(result_path, f'example_{i}_rank_{rank}')
        show_res(masks, scores, input_point, input_label, input_box, filename, image)
    cleanup()

# --------------------------
# DDP Setup and Cleanup Functions
# --------------------------
def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '11123'
    # Initialize the DDP process group
    torch.distributed.init_process_group(backend="nccl", rank=rank, world_size=world_size)

def cleanup():
    # Clean up the DDP process group
    torch.distributed.destroy_process_group()
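
# Note: mp.spawn in main passes the worker index as the first argument to
# inference, so setup(rank, world_size) runs once per spawned process.
# MASTER_PORT is arbitrary (any free local port works), and the "nccl"
# backend assumes one CUDA device per rank.
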
# --------------------------
# Main Function and Argument Parsing
# --------------------------
def get_arguments():
    parser = argparse.ArgumentParser(description="Inference with SAM Encoder and Decoder")
    parser.add_argument('--ckpt', type=str, required=True,
                        help="Path to the SAM checkpoint (encoder+decoder) to load")
    parser.add_argument('--sam_type', type=str, default='vit_b',
                        help="SAM model type (e.g., 'vit_b' or 'vit_t')")
    return parser.parse_args()

def main():
    args = get_arguments()
    world_size = 1  # Number of GPUs (adjust as necessary)
    image_dir = '/home/samhq/sam-hq/Personalize-SAM-HQ/input_imgs_renishaw_select'
    json_dir = '/home/samhq/sam-hq/Personalize-SAM-HQ/json_select'
    # Build the image list and its matching prompts in one pass over a sorted
    # listing, so indices stay aligned and ordering is deterministic
    images = []
    input_points = []
    input_labels = []
    for image_file in sorted(os.listdir(image_dir)):
        if not image_file.endswith('.png'):
            continue
        images.append(os.path.join(image_dir, image_file))
        json_path = os.path.join(json_dir, image_file.replace('.png', '.json'))
        if os.path.exists(json_path):
            with open(json_path, 'r') as f:
                json_data = json.load(f)
            clicked_points = np.array(json_data.get("clicked_points", []), dtype=float)
            input_points.append(clicked_points)
            input_labels.append(np.ones(clicked_points.shape[0]))
        else:
            input_points.append(None)
            input_labels.append(None)
    result_path = 'outputs/outdir_ren/'
    print(f"Results will be stored in: {result_path}")
    # Forward the CLI checkpoint path and model type to each worker
    mp.spawn(inference,
             args=(world_size, images, input_points, input_labels, result_path, args.ckpt, args.sam_type),
             nprocs=world_size)

if __name__ == "__main__":
    main()
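
# Example invocation (the checkpoint path is a placeholder for your own):
#   python inference_ren.py --ckpt work_dirs/sam_hq_epoch_4.pth --sam_type vit_b
#
# Each per-image JSON file is expected to contain (inferred from the
# "clicked_points" key read in main):
#   {"clicked_points": [[x1, y1], [x2, y2]]}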