Skip to content

Commit f918b4f

Browse files
authored
Bug fix and tuning (#62)
* Draw boundary on top of all detections * Update launch * Add bag name to video * Update params for better evaluation * Fix OBB bug * Update params * Tune transform
1 parent 1aa9a20 commit f918b4f

6 files changed

Lines changed: 92 additions & 17 deletions

File tree

Metrics/preprocessGT.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -35,16 +35,14 @@ def main(args):
3535
write_lines(result_name, normalized_lines)
3636

3737
# Define a list of points as tuples, must be clockwise
38-
# q7_2_may10_2023_clean bag
39-
# roi_points = np.array([(495, 230), (900, 230), (1910, 730), (525, 900)])
4038

41-
# q7_2_may10_2023_clean_short_range
39+
# may10 short_range
4240
# roi_points = np.array([(658,305), (980,960), (1900,737), (1049,287)])
4341

44-
# oct18_r9
42+
# oct18
4543
roi_points = np.array([(5,735),(1270,645),(1500,676),(240,1060)])
4644

47-
# dec7_dhd1_short
45+
# dec7
4846
# roi_points = np.array([(835,480), (1050,1056), (1918,845), (1130,465)])
4947

5048
# Create a polygon object from the points using shapely
@@ -56,7 +54,6 @@ def main(args):
5654
if args.image_path is not None:
5755
# Draw roi on image
5856
img = cv2.imread(args.image_path)
59-
cv2.polylines(img, [roi_points], True, (255, 0, 0), 3)
6057

6158
# Read the csv file using pandas
6259
df = pd.read_csv(result_name, header=None)
@@ -85,6 +82,10 @@ def main(args):
8582
if args.image_path is not None:
8683
cv2.polylines(img, [points], True, colour, 2)
8784

85+
if args.image_path is not None:
86+
# Draw roi on image
87+
cv2.polylines(img, [roi_points], True, (255, 0, 0), 3)
88+
8889
# Drop the rows that are in the remove list using pandas
8990
df = df.drop(remove_indices)
9091

fusion_module/fusion_module/fusion_visualizer_node.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,12 @@ def __init__(self):
2323
self.declare_parameter('save_video', False)
2424
save_video = self.get_parameter(
2525
'save_video').get_parameter_value().bool_value
26+
27+
self.declare_parameter('video_name', 'video')
28+
self.video_name = self.get_parameter(
29+
'video_name').get_parameter_value().string_value
30+
31+
print(self.video_name + '\n')
2632

2733
self.bridge = CvBridge()
2834
self.image_sub = Subscriber(self, Image, 'image')
@@ -104,7 +110,7 @@ def callback(self, image_msg, detection_msg):
104110
if self.video_writer is None:
105111
fourcc = VideoWriter_fourcc(*'mp4v') # choose codec
106112
fps = 10 # choose frame rate
107-
filename = 'video_{}.mp4'.format(datetime.datetime.now().strftime(
113+
filename = self.video_name + '_{}.mp4'.format(datetime.datetime.now().strftime(
108114
'%Y%m%d_%H%M%S')) # choose file name with current timestamp
109115
self.video_writer = VideoWriter(
110116
filename, fourcc, fps, (cv_image.shape[1], cv_image.shape[0]))

lidar_pipeline/configs/dec7_config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ projection_node:
4040
# Transform from Lidar to camera frame
4141
lidar2cam_extrinsic:
4242
rotation: [0.0, 1.0, 0.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0]
43-
translation: [0.00, 0.0508, 0.0502]
43+
translation: [-0.3, -0.32, 0.0502]
4444

4545
# Transform from lidar data frame to lidar sensor frame
4646
# See https://github.com/ros-drivers/ros2_ouster_drivers/issues/87
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
# These parameters are dependent on the sensor location
2+
# Note: For converting angles use https://www.andre-gaschler.com/rotationconverter/
3+
perception_node:
4+
ros__parameters:
5+
6+
# Transformation from sensor to world frame
7+
lidar2world_transform:
8+
# [x,y,z] in meters
9+
translation: [0.0, 0.0, 3.25]
10+
11+
# [w,x,y,z] normalized quaternion
12+
quaternion: [0.999769, 0.0, -0.021498, 0.0]
13+
14+
# The crop box isolates a region of interest in the pointcloud
15+
# all points outside this box are removed
16+
# Use the roi_visulaizer.py tool to find new crop_box params if needed
17+
crop_box_transform:
18+
# [x,y,z] in meters
19+
translation: [35.0, -0.5, 1.75]
20+
21+
# [w,x,y,z] normalized quaternion
22+
quaternion: [0.9025852843498606, 0.0, 0.0, 0.43051109680829514]
23+
24+
# [x, y, z] in meters
25+
size: [16.0, 75.0, 3.5]
26+
27+
28+
# Use the same `lidar2world_transform` for both nodes
29+
projection_node:
30+
ros__parameters:
31+
32+
# Matrix from intrinsic camera calibration
33+
# This day's data was recorded at 1920x1080
34+
camera_matrix: [1199.821557, 0.0, 960.562236, 0.0, 1198.033465, 551.675808, 0.0, 0.0, 1.0]
35+
36+
# Transformation from sensor to world frame
37+
lidar2world_transform:
38+
# [x,y,z] in meters
39+
translation: [0.0, 0.0, 3.25]
40+
41+
# [w,x,y,z] normalized quaternion
42+
quaternion: [0.999769, 0.0, -0.021498, 0.0]
43+
44+
# Transform from Lidar to camera frame
45+
lidar2cam_extrinsic:
46+
rotation: [-0.0174524, 0.9998477, 0.0000000,-0.0000000, -0.0000000, -1.0000000,-0.9998477, -0.0174524, 0.0000000 ]
47+
translation: [0.00, 0.0508, 0.0502]
48+
49+
# Transform from lidar data frame to lidar sensor frame
50+
# See https://github.com/ros-drivers/ros2_ouster_drivers/issues/87
51+
# The translation part may already be accounted for internally in the driver
52+
lidarData2lidarSensor_extrinsic:
53+
rotation: [-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0]
54+
translation: [0.0, 0.0, 0.03618]
55+

lidar_pipeline/launch/fusion_demo.launch.py

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
# Used to change playback rate of ros bag
2020
# A9 data bags were recorded at 2.5Hz so they need a x4 speedup
2121
# if left as 1 then there is no rate change
22-
BAG_PLAY_RATE = 0.16
22+
BAG_PLAY_RATE = 1.0
2323
FLIP_IMAGE = False
2424

2525
BAG_PLAY_LOOP = True
@@ -35,16 +35,16 @@
3535
'''
3636
ABS_PATH_TO_ROSBAGS = '/home/adrian/dev/bags/'
3737

38-
# 10, 7, 6, 12
39-
BAG_SELECTOR = 7
38+
# 10, 7, 6, 12, 13
39+
BAG_SELECTOR = 13
4040

41-
# Determines what kind of output you want, Video/Rviz2
41+
# Determines what kind of output you want, Video/Rviz2/csv_tracker_data
4242
SAVE_OUTPUT_VIDEO = True
4343
SAVE_CSV_FUSION_OUTPUT = True
4444
SHOW_RVIZ = False
4545

4646
# Fusion Overrides
47-
LIDAR_RESULT_ONLY = True
47+
LIDAR_RESULT_ONLY = False
4848
CAM_RESULT_ONLY = False
4949

5050
# Because of the yolov5 includes, its easier to just run this directly
@@ -58,7 +58,6 @@
5858
# FLIP_IMAGE = True
5959
CONFIG_NAME = 'default_config.yaml'
6060

61-
6261
# MARC Rooftop data without data synchronization
6362
# Fusion and projection will not work
6463
elif BAG_SELECTOR == 0:
@@ -77,6 +76,9 @@
7776
elif BAG_SELECTOR == 12:
7877
BAG_NAME = 'cleaned_bags/may10_r5_clean'
7978
CONFIG_NAME = 'may10_config.yaml'
79+
elif BAG_SELECTOR == 13:
80+
BAG_NAME = 'cleaned_bags/oct18_r1_clean'
81+
CONFIG_NAME = 'oct18_r1_config.yaml'
8082

8183
# MARC Rooftop data with synchronized lidar + camera
8284
elif BAG_SELECTOR == 2:
@@ -122,6 +124,15 @@
122124
print('Invalid bag selection!')
123125
exit(-1)
124126

127+
# Set and validate parameters
128+
129+
if SAVE_CSV_FUSION_OUTPUT:
130+
BAG_PLAY_RATE = 0.16
131+
132+
if LIDAR_RESULT_ONLY and CAM_RESULT_ONLY:
133+
print('Error: Must use lidar or camera results for fusion.')
134+
exit(ValueError)
135+
125136
START_BAG_DELAY = 0.0
126137
if SAVE_OUTPUT_VIDEO:
127138
START_BAG_DELAY = 7.5
@@ -196,13 +207,15 @@ def generate_launch_description():
196207
]
197208
)
198209

210+
video_name = BAG_NAME.split('/')[-1]
199211
fusion_viz = Node(
200212
package='fusion_module',
201213
executable='fusion_viz_node',
202214
name='fusion_viz_node',
203215
parameters=[
204216
{'flip_image': FLIP_IMAGE},
205217
{'save_video': SAVE_OUTPUT_VIDEO},
218+
{'video_name': video_name},
206219
]
207220
)
208221

lidar_pipeline/src/lidar_processing.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -277,9 +277,9 @@ vision_msgs::msg::BoundingBox3D LidarProcessing::getOrientedBoudingBox(const pcl
277277
size.getArray3fMap() = max_pt_T.getArray3fMap() - min_pt_T.getArray3fMap();
278278

279279
vision_msgs::msg::BoundingBox3D bbox;
280-
bbox.center.position.x = centroid[X];
281-
bbox.center.position.y = centroid[Y];
282-
bbox.center.position.z = centroid[Z];
280+
bbox.center.position.x = center[X];
281+
bbox.center.position.y = center[Y];
282+
bbox.center.position.z = center[Z];
283283
bbox.center.orientation.x = bboxQ.x();
284284
bbox.center.orientation.y = bboxQ.y();
285285
bbox.center.orientation.z = bboxQ.z();

0 commit comments

Comments
 (0)