/* This file is part of RGBDSLAM.
 *
 * RGBDSLAM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * RGBDSLAM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with RGBDSLAM. If not, see <http://www.gnu.org/licenses/>.
 */

//Documentation see header file
#include "pcl/ros/conversions.h"
#include <pcl/io/io.h>
#include "pcl/common/transform.h"
#include "pcl_ros/transforms.h"
#include "openni_listener.h"
#include <cv_bridge/CvBridge.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <sstream>
#include <string>
#include <cv.h>
#include <ctime>
#include <sensor_msgs/PointCloud2.h>
#include <Eigen/Core>
#include "node.h"
#include "misc.h"
//#include <image_geometry/pinhole_camera_model.h>
#include "pcl/ros/for_each_type.h"

//For rosbag reading
#include <rosbag/view.h>
#include <boost/foreach.hpp>


#include "parameter_server.h"
//for comparison with ground truth from mocap and movable cameras on robots
#include <tf/transform_listener.h>

typedef message_filters::Subscriber<sensor_msgs::Image> image_sub_type;
typedef message_filters::Subscriber<sensor_msgs::CameraInfo> cinfo_sub_type;
typedef message_filters::Subscriber<sensor_msgs::PointCloud2> pc_sub_type;

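// The union below packs one pixel's B, G, R and A bytes into the same four
// bytes as a float. PCL's colored point types store color in a float "rgb"
// field, so assigning color.float_value to pt.rgb (see createXYZRGBPointCloud
// below) writes all three channels in one step.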
typedef union
{
  struct /*anonymous*/
  {
    unsigned char Blue;
    unsigned char Green;
    unsigned char Red;
    unsigned char Alpha;
  };
  float float_value;
  long long_value;
} RGBValue;
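
// Replays a bagfile through the same message_filters pipeline that live data
// would take: matching messages are pushed into the fake BagSubscribers set up
// in the constructor, /tf messages are re-published, and playback honors the
// pause flag. In batch mode the trajectory is saved before and after a final
// optimization pass.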
// Load bag
void OpenNIListener::loadBag(const std::string &filename)
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  ROS_INFO("Loading Bagfile %s", filename.c_str());
  rosbag::Bag bag;
  try{
    bag.open(filename, rosbag::bagmode::Read);
  } catch(const rosbag::BagIOException& ex) {
    ROS_FATAL("Opening Bagfile %s failed: %s Quitting!", filename.c_str(), ex.what());
    ros::shutdown();
    return;
  }

  ParameterServer* params = ParameterServer::instance();
  std::string visua_tpc = params->get<std::string>("topic_image_mono");
  std::string depth_tpc = params->get<std::string>("topic_image_depth");
  std::string cinfo_tpc = params->get<std::string>("camera_info_topic");
  std::string tf_tpc = std::string("/tf");

  // Image topics to load for bagfiles
  std::vector<std::string> topics;
  topics.push_back(visua_tpc);
  topics.push_back(depth_tpc);
  topics.push_back(cinfo_tpc);
  topics.push_back(tf_tpc);

  rosbag::View view(bag, rosbag::TopicQuery(topics));
  // int lc=0;
  // Simulate sending of the messages in the bagfile
  BOOST_FOREACH(rosbag::MessageInstance const m, view)
  {
    // if(lc++ > 1000) break;
    do{
      usleep(250);
      if(!ros::ok()) return;
    } while(pause_);

    if (m.getTopic() == visua_tpc || ("/" + m.getTopic() == visua_tpc))
    {
      sensor_msgs::Image::ConstPtr rgb_img = m.instantiate<sensor_msgs::Image>();
      if (rgb_img) rgb_img_sub_->newMessage(rgb_img);
      ROS_INFO("Found Message of %s", visua_tpc.c_str());
    }

    if (m.getTopic() == depth_tpc || ("/" + m.getTopic() == depth_tpc))
    {
      sensor_msgs::Image::ConstPtr depth_img = m.instantiate<sensor_msgs::Image>();
      if (depth_img) depth_img_sub_->newMessage(depth_img);
      ROS_INFO("Found Message of %s", depth_tpc.c_str());
    }
    if (m.getTopic() == cinfo_tpc || ("/" + m.getTopic() == cinfo_tpc))
    {
      sensor_msgs::CameraInfo::ConstPtr cam_info = m.instantiate<sensor_msgs::CameraInfo>();
      if (cam_info) cam_info_sub_->newMessage(cam_info);
      ROS_INFO("Found Message of %s", cinfo_tpc.c_str());
    }
    if (m.getTopic() == tf_tpc || ("/" + m.getTopic() == tf_tpc)){
      tf::tfMessage::ConstPtr tf_msg = m.instantiate<tf::tfMessage>();
      if (tf_msg) tf_pub_.publish(tf_msg);
      ROS_INFO("Found Message of %s", tf_tpc.c_str());
    }
  }
  ROS_INFO("Finished processing of Bagfile");
  bag.close();
  do{
    future_.waitForFinished(); //Wait if GraphManager is still computing.
    usleep(1000000); //give it a chance to receive further messages
    if(!ros::ok()) return;
    ROS_WARN("Waiting for processing to finish.");
  } while(graph_mgr_->isBusy());

  if(ParameterServer::instance()->get<bool>("batch_processing")){
    graph_mgr_->saveTrajectory(QString(filename.c_str()) + "before_optimization");
    graph_mgr_->optimizeGraph(20);
    ROS_INFO("Finished with 1st optimization");
    graph_mgr_->saveTrajectory(QString(filename.c_str()) + "after1_optimization");
    //graph_mgr_->sanityCheck(100.0); //no trajectory is larger than a 100m
    //graph_mgr_->pruneEdgesWithErrorAbove(0.5);
    //graph_mgr_->optimizeGraph(20);
    //ROS_INFO("Finished with 2nd optimization");
    //graph_mgr_->saveTrajectory(QString(filename.c_str()) + "after2_optimization");
    if(ParameterServer::instance()->get<bool>("store_pointclouds")){
      graph_mgr_->saveIndividualClouds(QString(filename.c_str()), false);//not threaded
    }
    clock_gettime(CLOCK_MONOTONIC, &finish);
    elapsed = (finish.tv_sec - starttime.tv_sec);
    elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
    ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
    Q_EMIT bagFinished();
    ros::shutdown();
    usleep(10000000);//10sec to allow all threads to finish (don't know how much is required)
    if(ros::ok()){
      ROS_ERROR("Should have shut down by now");
      exit(1);
    }
  }
  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
}
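
// A minimal sketch of batch usage, assuming the node is launched via rosrun
// with ROS private-parameter syntax (the binary name and topic values are
// illustrative; the parameter names match those queried above):
//   rosrun rgbdslam rgbdslam _bagfile_name:=kinect.bag _batch_processing:=true \
//     _topic_image_mono:=/camera/rgb/image_mono _topic_image_depth:=/camera/depth/image
// With bagfile_name set, the constructor below wires up fake subscribers and
// runs loadBag in a background thread instead of subscribing to live topics.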
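
// Wires up one of three live input configurations, depending on which topic
// parameters are non-empty: color+depth+cloud (kinectCallback), color+depth+
// camera_info without a cloud (noCloudCallback), or a wide-angle stereo pair
// (stereoCallback). If bagfile_name is set, fake subscribers are created
// instead and fed by loadBag in a background thread.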
OpenNIListener::OpenNIListener(ros::NodeHandle nh, GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  save_bag_file(false),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  tflistener_(new tf::TransformListener(nh)),
  data_id_(0)
{
  ParameterServer* params = ParameterServer::instance();
  int q = params->get<int>("subscriber_queue_size");
  std::string bagfile_name = params->get<std::string>("bagfile_name");
  std::string visua_tpc = params->get<std::string>("topic_image_mono");
  std::string depth_tpc = params->get<std::string>("topic_image_depth");
  std::string cinfo_tpc = params->get<std::string>("camera_info_topic");

  if(bagfile_name.empty()){
    std::string cloud_tpc = params->get<std::string>("topic_points");
    std::string widev_tpc = params->get<std::string>("wide_topic");
    std::string widec_tpc = params->get<std::string>("wide_cloud_topic");

    //All information from Kinect
    if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, cloud_tpc, q);
      kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *visua_sub_, *depth_sub_, *cloud_sub_);
      kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc);
    }
    //No cloud, but visual image and depth
    else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q);
      no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *visua_sub_, *depth_sub_, *cinfo_sub_);
      no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
    }

    //All information from stereo
    if(!widev_tpc.empty() && !widec_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, widev_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, widec_tpc, q);
      stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_);
      if(params->get<bool>("use_wide")){
        stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2));
      }
      ROS_INFO_STREAM("Listening to " << widev_tpc << " and " << widec_tpc);
    }

    detector_ = createDetector(params->get<std::string>("feature_detector_type"));
    extractor_ = createDescriptorExtractor(params->get<std::string>("feature_extractor_type"));

    if(params->get<bool>("concurrent_node_construction")){
      ROS_INFO("QThreadPool reports %i ideal threads on this computer. Setting the maximum to twice that plus two, b/c the QtRos thread is very lightweight", QThread::idealThreadCount());
      QThreadPool::globalInstance()->setMaxThreadCount(QThread::idealThreadCount()*2+2);
    }
  }
  else //Bagfile given
  {
    tf_pub_ = nh.advertise<tf::tfMessage>("/tf", 10);
    //All information from Kinect
    if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty())
    {
      // Set up fake subscribers to capture images
      depth_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
      rgb_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
      cam_info_sub_ = new BagSubscriber<sensor_msgs::CameraInfo>();
      no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *rgb_img_sub_, *depth_img_sub_, *cam_info_sub_);
      no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
    }

    detector_ = createDetector(params->get<std::string>("feature_detector_type"));
    extractor_ = createDescriptorExtractor(params->get<std::string>("feature_extractor_type"));
    QtConcurrent::run(this, &OpenNIListener::loadBag, bagfile_name);

    if(params->get<bool>("concurrent_node_construction")){
      ROS_INFO("QThreadPool reports %i ideal threads on this computer. Will increase this by two, b/c the QtRos and loadBag threads are lightweight", QThread::idealThreadCount());
      QThreadPool::globalInstance()->setMaxThreadCount(QThread::idealThreadCount()+2);
    }
  }
}
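
// Builds a coarse depth mask for the stereo pair by reading the first float
// of each point record in the cloud (offset 0, stride point_step) and scaling
// it so that 2.55 m maps to full white; NaNs become 0. The mask and the mono
// image are then handed to the common cameraCallback path.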
void OpenNIListener::stereoCallback(const sensor_msgs::ImageConstPtr& visual_img_msg, const sensor_msgs::PointCloud2ConstPtr& point_cloud){
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  ROS_INFO("Received data from stereo cam");
  if(++data_id_ % ParameterServer::instance()->get<int>("data_skip_step") != 0){
    ROS_INFO_THROTTLE(1, "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
    return;
  }

  //calculate depthMask
  cv::Mat_<uchar> depth_img(visual_img_msg->height, visual_img_msg->width);
  float value;
  unsigned int pt_ctr = 0;
  for(unsigned int y = 0; y < visual_img_msg->height; y++){
    for(unsigned int x = 0; x < visual_img_msg->width; x++){
      const uchar* value_ch_ptr = &(point_cloud->data[pt_ctr]);
      value = *((const float*)value_ch_ptr);
      pt_ctr += point_cloud->point_step;
      if(value != value){ //NaN check: NaN never compares equal to itself
        depth_img(y,x) = 0;
      }else{
        depth_img(y,x) = (uchar)(value*100.0); //Full white at 2.55 meter
      }
    }
  }

  //Get images into OpenCV format
  sensor_msgs::CvBridge bridge;
  cv::Mat visual_img = bridge.imgMsgToCv(visual_img_msg, "mono8");
  if(visual_img.rows != depth_img.rows ||
     visual_img.cols != depth_img.cols ||
     point_cloud->width != (uint32_t) visual_img.cols ||
     point_cloud->height != (uint32_t) visual_img.rows){
    ROS_ERROR("PointCloud, depth and visual image differ in size! Ignoring Data");
    return;
  }

  if (bagfile_mutex.tryLock() && save_bag_file){
    // todo: make the names dynamic
    bag.write("/wide_stereo/points2", ros::Time::now(), point_cloud);
    bag.write("/wide_stereo/left/image_mono", ros::Time::now(), visual_img_msg);
    ROS_INFO_STREAM("Wrote to bagfile " << bag.getFileName());
    bagfile_mutex.unlock();
    if(pause_) return;
  }
  pointcloud_type::Ptr pc_col(new pointcloud_type());//will belong to node
  if(ParameterServer::instance()->get<bool>("use_gui")){
    Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
    Q_EMIT newDepthImage (cvMat2QImage(depth_img,1));//overwrites last cvMat2QImage
  }
  pcl::fromROSMsg(*point_cloud,*pc_col);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
  cameraCallback(visual_img, pc_col, depth_img);
}

OpenNIListener::~OpenNIListener(){
  delete tflistener_;
}
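
// Handles synchronized color/depth/camera_info triples when no point cloud
// topic is available; the cloud is reconstructed from the depth image in
// createXYZRGBPointCloud. Frames whose RGB and depth stamps differ by more
// than 33333333 ns (1/30 s) are warned about and optionally dropped. Note
// that only the nanosecond parts of the stamps are compared, so pairs that
// straddle a full-second boundary are not measured exactly.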
void OpenNIListener::noCloudCallback (const sensor_msgs::ImageConstPtr& visual_img_msg,
                                      const sensor_msgs::ImageConstPtr& depth_img_msg,
                                      const sensor_msgs::CameraInfoConstPtr& cam_info_msg)
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  ROS_DEBUG("Received data from kinect");

  if(++data_id_ % ParameterServer::instance()->get<int>("data_skip_step") != 0){
    ROS_INFO_THROTTLE(1, "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
    if(ParameterServer::instance()->get<bool>("use_gui")){//Show the image, even if not using it
      sensor_msgs::CvBridge bridge;
      cv::Mat depth_float_img = bridge.imgMsgToCv(depth_img_msg);
      cv::Mat visual_img = bridge.imgMsgToCv(visual_img_msg);
      if(visual_img.rows != depth_float_img.rows ||
         visual_img.cols != depth_float_img.cols){
        ROS_ERROR("depth and visual image differ in size! Ignoring Data");
        return;
      }
      depthToCV8UC1(depth_float_img, depth_mono8_img_); //float can't be visualized or used as mask in float format TODO: reprogram keypoint detector to use float values with nan to mask
      Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
      Q_EMIT newDepthImage (cvMat2QImage(depth_mono8_img_,1));//overwrites last cvMat2QImage
    }
    return;
  }

  //Get images into OpenCV format
  sensor_msgs::CvBridge bridge;
  cv::Mat depth_float_img = bridge.imgMsgToCv(depth_img_msg);
  cv::Mat visual_img = bridge.imgMsgToCv(visual_img_msg);
  if(visual_img.rows != depth_float_img.rows ||
     visual_img.cols != depth_float_img.cols){
    ROS_ERROR("depth and visual image differ in size! Ignoring Data");
    return;
  }
  depthToCV8UC1(depth_float_img, depth_mono8_img_); //float can't be visualized or used as mask in float format TODO: reprogram keypoint detector to use float values with nan to mask

  ros::Time d_time = depth_img_msg->header.stamp;
  ros::Time rgb_time = visual_img_msg->header.stamp;

  long rgb_timediff = abs(static_cast<long>(rgb_time.nsec) - static_cast<long>(d_time.nsec));
  if(rgb_timediff > 33333333){
    ROS_WARN("Depth and RGB image off more than 1/30sec: %li (nsec)", rgb_timediff);
    ROS_WARN_COND(ParameterServer::instance()->get<bool>("drop_async_frames"), "Asynchronous frames ignored. See parameters if you want to keep async frames.");
    ROS_INFO("Depth image time: %d - %d", d_time.sec, d_time.nsec);
    ROS_INFO("RGB image time: %d - %d", rgb_time.sec, rgb_time.nsec);
    if(ParameterServer::instance()->get<bool>("drop_async_frames")){
      return;
    }
  } else {
    ROS_DEBUG("Depth image time: %d - %d", d_time.sec, d_time.nsec);
    ROS_DEBUG("RGB image time: %d - %d", rgb_time.sec, rgb_time.nsec);
  }

  if (bagfile_mutex.tryLock() && save_bag_file){
    // todo: make the names dynamic
    bag.write("/camera/rgb/image_mono", ros::Time::now(), visual_img_msg);
    bag.write("/camera/depth/image", ros::Time::now(), depth_img_msg);
    ROS_INFO_STREAM("Wrote to bagfile " << bag.getFileName());
    bagfile_mutex.unlock();
    if(pause_) return;
  }
  if(ParameterServer::instance()->get<bool>("use_gui")){
    Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
    Q_EMIT newDepthImage (cvMat2QImage(depth_mono8_img_,1));//overwrites last cvMat2QImage
  }
  pointcloud_type::Ptr pc_col(createXYZRGBPointCloud(depth_img_msg, visual_img_msg, cam_info_msg));
  //sensor_msgs::PointCloud2::Ptr pc2(new sensor_msgs::PointCloud2());
  //pcl::toROSMsg(*pc, *pc2);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
  cameraCallback(visual_img, pc_col, depth_mono8_img_);
}
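
// Handles fully synchronized Kinect data (mono image, float depth image and
// point cloud). Consistency is checked both by size and by timestamp: the
// cloud must carry the depth image's stamp, and RGB may differ from the cloud
// by at most 1/30 s (again comparing only the nanosecond parts of the stamps).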
void OpenNIListener::kinectCallback (const sensor_msgs::ImageConstPtr& visual_img_msg,//got to be mono?
                                     const sensor_msgs::ImageConstPtr& depth_img_msg,
                                     const sensor_msgs::PointCloud2ConstPtr& point_cloud)
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  ROS_DEBUG("Received data from kinect");

  //Get images into OpenCV format
  sensor_msgs::CvBridge bridge;
  cv::Mat depth_float_img = bridge.imgMsgToCv(depth_img_msg);
  cv::Mat visual_img = bridge.imgMsgToCv(visual_img_msg);
  if(visual_img.rows != depth_float_img.rows ||
     visual_img.cols != depth_float_img.cols ||
     point_cloud->width != (uint32_t) visual_img.cols ||
     point_cloud->height != (uint32_t) visual_img.rows){
    ROS_ERROR("PointCloud, depth and visual image differ in size! Ignoring Data");
    return;
  }
  depthToCV8UC1(depth_float_img, depth_mono8_img_); //float can't be visualized or used as mask in float format TODO: reprogram keypoint detector to use float values with nan to mask

  ros::Time d_time = depth_img_msg->header.stamp;
  ros::Time rgb_time = visual_img_msg->header.stamp;
  ros::Time pc_time = point_cloud->header.stamp;

  long rgb_timediff = abs(static_cast<long>(rgb_time.nsec) - static_cast<long>(pc_time.nsec));
  if(d_time.nsec != pc_time.nsec || rgb_timediff > 33333333){
    ROS_WARN_COND(d_time.nsec != pc_time.nsec, "PointCloud doesn't correspond to depth image");
    ROS_WARN_COND(rgb_timediff > 33333333, "Point cloud and RGB image off more than 1/30sec: %li (nsec)", rgb_timediff);
    ROS_WARN_COND(ParameterServer::instance()->get<bool>("drop_async_frames"), "Asynchronous frames ignored. See parameters if you want to keep async frames.");
    ROS_INFO("Depth image time: %d - %d", d_time.sec, d_time.nsec);
    ROS_INFO("RGB image time: %d - %d", rgb_time.sec, rgb_time.nsec);
    ROS_INFO("Point cloud time: %d - %d", pc_time.sec, pc_time.nsec);
    if(ParameterServer::instance()->get<bool>("drop_async_frames")){
      return;
    }
  } else {
    ROS_DEBUG("Depth image time: %d - %d", d_time.sec, d_time.nsec);
    ROS_DEBUG("RGB image time: %d - %d", rgb_time.sec, rgb_time.nsec);
    ROS_DEBUG("Point cloud time: %d - %d", pc_time.sec, pc_time.nsec);
  }

  if (bagfile_mutex.tryLock() && save_bag_file){
    // todo: make the names dynamic
    bag.write("/camera/rgb/points", ros::Time::now(), point_cloud);
    bag.write("/camera/rgb/image_mono", ros::Time::now(), visual_img_msg);
    bag.write("/camera/depth/image", ros::Time::now(), depth_img_msg);
    ROS_INFO_STREAM("Wrote to bagfile " << bag.getFileName());
    bagfile_mutex.unlock();
    if(pause_) return;
  }
  if(ParameterServer::instance()->get<bool>("use_gui")){
    Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
    Q_EMIT newDepthImage (cvMat2QImage(depth_mono8_img_,1));//overwrites last cvMat2QImage
  }

  pointcloud_type::Ptr pc_col(new pointcloud_type());//will belong to node
  pcl::fromROSMsg(*point_cloud,*pc_col);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
  cameraCallback(visual_img, pc_col, depth_mono8_img_);
}
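
// Common processing path for all input configurations: converts the image to
// grayscale if needed, builds a Node (keypoints + features + cloud), attaches
// the base-to-camera and optional ground-truth transforms, and passes the
// node to the GraphManager, optionally in a background thread.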
void OpenNIListener::cameraCallback(cv::Mat visual_img, pointcloud_type::Ptr point_cloud, cv::Mat depth_mono8_img)
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  ROS_WARN_COND(point_cloud == NULL, "Null pointer for point cloud");
  if(getOneFrame_) { //if getOneFrame_ is set, unset it and skip check for pause
    getOneFrame_ = false;
  } else if(pause_ && !save_bag_file) { //Visualization and nothing else
    return;
  }
  cv::Mat gray_img;
  if(visual_img.type() == CV_8UC3){
    cvtColor(visual_img, gray_img, CV_RGB2GRAY);
  } else {
    gray_img = visual_img;
  }

  Q_EMIT setGUIStatus("Computing Keypoints and Features");

  //######### Main Work: create new node ##############################################################
  Node* node_ptr = new Node(gray_img, detector_, extractor_, point_cloud, depth_mono8_img_);

  // Fill in Transformation ---------------------------------------------------------------------------
  //Retrieve the transform between the lens and the base-link at capturing time
  std::clock_t tf_time=std::clock();
  tf::StampedTransform base2points;
  std::string base_frame = ParameterServer::instance()->get<std::string>("base_frame_name");
  ros::Time pc_time = point_cloud->header.stamp;
  try{
    tflistener_->waitForTransform(base_frame, point_cloud->header.frame_id, pc_time, ros::Duration(0.1));
    tflistener_->lookupTransform(base_frame, point_cloud->header.frame_id, pc_time, base2points);
  }
  catch (tf::TransformException ex){
    ROS_WARN("%s",ex.what());
    ROS_WARN("Using Standard kinect /openni_camera -> /openni_rgb_optical_frame as transformation");
    //emulate the transformation from kinect openni_camera frame to openni_rgb_optical_frame
    base2points.setRotation(tf::createQuaternionFromRPY(-1.57,0,-1.57));
    base2points.setOrigin(tf::Point(0,-0.04,0));
  }
  node_ptr->setBase2PointsTransform(base2points);

  std::string gt_frame = ParameterServer::instance()->get<std::string>("ground_truth_frame_name");
  if(!gt_frame.empty()){
    //Retrieve the ground truth data. For the first frame it will be
    //set as origin. The rest will be used to compare
    tf::StampedTransform ground_truth_transform;
    try{
      tflistener_->waitForTransform(gt_frame, "/openni_camera", pc_time, ros::Duration(0.1));
      tflistener_->lookupTransform(gt_frame, "/openni_camera", pc_time, ground_truth_transform);
      tf::StampedTransform b2p;
      //HACK to comply with Jürgen Sturm's Ground truth, though I can't manage here to get the full transform from tf
      b2p.setRotation(tf::createQuaternionFromRPY(-1.57,0,-1.57));
      b2p.setOrigin(tf::Point(0,-0.04,0));
      ground_truth_transform *= b2p;
    }
    catch (tf::TransformException ex){
      ROS_WARN("%s - Using Identity for Ground Truth",ex.what());
      ground_truth_transform = tf::StampedTransform(tf::Transform::getIdentity(), pc_time, "/missing_ground_truth", "/openni_camera");
    }
    node_ptr->setGroundTruthTransform(ground_truth_transform);
  }
  ROS_INFO_STREAM_NAMED("timings", "tf time: "<< ( std::clock() - tf_time ) / (double)CLOCKS_PER_SEC <<"sec");
  // End: Fill in Transformation -----------------------------------------------------------------------

  std::clock_t parallel_wait_time=std::clock();
  future_.waitForFinished(); //Wait if GraphManager is still computing. Does this skip on empty qfuture?
  ROS_INFO_STREAM_NAMED("timings", "waiting time: "<< ( std::clock() - parallel_wait_time ) / (double)CLOCKS_PER_SEC <<"sec");

  //With concurrent_node_construction, processNode runs in the background and after finishing visualizes the results.
  //Thus another callback can be started in the meantime, to create a new node in parallel.
  if(ParameterServer::instance()->get<bool>("concurrent_node_construction")) {
    ROS_DEBUG("Processing Node in parallel");
    future_ = QtConcurrent::run(this, &OpenNIListener::processNode, gray_img, point_cloud, node_ptr);
  }
  else //regular function call
    processNode(gray_img, point_cloud, node_ptr);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
}
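
// Hands the node to the GraphManager. If it is rejected (no reliable match to
// the existing graph), ownership stays here and the node is deleted; the GUI,
// if enabled, shows either the feature flow of the accepted registration or
// the rejected node's keypoints.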
void OpenNIListener::processNode(cv::Mat& visual_img, // will be drawn to
                                 pointcloud_type::Ptr point_cloud,
                                 Node* new_node)
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  Q_EMIT setGUIStatus("GraphSLAM");
  bool has_been_added = graph_mgr_->addNode(new_node);

  //######### Visualization code #############################################
  cv::Mat feature_img = cv::Mat::zeros(visual_img.rows, visual_img.cols, CV_8UC1);
  if(ParameterServer::instance()->get<bool>("use_gui")){
    if(has_been_added){
      graph_mgr_->drawFeatureFlow(feature_img);
      Q_EMIT newFeatureFlowImage(cvMat2QImage(visual_img,depth_mono8_img_, feature_img, 2)); //show registration
    } else {
      cv::drawKeypoints(feature_img, new_node->feature_locations_2d_, feature_img, cv::Scalar(155), 5);
      Q_EMIT newFeatureFlowImage(cvMat2QImage(visual_img,depth_mono8_img_, feature_img, 2)); //show registration
    }
  }
  if(!has_been_added) delete new_node; //rejected nodes are not owned by the graph (previously only deleted when use_gui was set)
  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
}

void OpenNIListener::toggleBagRecording(){
  bagfile_mutex.lock();
  save_bag_file = !save_bag_file;
  // save bag
  if (save_bag_file)
  {
    time_t rawtime;
    struct tm* timeinfo;
    char buffer[80];

    time ( &rawtime );
    timeinfo = localtime ( &rawtime );
    // create a nice name
    strftime (buffer,80,"kinect_%Y-%m-%d-%H-%M-%S.bag",timeinfo);

    bag.open(buffer, rosbag::bagmode::Write);
    ROS_INFO_STREAM("Opened bagfile " << bag.getFileName());
  } else {
    ROS_INFO_STREAM("Closing bagfile " << bag.getFileName());
    bag.close();
  }
  bagfile_mutex.unlock();
}

void OpenNIListener::togglePause(){
  pause_ = !pause_;
  ROS_INFO("Pause toggled to: %s", pause_? "true":"false");
  if(pause_) Q_EMIT setGUIStatus("Processing Thread Stopped");
  else Q_EMIT setGUIStatus("Processing Thread Running");
}

void OpenNIListener::getOneFrame(){
  getOneFrame_ = true;
}

/// Create a QImage from three channels. The QImage stores its data in the rgba_buffers_ indexed by idx (reused/overwritten each call)
QImage OpenNIListener::cvMat2QImage(const cv::Mat& channel1, const cv::Mat& channel2, const cv::Mat& channel3, unsigned int idx){
  if(rgba_buffers_.size() <= idx){
    rgba_buffers_.resize(idx+1);
  }
  if(channel2.rows != channel1.rows || channel2.cols != channel1.cols ||
     channel3.rows != channel1.rows || channel3.cols != channel1.cols){
    ROS_ERROR("Image channels to be combined differ in size");
  }
  if(channel1.rows != rgba_buffers_[idx].rows || channel1.cols != rgba_buffers_[idx].cols){
    ROS_DEBUG("Created new rgba_buffer with index %u", idx);
    rgba_buffers_[idx] = cv::Mat( channel1.rows, channel1.cols, CV_8UC4);
    printMatrixInfo(rgba_buffers_[idx]);
  }
  std::vector<cv::Mat> input;
  cv::Mat alpha( channel1.rows, channel1.cols, CV_8UC1, cv::Scalar(255)); //TODO this could be buffered for performance
  input.push_back(channel1);
  input.push_back(channel2);
  input.push_back(channel3);
  input.push_back(alpha);
  cv::merge(input, rgba_buffers_[idx]);
  printMatrixInfo(rgba_buffers_[idx]);
  return QImage((unsigned char *)(rgba_buffers_[idx].data),
                rgba_buffers_[idx].cols, rgba_buffers_[idx].rows,
                rgba_buffers_[idx].step, QImage::Format_RGB32 );
}

/// Create a QImage from image. The QImage stores its data in the rgba_buffers_ indexed by idx (reused/overwritten each call)
QImage OpenNIListener::cvMat2QImage(const cv::Mat& image, unsigned int idx){
  ROS_DEBUG_STREAM("Converting Matrix of type " << openCVCode2String(image.type()) << " to RGBA");
  if(rgba_buffers_.size() <= idx){
    rgba_buffers_.resize(idx+1);
  }
  ROS_DEBUG_STREAM("Target Matrix is of type " << openCVCode2String(rgba_buffers_[idx].type()));
  if(image.rows != rgba_buffers_[idx].rows || image.cols != rgba_buffers_[idx].cols){
    ROS_DEBUG("Created new rgba_buffer with index %u", idx);
    rgba_buffers_[idx] = cv::Mat( image.rows, image.cols, CV_8UC4);
    printMatrixInfo(rgba_buffers_[idx]);
  }
  cv::Mat alpha( image.rows, image.cols, CV_8UC1, cv::Scalar(255)); //TODO this could be buffered for performance
  cv::Mat in[] = { image, alpha };
  // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
  // rgba[2] -> bgr[0], rgba[3] -> alpha[0]

  if(image.type() == CV_8UC1){
    int from_to[] = { 0,0, 0,1, 0,2, 1,3 }; //replicate gray to R, G and B; alpha from second input
    mixChannels( in, 2, &rgba_buffers_[idx], 1, from_to, 4 );
  } else {
    int from_to[] = { 2,0, 1,1, 0,2, 3,3 }; //BGR+A -> RGBA
    mixChannels( in, 2, &rgba_buffers_[idx], 1, from_to, 4 );
  }
  printMatrixInfo(rgba_buffers_[idx]);
  //cv::cvtColor(image, rgba_buffers_, CV_GRAY2RGBA);}
  //}
  return QImage((unsigned char *)(rgba_buffers_[idx].data),
                rgba_buffers_[idx].cols, rgba_buffers_[idx].rows,
                rgba_buffers_[idx].step, QImage::Format_RGB32 );
}
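
// Back-projects the depth image into a colored point cloud using the pinhole
// model from the CameraInfo intrinsics K: with principal point (cx, cy) and
// focal lengths (K[0], K[4]), a pixel (u, v) with depth Z maps to
//   X = (u - cx) * Z / K[0],  Y = (v - cy) * Z / K[4].
// Note that the local variables fx and fy below hold the *inverse* focal
// lengths, so the division appears as a multiplication. The RGB image may be
// higher resolution than the depth image; color_step and color_skip subsample
// it accordingly.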
pointcloud_type* createXYZRGBPointCloud (const sensor_msgs::ImageConstPtr& depth_msg, const sensor_msgs::ImageConstPtr& rgb_msg, const sensor_msgs::CameraInfoConstPtr& cam_info)
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  pointcloud_type* cloud_msg(new pointcloud_type());
  cloud_msg->header.stamp = depth_msg->header.stamp;
  cloud_msg->header.frame_id = rgb_msg->header.frame_id;
  cloud_msg->is_dense = false;

  float cx, cy, fx, fy;//principal point and focal lengths
  unsigned color_step, color_skip;

  cloud_msg->height = depth_msg->height;
  cloud_msg->width = depth_msg->width;
  cx = cam_info->K[2]; //(cloud_msg->width >> 1) - 0.5f;
  cy = cam_info->K[5]; //(cloud_msg->height >> 1) - 0.5f;
  fx = 1.0f / cam_info->K[0];
  fy = 1.0f / cam_info->K[4];
  int pixel_data_size = 0;
  if(rgb_msg->encoding.compare("mono8") == 0) pixel_data_size = 1;
  if(rgb_msg->encoding.compare("rgb8") == 0) pixel_data_size = 3;
  ROS_ERROR_COND(pixel_data_size == 0, "Unknown image encoding: %s!", rgb_msg->encoding.c_str());
  color_step = pixel_data_size * rgb_msg->width / cloud_msg->width;
  color_skip = pixel_data_size * (rgb_msg->height / cloud_msg->height - 1) * rgb_msg->width;

  cloud_msg->points.resize (cloud_msg->height * cloud_msg->width);

  const float* depth_buffer = reinterpret_cast<const float*>(&depth_msg->data[0]);
  const uint8_t* rgb_buffer = &rgb_msg->data[0];

  // depth_msg already has the desired dimensions, but rgb_msg may be higher res.
  int color_idx = 0, depth_idx = 0;
  double depth_scaling = ParameterServer::instance()->get<double>("depth_scaling_factor");

  pointcloud_type::iterator pt_iter = cloud_msg->begin ();
  for (int v = 0; v < (int)cloud_msg->height; ++v, color_idx += color_skip)
  {
    for (int u = 0; u < (int)cloud_msg->width; ++u, color_idx += color_step, ++depth_idx, ++pt_iter)
    {
      point_type& pt = *pt_iter;
      float Z = depth_buffer[depth_idx] * depth_scaling;

      // Check for invalid measurements
      if (std::isnan (Z))
      {
        pt.x = pt.y = pt.z = Z;
      }
      else // Fill in XYZ
      {
        pt.x = (u - cx) * Z * fx;
        pt.y = (v - cy) * Z * fy;
        pt.z = Z;
      }

      // Fill in color
      RGBValue color;
      if(pixel_data_size == 3){
        color.Red   = rgb_buffer[color_idx];
        color.Green = rgb_buffer[color_idx + 1];
        color.Blue  = rgb_buffer[color_idx + 2];
      } else {
        color.Red = color.Green = color.Blue = rgb_buffer[color_idx];
      }
      color.Alpha = 0;
      pt.rgb = color.float_value;
    }
  }

  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec);
  elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
  return cloud_msg;
}