root / vision / src / opencvdetect.cpp @ 14f111dd
History | View | Annotate | Download (4.59 KB)
1 |
#include "/home/nstanley/OpenCV/OpenCV-2.3.1/include/opencv/cv.h" |
---|---|
2 |
#include "/home/nstanley/OpenCV/OpenCV-2.3.1/include/opencv/highgui.h" |
3 |
#include <opencv/cxcore.h> |
4 |
#include <ros/ros.h> |
5 |
#include <stdio.h> |
6 |
#include <cv_bridge/cv_bridge.h> |
7 |
#include <sensor_msgs/image_encodings.h> |
8 |
#include <vision/TargetDescriptor.h> |
9 |
#include <image_transport/image_transport.h> |
10 |
#include <geometry_msgs/Point.h> |
11 |
#include <tf/transform_listener.h> |
12 |
#include <algorithm> |
13 |
#include </home/nstanley/cvblobs8.3_linux/blob.h> |
14 |
#include </home/nstanley/cvblobs8.3_linux/BlobResult.h> |
15 |
|
16 |
// Publishers created in main() and used from the image callbacks:
ros::Publisher pub;      // "vision/target_3d"  (geometry_msgs::Point)
ros::Publisher img_pub;  // "vision/blob_image" (sensor_msgs::Image)

// Reference orange color, HSV. NOTE(review): these globals are not read
// anywhere in this file — filter() hard-codes the same {216, 255, 255}
// values in its local array; confirm and unify with filter().
int hk = 216;
int sk = 255;
int vk = 255;
//int threshold = 25;
|
23 |
|
24 |
// Scores how close a pixel is to the reference orange {216, 255, 255}.
// Only the first two components (r, g) enter the squared distance; the
// third component (b) is accepted but deliberately ignored.
//
// Returns a positive "closeness" score (threshold - distance) when the
// squared distance is below `threshold`, otherwise 0.
int filter(int r, int g, int b, int threshold) {
    const int kOrangeH = 216;
    const int kOrangeS = 255;

    const int dh = kOrangeH - r;
    const int ds = kOrangeS - g;
    const int dist = dh * dh + ds * ds;

    return (dist < threshold) ? threshold - dist : 0;
}
32 |
|
33 |
// Image callback: finds orange blobs in the incoming frame, paints them
// into a copy of the image, and publishes the annotated frame on
// "vision/blob_image". Publishing the 3-D target point is still TODO.
void target_cb(const sensor_msgs::Image orig) {

    printf("beginning callback \n");

    // Kept for the commented-out TF lookup below.
    tf::TransformListener listener;
    geometry_msgs::Point point;
    tf::StampedTransform transform;

    int width = orig.width;
    int height = orig.height;
    /*try {
        listener.lookupTransform("/camera", "/kinect", ros::Time(0), transform);
    }
    catch (tf::TransformException ex) {
        ROS_ERROR("%s", ex.what());
    }*/

    // Convert the ROS image to an OpenCV BGR8 image.
    cv_bridge::CvImagePtr cv_ptr;
    try {
        cv_ptr = cv_bridge::toCvCopy(orig, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    // Wrap the converted cv::Mat in an IplImage header. This is a view,
    // not a copy, so it must NOT be passed to cvReleaseImage. (The old
    // code wrote through an uninitialized IplImage*, which is UB/crash.)
    IplImage input_header = cv_ptr->image;
    IplImage* input = &input_header;

    // Working images. Destinations of the ...2GRAY conversion and the mask
    // must be single-channel; cloning the 3-channel frame for bw_img (as
    // the old code did) makes cvCvtColor fail at runtime.
    IplImage* img = cvCloneImage(input);
    IplImage* hsv_img = cvCloneImage(img);
    IplImage* bw_img = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    IplImage* i1 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvZero(i1); // cvCreateImage does not zero the buffer; mask must start empty

    CBlobResult blobs;
    CBlob blobArea;

    // Smooth input image using a Gaussian filter, assign HSV, BW image.
    cvSmooth(input, img, CV_GAUSSIAN, 7, 9, 0, 0);
    // The bridge produced BGR8, so use the BGR conversion codes — the
    // RGB codes silently swapped the red and blue channels.
    cvCvtColor(img, bw_img, CV_BGR2GRAY);
    cvCvtColor(img, hsv_img, CV_BGR2HSV);

    // Build a mask (i1) of pixels whose color is near the reference orange.
    // NOTE(review): in an HSV image channel 2 is V, not H; this channel
    // order is preserved from the original code but looks suspicious —
    // confirm against the intended hue comparison in filter().
    for (int i = 0; i < hsv_img->height; i++) {
        const uchar* hsv_row = (uchar*)(hsv_img->imageData + i * hsv_img->widthStep);
        uchar* mask_row = (uchar*)(i1->imageData + i * i1->widthStep);
        for (int j = 0; j < hsv_img->width; j++) {
            int r = hsv_row[j * hsv_img->nChannels + 2];
            int g = hsv_row[j * hsv_img->nChannels + 1];
            int b = hsv_row[j * hsv_img->nChannels + 0];
            if (filter(r, g, b, 5000)) {
                mask_row[j] = 250;
            }
        }
    }

    // Detect blobs using the mask; keep only blobs with area > 500 px.
    blobs = CBlobResult(bw_img, i1, 100/*, true*/);
    blobs.Filter(blobs, B_INCLUDE, CBlobGetArea(), B_GREATER, 500);
    blobs.GetNthBlob(CBlobGetArea(), blobs.GetNumBlobs() - 1, blobArea); // example

    // Paint every sufficiently large blob into the output image.
    // Blob indices are 0-based; the old loop started at 1 and skipped blob 0.
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        blobArea = blobs.GetBlob(i);
        if (blobArea.Area() > 150)
            blobArea.FillBlob(img, cvScalar(255, 0, 0));
    }

    // Publish the annotated image. Deep-copy the pixels so the message
    // remains valid after the IplImages are released below. (The old code
    // assigned a single char — '*(img->imageData)' — which left the Mat
    // empty, and published the CvImage without converting it to a message.)
    cv_bridge::CvImage blob_image;
    blob_image.header = orig.header;
    blob_image.encoding = sensor_msgs::image_encodings::BGR8;
    blob_image.image = cv::Mat(img, /*copyData=*/true);
    img_pub.publish(blob_image.toImageMsg());

    // Clean up — only the heap-allocated images; 'input' is a stack view.
    cvReleaseImage(&i1);
    cvReleaseImage(&bw_img);
    cvReleaseImage(&hsv_img);
    cvReleaseImage(&img);

    // TODO: fill a geometry_msgs::Point from the chosen blob + TF and
    // publish it on 'pub'. Sketch (camera-centered axes):
    //   origPoint.x = distance;
    //   origPoint.y = width / 2 - comX;
    //   origPoint.z = comY - height / 2;
    //   pub.publish(origPoint);

    printf("Ending callback\n");
}
155 |
|
156 |
// Debug stub callback: ignores the incoming image and publishes a fixed
// (1, 2, 3) point on 'pub' so downstream consumers can be exercised
// without a working detector.
void target_cb_test(const sensor_msgs::Image image){
    geometry_msgs::Point fixed_point;
    fixed_point.x = 1;
    fixed_point.y = 2;
    fixed_point.z = 3;
    pub.publish(fixed_point);
}
164 |
|
165 |
int main(int argc, char **argv) { |
166 |
ros::init(argc, argv, "v2v3_converter");
|
167 |
ros::NodeHandle n; |
168 |
pub = n.advertise<geometry_msgs::Point>("vision/target_3d", 1000); |
169 |
img_pub = n.advertise<sensor_msgs::Image>("vision/blob_image", 1000); |
170 |
ros::Subscriber sub = n.subscribe("/camera/rgb/image_color", 1, target_cb); |
171 |
ros::spin(); |
172 |
return 0; |
173 |
} |