root / vision / src / v2v3_converter.cpp @ 91a0bfa9
History | View | Annotate | Download (5.78 KB)
1 |
#include <stdio.h>

#include <algorithm>

#include <geometry_msgs/Point.h>
#include <image_transport/image_transport.h>
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <tf/transform_listener.h>
#include <vision/TargetDescriptor.h>
7 |
|
8 |
ros::Publisher pub;     // publishes the target's 3-D position (geometry_msgs::Point)
ros::Publisher img_pub; // publishes the thresholded black-and-white blob image

// Target HSV values the detector searches for; detectBlobs() keeps pixels
// whose hue channel lies within hk +/- 10.
int hk = 15;  // target hue
int sk = 255; // target saturation (currently unused by detectBlobs — TODO confirm intent)
int vk = 255; // target value (currently unused by detectBlobs — TODO confirm intent)
14 |
|
15 |
/* Given index p and image pointer img,
|
16 |
* determines whether or not the pixel
|
17 |
* in the indicated vicinity is equal to it.
|
18 |
*/
|
19 |
/* True when the byte immediately after index p in the image data equals
 * the byte at p.  The caller is responsible for ensuring p is not in the
 * last column.
 */
bool isRight(sensor_msgs::Image * img, int p){
    const unsigned char here = img->data[p];
    const unsigned char neighbour = img->data[p + 1];
    return here == neighbour;
}
22 |
|
23 |
/* True when the byte immediately before index p in the image data equals
 * the byte at p.  The caller is responsible for ensuring p is not in the
 * first column.
 */
bool isLeft(sensor_msgs::Image * img, int p){
    const unsigned char here = img->data[p];
    const unsigned char neighbour = img->data[p - 1];
    return here == neighbour;
}
26 |
|
27 |
/* True when the byte one image-width above index p equals the byte at p.
 * The caller is responsible for ensuring p is not in the first row.
 * NOTE(review): the offset uses width, not step — assumes callers pass
 * per-pixel (single-channel) indices; TODO confirm for multi-channel data.
 */
bool isUp(sensor_msgs::Image * img, int p){
    const int above = p - img->width;
    return img->data[above] == img->data[p];
}
30 |
|
31 |
/* True when the byte one image-width below index p equals the byte at p.
 * The caller is responsible for ensuring p is not in the last row.
 * NOTE(review): the offset uses width, not step — assumes callers pass
 * per-pixel (single-channel) indices; TODO confirm for multi-channel data.
 */
bool isDown(sensor_msgs::Image * img, int p){
    const int below = p + img->width;
    return img->data[below] == img->data[p];
}
34 |
|
35 |
/* Checks for any adjacent pixel, ensuring a continuous blob.
|
36 |
*/
|
37 |
/* Checks whether the pixel addressed by byte offset j has at least one
 * 4-connected neighbour holding the same value, ensuring a continuous blob.
 *
 * j is a byte index into the interleaved image data; it is divided by the
 * channel count to obtain the per-pixel index used for the neighbour tests.
 * Returns false when no in-bounds neighbour matches.
 *
 * NOTE(review): the neighbour helpers index data[] with per-pixel offsets,
 * which only lines up exactly for single-channel data — TODO confirm for
 * multi-channel images.
 */
bool hasAnAdjacent(sensor_msgs::Image * image, int j){
    int nl = image->height;                     // number of rows
    int nc = image->width;                      // number of columns
    int nChannels = image->step / image->width; // bytes per pixel
    int pos = j / nChannels;                    // per-pixel index

    if ((pos % nc) != 0 && isLeft(image, pos)) return true;       // not in first column
    if ((pos / nc) != 0 && isUp(image, pos)) return true;         // not in first row
    if ((pos % nc) != nc - 1 && isRight(image, pos)) return true; // not in last column
    // BUG FIX: the original compared the row index against nc - 1 (columns,
    // not rows) and could fall off the end of the function without returning
    // a value (undefined behaviour) when the last guard passed but isDown
    // was false.
    if ((pos / nc) != nl - 1 && isDown(image, pos)) return true;  // not in last row
    return false;
}
56 |
|
57 |
/* Detects the blobs in an image within above defined constraints. Transforms image into 255
|
58 |
* for pixels in the blob, 0 for not in the blob.
|
59 |
*/
|
60 |
/* Detects the blob in an HSV image within the hue band hk +/- 10.
 * Transforms the image in place: 255 for pixels in the blob, 0 otherwise.
 * Outputs the blob's centre of mass in pixel coordinates (*comX, *comY)
 * and its size in pixels (*area), then publishes the black-and-white
 * image on img_pub.  When no blob pixel survives, *area is 0 and the
 * centre of mass is (0, 0).
 */
void detectBlobs(sensor_msgs::Image * image, int * comX, int * comY, int * area){
    int nl = image->height;                     // number of rows
    int rowBytes = image->step;                 // bytes per row
    int nChannels = image->step / image->width; // bytes per pixel

    // Pass 1: threshold every pixel on its hue channel.
    for (int i = 0; i < nl; i++){
        for (int j = 0; j < rowBytes; j += nChannels){
            // BUG FIX: the original indexed rows with i * width instead of
            // i * step, and tested the lower hue bound against data[j]
            // (row 0) instead of the current pixel.
            int idx = i * rowBytes + j;
            unsigned char hue = image->data[idx];
            unsigned char out = (hue <= hk + 10 && hue >= hk - 10) ? 255 : 0;
            image->data[idx] = out;     // mark all channels so the published
            image->data[idx + 1] = out; // image is visibly black/white
            image->data[idx + 2] = out;
        }
    }

    // Pass 2: keep only pixels with a matching neighbour (continuous blob)
    // and accumulate the centre of mass.
    int comXSum = 0;
    int comYSum = 0;
    int blobPixelCount = 0;
    // BUG FIX: the original iterated to width * height, covering only a
    // fraction of the step * height byte buffer.
    int totalBytes = nl * rowBytes;
    for (int i = 0; i < totalBytes; i += nChannels){
        // if the pixel is not both the right hue and adjacent-connected, clear it
        if ( !(image->data[i] == 255 && hasAnAdjacent(image, i)) ){
            image->data[i] = 0;
            image->data[i + 1] = 0;
            image->data[i + 2] = 0;
        }
        else {
            // BUG FIX: the original added data[i] / 255 (always 1) to both
            // sums, so the "centre" was always (1, 1); accumulate the pixel's
            // column and row instead.
            int pixel = i / nChannels;
            blobPixelCount++;
            comXSum += pixel % image->width;
            comYSum += pixel / image->width;
        }
    }
    if (blobPixelCount != 0){
        // average the coordinate sums into the centre of mass
        comXSum /= blobPixelCount;
        comYSum /= blobPixelCount;
    }

    // output results
    *area = blobPixelCount;
    *comX = comXSum;
    *comY = comYSum;

    img_pub.publish(*image); // publish new image of only black and white
}
113 |
|
114 |
/* Converts an interleaved RGB image to HSV in place.
 *
 * Each pixel's three bytes are rewritten as (H, S, V) with H in
 * half-degrees (0-179, OpenCV convention) so it fits in one byte, and
 * S, V scaled to 0-255.  detectBlobs() compares the global hue target
 * hk against this H channel.
 */
void convertToHSV(sensor_msgs::Image * orig){
    int nChannels = orig->step / orig->width;
    printf("Converting to HSV \n");

    // BUG FIX: the original iterated to height * width, covering only a
    // fraction of the step-per-row byte buffer.
    int totalBytes = orig->height * orig->step;
    for (int i = 0; i < totalBytes; i += nChannels){
        int r = orig->data[i];
        int g = orig->data[i + 1];
        int b = orig->data[i + 2];

        // value = max channel; chroma = max - min
        // BUG FIX: the original declared a local `int min;` that shadowed
        // the min() function it then tried to call (compile error), and
        // left max/min unqualified.
        int v = std::max(r, std::max(g, b));
        int minc = std::min(r, std::min(g, b));
        int chroma = v - minc;

        // saturation scaled to 0-255
        // BUG FIX: the original integer division (v - min) / v was always
        // 0 or 1, collapsing the saturation channel.
        int s = (v != 0) ? (255 * chroma) / v : 0;

        // hue in half-degrees, sector chosen by which channel is the max
        // BUG FIX: the original divided by s instead of the chroma, and
        // produced values up to 300 that overflowed the uint8 channel.
        int h;
        if (chroma == 0) h = 0;                                   // grey: hue undefined
        else if (v == r) h = (30 * (g - b) / chroma + 180) % 180; // red sector, wrap negatives
        else if (v == g) h = 60 + 30 * (b - r) / chroma;          // green sector
        else             h = 120 + 30 * (r - g) / chroma;         // blue sector

        // store the converted pixel
        orig->data[i] = h;
        orig->data[i + 1] = s;
        orig->data[i + 2] = v;
    }
    printf("Conversion complete\n");
}
157 |
|
158 |
/* Image callback: converts the incoming RGB frame to HSV, finds the
 * hue-matched blob, and publishes its centre of mass as a Point whose
 * z is a distance estimate inversely proportional to blob area
 * (z = -1 when no usable blob was found).
 */
void target_cb(const sensor_msgs::Image orig) {

    printf("beginning callback \n");

    /* TODO(review): tf lookup disabled; re-enable once the frames exist.
    tf::TransformListener listener;
    tf::StampedTransform transform;
    try {
        listener.lookupTransform("/camera", "/kinect", ros::Time(0), transform);
    }
    catch (tf::TransformException ex) {
        ROS_ERROR("%s", ex.what());
    }*/

    // Working copy of the frame; converted in place, then searched.
    sensor_msgs::Image hsvImage = orig;
    sensor_msgs::Image * hsvPtr = &hsvImage;

    // BUG FIX: the original converted a separate copy to HSV but then ran
    // blob detection on the *unconverted* hsvImage; convert the same buffer
    // that detectBlobs searches.
    convertToHSV(hsvPtr);

    // detect blob: centre of mass (pixels) and area (pixel count)
    int comX;
    int comY;
    int area;
    detectBlobs(hsvPtr, &comX, &comY, &area);

    // Distance is inversely proportional to area; -1 signals "no target".
    // BUG FIX: distance was declared int, truncating 100.0 / area to 0 for
    // any area above 100.
    double distance;
    if (area > 1){
        distance = 100.0 / (double) area;
    }
    else {
        distance = -1;
    }

    // fill point based on target
    // TODO: change coordinate axes and apply the tf transform before
    // publishing; for now publish the camera-frame estimate directly.
    geometry_msgs::Point origPoint;
    origPoint.x = comX;
    origPoint.y = comY;
    origPoint.z = distance;

    pub.publish(origPoint);

    printf("Ending callback");
}
221 |
|
222 |
/* Debug callback: ignores the incoming image and publishes a fixed
 * (1, 2, 3) point so the pipeline can be exercised end to end.
 */
void target_cb_test(const sensor_msgs::Image image){
    geometry_msgs::Point dummy;
    dummy.x = 1;
    dummy.y = 2;
    dummy.z = 3;
    pub.publish(dummy);
}
230 |
|
231 |
/* Node entry point: registers the output publishers and the camera
 * subscription, then hands control to the ROS event loop.
 */
int main(int argc, char **argv) {
    ros::init(argc, argv, "v2v3_converter");
    ros::NodeHandle n;

    // Outputs: 3-D target estimate and the debug blob image.
    pub = n.advertise<geometry_msgs::Point>("vision/target_3d", 1000);
    img_pub = n.advertise<sensor_msgs::Image>("vision/blob_image", 1000);

    // Input: Kinect RGB stream; queue size 1 keeps only the freshest frame.
    ros::Subscriber sub = n.subscribe("/camera/rgb/image_color", 1, target_cb);

    ros::spin();
    return 0;
}