root / vision / src / converteroverwrite.cpp @ 32006eb7
History | View | Annotate | Download (6.02 KB)
1 |
#include <ros/ros.h> |
---|---|
2 |
#include <stdio.h> |
3 |
#include <vision/TargetDescriptor.h> |
4 |
#include <image_transport/image_transport.h> |
5 |
#include <geometry_msgs/Point.h> |
6 |
#include <tf/transform_listener.h> |
7 |
#include <algorithm> |
8 |
|
9 |
ros::Publisher pub;      // target 3-D point publisher (advertised on vision/target_3d in main)
ros::Publisher img_pub;  // debug blob-image publisher (advertised on vision/blob_image in main)

int hk = 216;       // target hue: centre of the detection window used by detectBlobs
int sk = 255;       // target saturation -- not referenced anywhere in this file
int vk = 255;       // target value -- not referenced anywhere in this file
int threshold = 25; // half-width of the hue window around hk
16 |
|
17 |
// True when the buffer element one past index p equals the element at p
// (same-value neighbour to the right).
bool isRight(sensor_msgs::Image * img, int p){
    const unsigned char current = img->data[p];
    return img->data[p + 1] == current;
}
20 |
|
21 |
// True when the buffer element one before index p equals the element at p
// (same-value neighbour to the left).
bool isLeft(sensor_msgs::Image * img, int p){
    const unsigned char current = img->data[p];
    return img->data[p - 1] == current;
}
24 |
|
25 |
// True when the element one image-width before index p equals the element at
// p (neighbour above -- assumes p indexes single-channel pixel data; callers
// pass a pixel index, see hasAnAdjacent).
bool isUp(sensor_msgs::Image * img, int p){
    const unsigned char current = img->data[p];
    return img->data[p - img->width] == current;
}
28 |
|
29 |
// True when the element one image-width after index p equals the element at
// p (neighbour below -- assumes p indexes single-channel pixel data; callers
// pass a pixel index, see hasAnAdjacent).
bool isDown(sensor_msgs::Image * img, int p){
    const unsigned char current = img->data[p];
    return img->data[p + img->width] == current;
}
32 |
|
33 |
// Returns the smaller of two ints. Kept as a named function for existing
// callers, but delegates to the standard library instead of hand-rolling.
int min(int a, int b){
    return std::min(a, b);
}
37 |
|
38 |
// Returns the larger of two ints. Kept as a named function for existing
// callers, but delegates to the standard library instead of hand-rolling.
int max(int a, int b){
    return std::max(a, b);
}
42 |
|
43 |
/* Returns true when the pixel containing byte offset j has at least one
 * 4-connected neighbour holding the same value.
 * j is a byte index into image->data; pos is the corresponding pixel index.
 * Boundary guards skip neighbours that fall outside the image.
 */
bool hasAnAdjacent(sensor_msgs::Image * image, int j){
    int nl = image->height; // number of lines (rows)
    int nc = image->width;  // number of columns
    int nChannels = image->step / image->width;
    int pos = j / nChannels; // pixel index of byte j
    if ((pos % nc) != 0){            // not in the first column
        if (isLeft(image, pos)) return true;
    }
    if ((pos / nc) != 0){            // not in the top row
        if (isUp(image, pos)) return true;
    }
    if ((pos % nc) != nc - 1){       // not in the last column
        if (isRight(image, pos)) return true;
    }
    // FIX: the bottom-row guard compared against nc - 1 (the width); on a
    // non-square image that let isDown read past the end of the buffer.
    if ((pos / nc) != nl - 1){       // not in the bottom row
        if (isDown(image, pos)) return true;
    }
    return false;
}
62 |
|
63 |
/* Detects the blobs in an image within above defined constraints. Transforms image into 255
|
64 |
* for pixels in the blob, 0 for not in the blob.
|
65 |
*/
|
66 |
void detectBlobs(sensor_msgs::Image * image, int * comX, int * comY, int * area){ |
67 |
int nl = image->height; // number of lines |
68 |
int nc = image->step; // number of columns |
69 |
int nChannels = image->step / image->width; // effective width |
70 |
|
71 |
// process image
|
72 |
for (int i = 0; i < nl; i++){ |
73 |
for (int j = 0; j < nc; j+= nChannels){ |
74 |
if (image->data[i * image->width + j] <= hk + threshold && image->data[j] >= hk - threshold){
|
75 |
image->data[i * image->width + j] = 255;
|
76 |
image->data[i * image->width + j+1] = 255; |
77 |
image->data[i * image->width + j+2] = 255; |
78 |
} |
79 |
else {
|
80 |
image->data[i * image->width + j] = 0;
|
81 |
image->data[i * image->width + j+1] = 0; |
82 |
image->data[i * image->width + j+2] = 0; |
83 |
} |
84 |
} |
85 |
} |
86 |
|
87 |
int comXSum = 0; |
88 |
int comYSum = 0; |
89 |
int blobPixelCount = 0; |
90 |
for (unsigned int i = 0; i < image->width * image->height; i+=nChannels){ |
91 |
if ( !(image->data[i] == 255 && hasAnAdjacent(image, i)) ){ |
92 |
image->data[i] = 0;
|
93 |
image->data[i + 1] = 0; |
94 |
image->data[i + 2] = 0; |
95 |
continue;
|
96 |
} |
97 |
blobPixelCount++; |
98 |
comXSum += i / nChannels % image->width; |
99 |
comYSum += i / nChannels / image->width; |
100 |
} |
101 |
if (blobPixelCount != 0){ |
102 |
// get average
|
103 |
comXSum /= blobPixelCount; |
104 |
comYSum /= blobPixelCount; |
105 |
} |
106 |
|
107 |
*area = blobPixelCount; |
108 |
*comX = comXSum; |
109 |
*comY = comYSum; |
110 |
|
111 |
img_pub.publish(*image); |
112 |
} |
113 |
|
114 |
/* Converts an RGB image to HSV in place.
 * Channel layout after conversion: data[i] = H (0-179, degrees/2 so it fits
 * a byte, OpenCV convention), data[i+1] = S (0-255), data[i+2] = V (0-255).
 * NOTE(review): the hue window (hk / threshold globals) used downstream was
 * tuned against the old, broken conversion and likely needs retuning.
 */
void convertToHSV(sensor_msgs::Image * orig){

    int nChannels = orig->step / orig->width;  // bytes per pixel
    printf("Converting to HSV \n");

    // FIX: the original loop bound was height * width, which only covers
    // 1/nChannels of the pixel buffer; iterate over height * step bytes.
    unsigned int total = orig->height * orig->step;
    for (unsigned int i = 0; i + 2 < total; i += nChannels){
        int r = orig->data[i];
        int g = orig->data[i + 1];
        int b = orig->data[i + 2];

        int v = std::max(r, std::max(g, b));   // value = max channel
        int lo = std::min(r, std::min(g, b));  // min channel
        int delta = v - lo;

        // Saturation scaled to 0-255.
        // FIX: the original computed (v - min) / v in integer arithmetic,
        // which truncates to 0 for nearly every pixel and in turn forced the
        // hue to 0 everywhere.
        int s = (v == 0) ? 0 : 255 * delta / v;

        // Hue in degrees. The standard formula divides by delta (max - min);
        // the original divided by the saturation, which is incorrect.
        int h;
        if (delta == 0) h = 0;
        else if (v == r) h = (60 * (g - b) / delta + 360) % 360;  // wrap negatives
        else if (v == g) h = 120 + 60 * (b - r) / delta;
        else             h = 240 + 60 * (r - g) / delta;

        orig->data[i] = (unsigned char)(h / 2);  // halve so 0-359 fits a byte
        orig->data[i + 1] = (unsigned char)s;
        orig->data[i + 2] = (unsigned char)v;
    }
    printf("Conversion complete\n");
}
175 |
|
176 |
/* Image callback: converts the incoming RGB frame to HSV, detects the target
 * blob, estimates a distance inversely proportional to the blob area, and
 * publishes the target as a 3-D point (x = distance, y/z = offset from the
 * image centre) on the pub publisher.
 */
void target_cb(const sensor_msgs::Image orig) {

    printf("beginning callback \n");
    // NOTE(review): building a TransformListener per callback is expensive;
    // it is only needed by the commented-out lookup below.
    tf::TransformListener listener;
    tf::StampedTransform transform;

    int width = orig.width;
    int height = orig.height;
    /*try {
        listener.lookupTransform("/camera", "/kinect", ros::Time(0), transform);
    }
    catch (tf::TransformException ex) {
        ROS_ERROR("%s", ex.what());
    }*/

    // Work on one mutable copy of the incoming image.
    // FIX: the original converted one copy ("source") to HSV but then ran
    // blob detection on a second, still-RGB copy ("hsvImage"); convert and
    // detect on the same image.
    sensor_msgs::Image hsvImage = orig;
    sensor_msgs::Image * hsvPtr = &hsvImage;

    // Convert to HSV in place.
    convertToHSV(hsvPtr);

    // Detect blob centre of mass and area.
    int comX;
    int comY;
    int area;
    detectBlobs(hsvPtr, &comX, &comY, &area);

    // Distance is inversely proportional to area.
    // FIX: distance was declared int, truncating 100.0 / area (to 0 for any
    // area above 100); keep it floating point.
    double distance;
    if (area > 5){
        distance = 100.0 / (double) area;
    }
    else {
        distance = -1;  // sentinel: blob too small / not found
    }

    // Fill point based on target and tf

    geometry_msgs::Point origPoint;

    // TODO: Change coordinate axes

    origPoint.x = distance;
    origPoint.y = width / 2 - comX;
    origPoint.z = comY - height / 2;

    // TODO transform into point (tf lookup above is still commented out)

    // for now just publish that
    pub.publish(origPoint);

    printf("Ending callback");

}
242 |
|
243 |
// Debug callback: ignores the incoming image and publishes a fixed
// (1, 2, 3) point, for exercising the pub topic end to end.
void target_cb_test(const sensor_msgs::Image image){

    geometry_msgs::Point fixedPoint;
    fixedPoint.x = 1;
    fixedPoint.y = 2;
    fixedPoint.z = 3;
    pub.publish(fixedPoint);
}
251 |
|
252 |
int main(int argc, char **argv) { |
253 |
ros::init(argc, argv, "v2v3_converter");
|
254 |
ros::NodeHandle n; |
255 |
pub = n.advertise<geometry_msgs::Point>("vision/target_3d", 1000); |
256 |
img_pub = n.advertise<sensor_msgs::Image>("vision/blob_image", 1000); |
257 |
ros::Subscriber sub = n.subscribe("/camera/rgb/image_color", 1, target_cb); |
258 |
ros::spin(); |
259 |
return 0; |
260 |
} |