Revision f3966b0d
ID | f3966b0da330af0011ac09817870ff8043032cdd |
changed a few things, tested, and now it outputs a geometry_msgs::Point so that the control group can use it. We still need to figure out a target HSV value so that we can use that to test.
vision/src/v2v3_converter.cpp | ||
---|---|---|
1 | 1 |
#include <ros/ros.h> |
2 |
#include <stdio.h> |
|
2 | 3 |
#include <vision/TargetDescriptor.h> |
3 | 4 |
#include <image_transport/image_transport.h> |
4 | 5 |
#include <geometry_msgs/Point.h> |
... | ... | |
7 | 8 |
ros::Publisher pub; |
8 | 9 |
ros::Publisher img_pub; |
9 | 10 |
|
10 |
int hk = 25;
|
|
11 |
int hk = 15;
|
|
11 | 12 |
int sk = 255; |
12 | 13 |
int vk = 255; |
13 | 14 |
|
... | ... | |
30 | 31 |
bool hasAnAdjacent(sensor_msgs::Image * image, int j){ |
31 | 32 |
int nl = image->height; // number of lines |
32 | 33 |
int nc = image->width; |
33 |
int nChannels = image->width / image->step;
|
|
34 |
int nChannels = image->step / image->width;
|
|
34 | 35 |
int pos = j / nChannels; |
35 | 36 |
if ((pos % nc) != 0){ |
36 | 37 |
if (isLeft(image, pos)) return true; |
... | ... | |
56 | 57 |
int nChannels = image->step / image->width; // effective width |
57 | 58 |
|
58 | 59 |
// process image |
59 |
for (int i = 1; i < nl; i++){
|
|
60 |
for (int i = 0; i < nl; i++){
|
|
60 | 61 |
for (int j = 0; j < nc; j+= nChannels){ |
61 |
if (image->data[i * image->width + j] < 30 && image->data[j] > 20){
|
|
62 |
if (image->data[i * image->width + j] <= hk + 10 && image->data[j] >= hk - 10){
|
|
62 | 63 |
image->data[i * image->width + j] = 255; |
63 | 64 |
image->data[i * image->width + j+1] = 255; |
64 | 65 |
image->data[i * image->width + j+2] = 255; |
... | ... | |
79 | 80 |
image->data[i] = 0; |
80 | 81 |
image->data[i + 1] = 0; |
81 | 82 |
image->data[i + 2] = 0; |
82 |
continue; |
|
83 |
continue;
|
|
83 | 84 |
} |
84 | 85 |
blobPixelCount++; |
85 | 86 |
comXSum += image->data[i] / 255; |
86 | 87 |
comYSum += image->data[i] / 255; |
87 | 88 |
} |
88 | 89 |
if (blobPixelCount != 0){ |
90 |
// get average |
|
89 | 91 |
comXSum /= blobPixelCount; |
90 | 92 |
comYSum /= blobPixelCount; |
91 | 93 |
} |
... | ... | |
98 | 100 |
} |
99 | 101 |
|
100 | 102 |
void convertToHSV(sensor_msgs::Image * orig){ |
101 |
int nChannels = orig->width / orig->step; |
|
103 |
|
|
104 |
int nChannels = orig->step / orig->width; |
|
105 |
printf("Converting to HSV \n"); |
|
102 | 106 |
|
103 | 107 |
// get the max, set it as v |
104 | 108 |
|
... | ... | |
154 | 158 |
orig->data[i+2] = v; |
155 | 159 |
|
156 | 160 |
} |
157 |
|
|
161 |
printf("Conversion complete\n"); |
|
158 | 162 |
} |
159 | 163 |
|
160 | 164 |
void target_cb(const sensor_msgs::Image orig) { |
165 |
|
|
166 |
printf("beginning callback \n"); |
|
161 | 167 |
tf::TransformListener listener; |
162 | 168 |
geometry_msgs::Point point; |
163 | 169 |
tf::StampedTransform transform; |
... | ... | |
187 | 193 |
|
188 | 194 |
detectBlobs(hsvPtr, &comX, &comY, &area); |
189 | 195 |
|
196 |
int distance; |
|
197 |
|
|
190 | 198 |
// give distance as inversely proportional to area, for (x,y,z) |
191 | 199 |
if (area > 10){ |
192 | 200 |
|
193 |
int distance = 100.0 / (float) area; |
|
201 |
distance = 100.0 / (float) area; |
|
202 |
|
|
203 |
} |
|
204 |
else { |
|
205 |
distance = -1; |
|
206 |
} |
|
207 |
|
|
208 |
// fill point based on target and tf |
|
194 | 209 |
|
195 |
// fill point based on target and tf
|
|
210 |
geometry_msgs::Point origPoint;
|
|
196 | 211 |
|
197 |
geometry_msgs::Point origPoint;
|
|
212 |
// TODO: Change coordinate axes
|
|
198 | 213 |
|
199 |
origPoint.x = comX;
|
|
200 |
origPoint.y = comY;
|
|
201 |
origPoint.z = distance;
|
|
214 |
origPoint.x = comX; |
|
215 |
origPoint.y = comY; |
|
216 |
origPoint.z = distance; |
|
202 | 217 |
|
203 |
// TODO transform into point
|
|
218 |
// TODO transform into point |
|
204 | 219 |
|
205 |
// for now just publish that |
|
220 |
// for now just publish that |
|
221 |
|
|
222 |
pub.publish(origPoint); |
|
206 | 223 |
|
207 |
pub.publish(origPoint); |
|
208 |
} |
|
224 |
printf("Ending callback"); |
|
225 |
|
|
226 |
} |
|
227 |
|
|
228 |
void target_cb_test(const sensor_msgs::Image image){ |
|
229 |
|
|
230 |
geometry_msgs::Point point; |
|
231 |
point.x = 1; |
|
232 |
point.y = 2; |
|
233 |
point.z = 3; |
|
234 |
pub.publish(point); |
|
209 | 235 |
} |
210 | 236 |
|
211 | 237 |
int main(int argc, char **argv) { |
212 | 238 |
ros::init(argc, argv, "v2v3_converter"); |
213 | 239 |
ros::NodeHandle n; |
240 |
pub = n.advertise<geometry_msgs::Point>("vision/target_3d", 1000); |
|
241 |
img_pub = n.advertise<sensor_msgs::Image>("vision/blob_image", 1000); |
|
214 | 242 |
ros::Subscriber sub = n.subscribe("/camera/rgb/image_color", 1, target_cb); |
215 |
pub = n.advertise<geometry_msgs::Point>("target_3d", 1000); |
|
216 |
img_pub = n.advertise<sensor_msgs::Image>("blob_image", 1000); |
|
217 | 243 |
ros::spin(); |
218 | 244 |
return 0; |
219 | 245 |
} |
Also available in: Unified diff