Revision 14f111dd
ID | 14f111dd1abb85de1b1627467f5220d3ebb5db82 |
Added files, changed it, still getting a bad executable error.
vision/src/blob.h | ||
---|---|---|
1 |
/************************************************************************ |
|
2 |
Blob.h |
|
3 |
|
|
4 |
FUNCIONALITAT: Definició de la classe CBlob |
|
5 |
AUTOR: Inspecta S.L. |
|
6 |
MODIFICACIONS (Modificació, Autor, Data): |
|
7 |
|
|
8 |
FUNCTIONALITY: Definition of the CBlob class and some helper classes to perform |
|
9 |
some calculations on it |
|
10 |
AUTHOR: Inspecta S.L. |
|
11 |
MODIFICATIONS (Modification, Author, Date): |
|
12 |
|
|
13 |
**************************************************************************/ |
|
14 |
|
|
15 |
//! Disable warnings referred to 255 character truncation for the std:map |
|
16 |
#pragma warning( disable : 4786 ) |
|
17 |
|
|
18 |
#ifndef CBLOB_INSPECTA_INCLUDED |
|
19 |
#define CBLOB_INSPECTA_INCLUDED |
|
20 |
|
|
21 |
#include <opencv/cxcore.h> |
|
22 |
#include "BlobLibraryConfiguration.h" |
|
23 |
#include "BlobContour.h" |
|
24 |
|
|
25 |
|
|
26 |
#ifdef BLOB_OBJECT_FACTORY |
|
27 |
//! Object factory pattern implementation |
|
28 |
#include "..\inspecta\DesignPatterns\ObjectFactory.h" |
|
29 |
#endif |
|
30 |
|
|
31 |
|
|
32 |
//! Type of labelled images |
|
33 |
typedef unsigned int t_labelType; |
|
34 |
|
|
35 |
|
|
36 |
//! Blob class |
|
37 |
class CBlob |
|
38 |
{ |
|
39 |
typedef std::list<CBlobContour> t_contourList; |
|
40 |
|
|
41 |
public: |
|
42 |
CBlob(); |
|
43 |
CBlob( t_labelType id, CvPoint startPoint, CvSize originalImageSize ); |
|
44 |
~CBlob(); |
|
45 |
|
|
46 |
//! Copy constructor |
|
47 |
CBlob( const CBlob &src ); |
|
48 |
CBlob( const CBlob *src ); |
|
49 |
|
|
50 |
//! Operador d'assignaci? |
|
51 |
//! Assigment operator |
|
52 |
CBlob& operator=(const CBlob &src ); |
|
53 |
|
|
54 |
//! Adds a new internal contour to the blob |
|
55 |
void AddInternalContour( const CBlobContour &newContour ); |
|
56 |
|
|
57 |
//! Retrieves contour in Freeman's chain code |
|
58 |
CBlobContour *GetExternalContour() |
|
59 |
{ |
|
60 |
return &m_externalContour; |
|
61 |
} |
|
62 |
|
|
63 |
//! Retrieves blob storage |
|
64 |
CvMemStorage *GetStorage() |
|
65 |
{ |
|
66 |
return m_storage; |
|
67 |
} |
|
68 |
|
|
69 |
//! Get label ID |
|
70 |
t_labelType GetID() |
|
71 |
{ |
|
72 |
return m_id; |
|
73 |
} |
|
74 |
//! > 0 for extern blobs, 0 if not |
|
75 |
int Exterior( IplImage *mask, bool xBorder = true, bool yBorder = true ); |
|
76 |
//! Compute blob's area |
|
77 |
double Area(); |
|
78 |
//! Compute blob's perimeter |
|
79 |
double Perimeter(); |
|
80 |
//! Compute blob's moment (p,q up to MAX_CALCULATED_MOMENTS) |
|
81 |
double Moment(int p, int q); |
|
82 |
|
|
83 |
//! Compute extern perimeter |
|
84 |
double ExternPerimeter( IplImage *mask, bool xBorder = true, bool yBorder = true ); |
|
85 |
|
|
86 |
//! Get mean grey color |
|
87 |
double Mean( IplImage *image ); |
|
88 |
|
|
89 |
//! Get standard deviation grey color |
|
90 |
double StdDev( IplImage *image ); |
|
91 |
|
|
92 |
//! Indica si el blob est? buit ( no t? cap info associada ) |
|
93 |
//! Shows if the blob has associated information |
|
94 |
bool IsEmpty(); |
|
95 |
|
|
96 |
//! Retorna el poligon convex del blob |
|
97 |
//! Calculates the convex hull of the blob |
|
98 |
t_PointList GetConvexHull(); |
|
99 |
|
|
100 |
//! Pinta l'interior d'un blob d'un color determinat |
|
101 |
//! Paints the blob in an image |
|
102 |
void FillBlob( IplImage *imatge, CvScalar color, int offsetX = 0, int offsetY = 0 ); |
|
103 |
|
|
104 |
//! Join a blob to current one (add's contour |
|
105 |
void JoinBlob( CBlob *blob ); |
|
106 |
|
|
107 |
//! Get bounding box |
|
108 |
CvRect GetBoundingBox(); |
|
109 |
//! Get bounding ellipse |
|
110 |
CvBox2D GetEllipse(); |
|
111 |
|
|
112 |
//! Minimun X |
|
113 |
double MinX() |
|
114 |
{ |
|
115 |
return GetBoundingBox().x; |
|
116 |
} |
|
117 |
//! Minimun Y |
|
118 |
double MinY() |
|
119 |
{ |
|
120 |
return GetBoundingBox().y; |
|
121 |
} |
|
122 |
//! Maximun X |
|
123 |
double MaxX() |
|
124 |
{ |
|
125 |
return GetBoundingBox().x + GetBoundingBox().width; |
|
126 |
} |
|
127 |
//! Maximun Y |
|
128 |
double MaxY() |
|
129 |
{ |
|
130 |
return GetBoundingBox().y + GetBoundingBox().height; |
|
131 |
} |
|
132 |
private: |
|
133 |
|
|
134 |
//! Deallocates all contours |
|
135 |
void ClearContours(); |
|
136 |
////////////////////////////////////////////////////////////////////////// |
|
137 |
// Blob contours |
|
138 |
////////////////////////////////////////////////////////////////////////// |
|
139 |
|
|
140 |
|
|
141 |
//! Contour storage memory |
|
142 |
CvMemStorage *m_storage; |
|
143 |
//! External contour of the blob (crack codes) |
|
144 |
CBlobContour m_externalContour; |
|
145 |
//! Internal contours (crack codes) |
|
146 |
t_contourList m_internalContours; |
|
147 |
|
|
148 |
////////////////////////////////////////////////////////////////////////// |
|
149 |
// Blob features |
|
150 |
////////////////////////////////////////////////////////////////////////// |
|
151 |
|
|
152 |
//! Label number |
|
153 |
t_labelType m_id; |
|
154 |
//! Area |
|
155 |
double m_area; |
|
156 |
//! Perimeter |
|
157 |
double m_perimeter; |
|
158 |
//! Extern perimeter from blob |
|
159 |
double m_externPerimeter; |
|
160 |
//! Mean gray color |
|
161 |
double m_meanGray; |
|
162 |
//! Standard deviation from gray color blob distribution |
|
163 |
double m_stdDevGray; |
|
164 |
//! Bounding box |
|
165 |
CvRect m_boundingBox; |
|
166 |
//! Bounding ellipse |
|
167 |
CvBox2D m_ellipse; |
|
168 |
//! Sizes from image where blob is extracted |
|
169 |
CvSize m_originalImageSize; |
|
170 |
}; |
|
171 |
|
|
172 |
#endif //CBLOB_INSPECTA_INCLUDED |
vision/src/converteroverwrite.cpp | ||
---|---|---|
1 |
#include <ros/ros.h> |
|
2 |
#include <stdio.h> |
|
3 |
#include <vision/TargetDescriptor.h> |
|
4 |
#include <image_transport/image_transport.h> |
|
5 |
#include <geometry_msgs/Point.h> |
|
6 |
#include <tf/transform_listener.h> |
|
7 |
#include <algorithm> |
|
8 |
|
|
9 |
// Publisher for the estimated 3-D target point (geometry_msgs::Point).
ros::Publisher pub;
// Publisher for the thresholded blob image (sensor_msgs::Image).
ros::Publisher img_pub;

// Target color in HSV. hk is the hue the detector looks for; sk and vk
// are declared here but not referenced by the detection loop below —
// presumably reserved for saturation/value filtering (TODO confirm).
int hk = 216;
int sk = 255;
int vk = 255;
// Allowed deviation around hk when thresholding the hue channel.
int threshold = 25;
|
16 |
|
|
17 |
/** True when the byte immediately to the right of p equals the byte at p.
 *  NOTE(review): operates on raw byte indices; callers pass per-pixel
 *  indices — confirm the 1-channel assumption against hasAnAdjacent(). */
bool isRight(sensor_msgs::Image * img, int p){
    return img->data[p] == img->data[p + 1];
}
|
20 |
|
|
21 |
/** True when the byte immediately to the left of p equals the byte at p. */
bool isLeft(sensor_msgs::Image * img, int p){
    return img->data[p] == img->data[p - 1];
}
|
24 |
|
|
25 |
/** True when the byte one row above p (offset by image width) equals the byte at p. */
bool isUp(sensor_msgs::Image * img, int p){
    return img->data[p] == img->data[p - img->width];
}
|
28 |
|
|
29 |
/** True when the byte one row below p (offset by image width) equals the byte at p. */
bool isDown(sensor_msgs::Image * img, int p){
    return img->data[p] == img->data[p + img->width];
}
|
32 |
|
|
33 |
/** Returns the smaller of a and b. */
int min(int a, int b){
    return (b < a) ? b : a;
}
|
37 |
|
|
38 |
/** Returns the larger of a and b. */
int max(int a, int b){
    return (a < b) ? b : a;
}
|
42 |
|
|
43 |
/* Returns true when the pixel containing byte index j has at least one
 * 4-connected neighbour with the same value.
 *
 * Fixes over the previous revision:
 *  - the bottom-row guard compared the row index against the column
 *    count (nc - 1) instead of the row count (nl - 1);
 *  - only the final `if` carried an `else return false`, so several
 *    control paths fell off the end of a bool function (undefined
 *    behaviour); every path now returns explicitly.
 */
bool hasAnAdjacent(sensor_msgs::Image * image, int j){
    int nl = image->height;                     // number of rows
    int nc = image->width;                      // number of columns
    int nChannels = image->step / image->width; // bytes per pixel
    int pos = j / nChannels;                    // pixel index

    // Check each 4-neighbour only when it exists (not past an image edge).
    if ((pos % nc) != 0      && isLeft(image, pos))  return true;
    if ((pos / nc) != 0      && isUp(image, pos))    return true;
    if ((pos % nc) != nc - 1 && isRight(image, pos)) return true;
    if ((pos / nc) != nl - 1 && isDown(image, pos))  return true;
    return false;
}
|
62 |
|
|
63 |
/* Detects the blobs in an image within above defined constraints. Transforms image into 255 |
|
64 |
* for pixels in the blob, 0 for not in the blob. |
|
65 |
*/ |
|
66 |
void detectBlobs(sensor_msgs::Image * image, int * comX, int * comY, int * area){ |
|
67 |
int nl = image->height; // number of lines |
|
68 |
int nc = image->step; // number of columns |
|
69 |
int nChannels = image->step / image->width; // effective width |
|
70 |
|
|
71 |
// process image |
|
72 |
for (int i = 0; i < nl; i++){ |
|
73 |
for (int j = 0; j < nc; j+= nChannels){ |
|
74 |
if (image->data[i * image->width + j] <= hk + threshold && image->data[j] >= hk - threshold){ |
|
75 |
image->data[i * image->width + j] = 255; |
|
76 |
image->data[i * image->width + j+1] = 255; |
|
77 |
image->data[i * image->width + j+2] = 255; |
|
78 |
} |
|
79 |
else { |
|
80 |
image->data[i * image->width + j] = 0; |
|
81 |
image->data[i * image->width + j+1] = 0; |
|
82 |
image->data[i * image->width + j+2] = 0; |
|
83 |
} |
|
84 |
} |
|
85 |
} |
|
86 |
|
|
87 |
int comXSum = 0; |
|
88 |
int comYSum = 0; |
|
89 |
int blobPixelCount = 0; |
|
90 |
for (int i = 0; i < image->width * image->height; i+=nChannels){ |
|
91 |
if ( !(image->data[i] == 255 && hasAnAdjacent(image, i)) ){ |
|
92 |
image->data[i] = 0; |
|
93 |
image->data[i + 1] = 0; |
|
94 |
image->data[i + 2] = 0; |
|
95 |
continue; |
|
96 |
} |
|
97 |
blobPixelCount++; |
|
98 |
comXSum += i / nChannels % image->width; |
|
99 |
comYSum += i / nChannels / image->width; |
|
100 |
} |
|
101 |
if (blobPixelCount != 0){ |
|
102 |
// get average |
|
103 |
comXSum /= blobPixelCount; |
|
104 |
comYSum /= blobPixelCount; |
|
105 |
} |
|
106 |
|
|
107 |
*area = blobPixelCount; |
|
108 |
*comX = comXSum; |
|
109 |
*comY = comYSum; |
|
110 |
|
|
111 |
img_pub.publish(*image); |
|
112 |
} |
|
113 |
|
|
114 |
/* Converts an image from RGB to HSV in place.
 *
 * Fixes over the previous revision:
 *  - saturation used integer division (v - min) / v, which truncates to 0
 *    for almost every pixel; it is now scaled to 0..255;
 *  - hue divided by s instead of the chroma (v - min), and negative hue
 *    was never wrapped into range;
 *  - the loop bound covered only height*width bytes, so most pixels were
 *    never converted; rows are now walked via the row stride `step`.
 *
 * Channel order is assumed R,G,B at offsets 0,1,2 — TODO confirm against
 * the /camera/rgb/image_color encoding.  Hue is rescaled from degrees to
 * 0..255 so it fits in a byte; NOTE(review): the global hk threshold may
 * need re-tuning against this scale.
 */
void convertToHSV(sensor_msgs::Image * orig){
    int nChannels = orig->step / orig->width;
    printf("Converting to HSV \n");

    for (unsigned int row = 0; row < orig->height; row++){
        for (unsigned int col = 0; col < orig->width; col++){
            int i = row * orig->step + col * nChannels;

            int r = orig->data[i];
            int g = orig->data[i + 1];
            int b = orig->data[i + 2];

            // value = max channel, lo = min channel, delta = chroma
            int v  = r > g ? (r > b ? r : b) : (g > b ? g : b);
            int lo = r < g ? (r < b ? r : b) : (g < b ? g : b);
            int delta = v - lo;

            // saturation scaled to 0..255 (0 when the pixel is black)
            int s = (v != 0) ? (255 * delta) / v : 0;

            // hue in degrees, then wrapped into [0, 360)
            int h;
            if (delta == 0)  h = 0;
            else if (v == r) h = 60 * (g - b) / delta;
            else if (v == g) h = 120 + 60 * (b - r) / delta;
            else             h = 240 + 60 * (r - g) / delta;
            if (h < 0) h += 360;

            orig->data[i]     = (unsigned char)(h * 255 / 360); // hue, 0..255
            orig->data[i + 1] = (unsigned char)s;
            orig->data[i + 2] = (unsigned char)v;
        }
    }
    printf("Conversion complete\n");
}
|
175 |
|
|
176 |
/* Image callback: converts the incoming frame to HSV, locates the
 * coloured blob and publishes an approximate 3-D target point.
 *
 * Fixes over the previous revision:
 *  - the source contained the HTML-mangled token `©` where
 *    `&copy;` (address-of the working copy) was intended, and
 *    detectBlobs() then ran on a *different, unconverted* copy
 *    (hsvImage) from the one convertToHSV() modified; a single copy is
 *    now converted and searched;
 *  - `distance` was an int, so 100.0 / area truncated to 0 for any
 *    area > 100; it is now a double.
 */
void target_cb(const sensor_msgs::Image orig) {

    printf("beginning callback \n");
    tf::TransformListener listener;
    geometry_msgs::Point point;
    tf::StampedTransform transform;

    int width = orig.width;
    int height = orig.height;
    /*try {
        listener.lookupTransform("/camera", "/kinect", ros::Time(0), transform);
    }
    catch (tf::TransformException ex) {
        ROS_ERROR("%s", ex.what());
    }*/

    // Writable copy of the frame; the same buffer is converted to HSV
    // and then scanned for the blob.
    sensor_msgs::Image hsvImage = orig;
    sensor_msgs::Image * hsvPtr = &hsvImage;

    convertToHSV(hsvPtr);

    int comX;
    int comY;
    int area;
    detectBlobs(hsvPtr, &comX, &comY, &area);

    // Distance estimate: inversely proportional to blob area, -1 when
    // the blob is too small to trust.
    double distance;
    if (area > 5){
        distance = 100.0 / (double) area;
    }
    else {
        distance = -1;
    }

    // fill point based on target and tf
    geometry_msgs::Point origPoint;

    // TODO: Change coordinate axes
    origPoint.x = distance;
    origPoint.y = width / 2 - comX;
    origPoint.z = comY - height / 2;

    // TODO: transform into the target frame; for now publish directly
    pub.publish(origPoint);

    printf("Ending callback");
}
|
242 |
|
|
243 |
/* Debug callback: ignores the frame and publishes a fixed (1, 2, 3)
 * point, useful for checking the topic wiring. */
void target_cb_test(const sensor_msgs::Image image){
    geometry_msgs::Point p;
    p.x = 1;
    p.y = 2;
    p.z = 3;
    pub.publish(p);
}
|
251 |
|
|
252 |
int main(int argc, char **argv) { |
|
253 |
ros::init(argc, argv, "v2v3_converter"); |
|
254 |
ros::NodeHandle n; |
|
255 |
pub = n.advertise<geometry_msgs::Point>("vision/target_3d", 1000); |
|
256 |
img_pub = n.advertise<sensor_msgs::Image>("vision/blob_image", 1000); |
|
257 |
ros::Subscriber sub = n.subscribe("/camera/rgb/image_color", 1, target_cb); |
|
258 |
ros::spin(); |
|
259 |
return 0; |
|
260 |
} |
vision/src/opencvdetect.cpp | ||
---|---|---|
1 |
#include "/home/nstanley/OpenCV/OpenCV-2.3.1/include/opencv/cv.h" |
|
2 |
#include "/home/nstanley/OpenCV/OpenCV-2.3.1/include/opencv/highgui.h" |
|
3 |
#include <opencv/cxcore.h> |
|
4 |
#include <ros/ros.h> |
|
5 |
#include <stdio.h> |
|
6 |
#include <cv_bridge/cv_bridge.h> |
|
7 |
#include <sensor_msgs/image_encodings.h> |
|
8 |
#include <vision/TargetDescriptor.h> |
|
9 |
#include <image_transport/image_transport.h> |
|
10 |
#include <geometry_msgs/Point.h> |
|
11 |
#include <tf/transform_listener.h> |
|
12 |
#include <algorithm> |
|
13 |
#include </home/nstanley/cvblobs8.3_linux/blob.h> |
|
14 |
#include </home/nstanley/cvblobs8.3_linux/BlobResult.h> |
|
15 |
|
|
16 |
// Publisher for the estimated 3-D target point (geometry_msgs::Point).
ros::Publisher pub;
// Publisher for the blob-overlay image (sensor_msgs::Image).
ros::Publisher img_pub;

// Target color components; hk/sk/vk mirror the converter node's values.
// NOTE(review): not referenced by this file's filter(), which hard-codes
// its own orange reference — confirm whether these should be used instead.
int hk = 216;
int sk = 255;
int vk = 255;
//int threshold = 25;
|
23 |
|
|
24 |
/* Scores how close an (r, g) pair is to the reference orange colour.
 * Returns abs(diff - threshold) when the squared distance `diff` falls
 * below `threshold` (larger = closer), or 0 meaning "not orange".
 * NOTE(review): only the first two channels contribute; `b` and
 * orange[2] are unused — confirm this is intentional.
 */
int filter(int r, int g, int b, int threshold) {
    const int orange[3] = {216, 255, 255}; // Orange in HSV
    const int dr = orange[0] - r;
    const int dg = orange[1] - g;
    const int diff = dr * dr + dg * dg;
    if (diff < threshold) return abs(diff - threshold);
    return 0;
}
|
32 |
|
|
33 |
/* Image callback: converts the frame to OpenCV, masks pixels near the
 * target colour, runs cvblobs detection and publishes an image with the
 * large blobs filled in.
 *
 * Fixes over the previous revision (the likely "bad executable" causes):
 *  - `input` was an uninitialised IplImage* that was dereferenced and
 *    later passed to cvReleaseImage(); it is now a stack IplImage header
 *    backed by cv_ptr->image (shared data, nothing to release);
 *  - the mask image i1 was never cleared, so it started with garbage
 *    (cvCreateImage does not zero the buffer);
 *  - bw_img was cloned as a 3-channel image but used as the destination
 *    of a GRAY conversion, which requires a single channel;
 *  - cv_bridge delivers BGR8, so the colour conversions use CV_BGR2GRAY
 *    and CV_BGR2HSV instead of the RGB variants;
 *  - blob_image.image was assigned *(img->imageData) — a single char;
 *    it now deep-copies the whole image before the buffers are freed,
 *    and the publish goes through toImageMsg().
 */
void target_cb(const sensor_msgs::Image orig) {

    printf("beginning callback \n");
    tf::TransformListener listener;
    geometry_msgs::Point point;
    tf::StampedTransform transform;

    int width = orig.width;
    int height = orig.height;
    /*try {
        listener.lookupTransform("/camera", "/kinect", ros::Time(0), transform);
    }
    catch (tf::TransformException ex) {
        ROS_ERROR("%s", ex.what());
    }*/

    // convert to OpenCV
    cv_bridge::CvImagePtr cv_ptr;
    try{
        cv_ptr = cv_bridge::toCvCopy(orig, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    // IplImage header over cv_ptr's pixel buffer (do NOT release it)
    IplImage inputHeader = cv_ptr->image;
    IplImage* input = &inputHeader;

    IplImage* img;
    IplImage* hsv_img;
    IplImage* bw_img;
    IplImage* i1;
    CBlobResult blobs;
    CBlob blobArea;

    // Initialize images, allocate memory
    img = cvCloneImage(input);
    hsv_img = cvCloneImage(img);
    bw_img = cvCreateImage(cvSize(img->width, img->height), 8, 1); // 1-channel GRAY target
    i1 = cvCreateImage(cvSize(img->width, img->height), 8, 1);
    cvZero(i1); // mask must start empty

    // Smooth input image using a Gaussian filter, assign HSV, BW image
    cvSmooth(input, img, CV_GAUSSIAN, 7, 9, 0, 0);
    cvCvtColor(img, bw_img, CV_BGR2GRAY);
    cvCvtColor(img, hsv_img, CV_BGR2HSV);

    // Simple filter that creates a mask in i1 for colours near orange
    for(int i = 0; i < hsv_img->height; i++) {
        for(int j = 0; j < hsv_img->width; j++) {
            uchar *row = (uchar *)(hsv_img->imageData + i*hsv_img->widthStep);
            int r = row[j*hsv_img->nChannels + 2];
            int g = row[j*hsv_img->nChannels + 1];
            int b = row[j*hsv_img->nChannels + 0];
            if (filter(r, g, b, 5000)) {
                ((uchar *)(i1->imageData + i*i1->widthStep))[j] = 250;
            }
        }
    }

    // Detect blobs using the mask, keep only sufficiently large ones
    blobs = CBlobResult(bw_img, i1, 100/*, true*/);
    blobs.Filter(blobs, B_INCLUDE, CBlobGetArea(), B_GREATER, 500);
    blobs.GetNthBlob(CBlobGetArea(), blobs.GetNumBlobs() - 1, blobArea); // example

    // NOTE(review): starts at 1 — confirm blob 0 is the background blob.
    for (int i = 1; i < blobs.GetNumBlobs(); i++ )
    {
        blobArea = blobs.GetBlob(i);
        if(blobArea.Area() > 150)
            blobArea.FillBlob(img, cvScalar(255, 0, 0));
    }

    // Publish the blob image (deep copy so it survives the releases below)
    cv_bridge::CvImage blob_image;
    blob_image.header = orig.header;
    blob_image.encoding = sensor_msgs::image_encodings::BGR8;
    blob_image.image = cv::Mat(img, true);
    img_pub.publish(blob_image.toImageMsg());

    // Clean up (input is only a header over cv_ptr's data — not released)
    cvReleaseImage(&i1);
    cvReleaseImage(&bw_img);
    cvReleaseImage(&hsv_img);
    cvReleaseImage(&img);

    printf("Ending callback\n");
}
|
155 |
|
|
156 |
/* Debug callback: ignores the frame and publishes a fixed (1, 2, 3)
 * point, useful for checking the topic wiring. */
void target_cb_test(const sensor_msgs::Image image){
    geometry_msgs::Point p;
    p.x = 1;
    p.y = 2;
    p.z = 3;
    pub.publish(p);
}
|
164 |
|
|
165 |
int main(int argc, char **argv) { |
|
166 |
ros::init(argc, argv, "v2v3_converter"); |
|
167 |
ros::NodeHandle n; |
|
168 |
pub = n.advertise<geometry_msgs::Point>("vision/target_3d", 1000); |
|
169 |
img_pub = n.advertise<sensor_msgs::Image>("vision/blob_image", 1000); |
|
170 |
ros::Subscriber sub = n.subscribe("/camera/rgb/image_color", 1, target_cb); |
|
171 |
ros::spin(); |
|
172 |
return 0; |
|
173 |
} |
Also available in: Unified diff