Revision 937
New vision algorithm utilizing Hough Transform and edge detection
instead of the older one using ellipse fitting and thresholding
vision.c | ||
---|---|---|
14 | 14 |
#include <stdio.h> |
15 | 15 |
#include <stdlib.h> |
16 | 16 |
|
17 |
#define MINH 50 //min threshold level |
|
18 |
#define MAXH 75 //max threshold level |
|
17 |
#define MIN_PADDING 70 |
|
19 | 18 |
|
20 |
#define MIN_BOX_WIDTH 10 |
|
21 |
#define MAX_BOX_WIDTH 20 |
|
22 |
#define MIN_BOX_HEIGHT 10 |
|
23 |
#define MAX_BOX_HEIGHT 20 |
|
24 |
|
|
25 |
#define BEST_CENTER_PADDING_X 9 |
|
26 |
#define BEST_CENTER_PADDING_Y 9 |
|
27 |
|
|
28 |
#define ELLIPSE_COUNT_TO_BE_OBJECT 7 |
|
29 |
|
|
30 | 19 |
#define DEBUG 1 //Debug to find threshold level |
31 | 20 |
|
32 | 21 |
int write_JPEG_to_mem (unsigned char *storage, int length, IplImage *img, int quality); |
... | ... | |
43 | 32 |
int vision_init(/*char* filename_*/) { |
44 | 33 |
/*filename = filename_;*/ |
45 | 34 |
|
46 |
if (DEBUG) |
|
47 |
cvNamedWindow("Result", CV_WINDOW_AUTOSIZE); |
|
48 |
|
|
49 | 35 |
//TODO: replace with 1394 capture so we don't have to use Coriander |
50 | 36 |
capture = cvCreateCameraCapture(-1); |
51 |
|
|
52 | 37 |
if( !capture ) { |
53 | 38 |
fprintf( stderr, "ERROR: capture is NULL \n" ); |
54 | 39 |
return -1; |
55 | 40 |
} |
41 |
|
|
42 |
if (DEBUG) |
|
43 |
cvNamedWindow("Result", CV_WINDOW_AUTOSIZE); |
|
56 | 44 |
|
57 | 45 |
return 0; |
58 | 46 |
} |
... | ... | |
79 | 67 |
} |
80 | 68 |
|
81 | 69 |
/*
 * vision_get_robot_positions: grab one frame from the camera, detect
 * circular robot markers with a Hough transform, cluster detections that
 * fall within MIN_PADDING pixels of each other, and report one position
 * per cluster.
 *
 * On success *positions receives a malloc'd array of VisionPosition (the
 * caller owns and frees it; NULL when no robots are found) and the return
 * value is the number of entries.  Returns -1 on capture or allocation
 * failure.
 *
 * NOTE(review): reconstructed from a side-by-side diff dump of revision 937.
 * The allocation-failure branch (diff lines 139-141) was elided in the view
 * and is reconstructed conservatively below — confirm against the repo.
 */
int vision_get_robot_positions(VisionPosition** positions) {
    IplImage *image03;

    if (!cvGrabFrame(capture)) {            // capture a frame
        printf("Could not grab a frame\n\7");
        return -1;                          // rev 937 fix: no longer exit(0)
    }
    /* The frame returned by cvRetrieveFrame is owned by the capture
     * context and must NOT be released here. */
    IplImage *im_capture = cvRetrieveFrame(capture); // retrieve the captured frame

    /* cvHoughCircles requires a single-channel image: convert to gray. */
    image03 = cvCreateImage(cvGetSize(im_capture), im_capture->depth, 1);
    cvCvtColor(im_capture, image03, CV_RGB2GRAY);

    struct CenterP centers[100] = {0};      // clusters of nearby circle centers
    int count = 0;                          // number of clusters in use

    cvSmooth(image03, image03, CV_GAUSSIAN, 9, 9); // smooth it, otherwise a lot of false circles may be detected
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* circles = cvHoughCircles(image03, storage, CV_HOUGH_GRADIENT,
                                    1, 1, 100, 43);

    int i;
    for (i = 0; i < circles->total && count < 100; i++)
    {
        //TODO: Augment with Harr cascade second level detecting orbs + identify robot with orb color

        float* p = (float*)cvGetSeqElem(circles, i);
        CvPoint center = cvPoint(cvRound(p[0]), cvRound(p[1]));
        /* p[2] is the circle radius; it is unused by the clustering, so the
         * dead `int r` local from the diff has been dropped. */

        if (DEBUG) cvCircle(image03, center, 3, CV_RGB(255,255,255), -1, 8, 0);

        /* Find the nearest existing cluster within MIN_PADDING pixels;
         * default to opening a new cluster (loc == count). */
        float dist = MIN_PADDING;
        int loc = count;

        int j;
        for (j = 0; j < count; j++)
        {
            int dx = centers[j].center.x - center.x;
            int dy = centers[j].center.y - center.y;
            float d = cvSqrt(dx * dx + dy * dy);

            if (d < dist)
            {
                dist = d;
                loc = j;
            }
        }

        /* Fold this detection into the chosen cluster as a running
         * (integer) average of its member centers. */
        int c = centers[loc].count;
        centers[loc].center.x = (centers[loc].center.x * c + center.x) / (c + 1);
        centers[loc].center.y = (centers[loc].center.y * c + center.y) / (c + 1);
        centers[loc].count++;
        if (loc == count) count++;          // a new cluster was opened
    }

    if (DEBUG)
    {
        for (i = 0; i < count; i++)
        {
            cvCircle(image03, centers[i].center, 60, CV_RGB(255,255,255), 3, 8, 0);
        }
        cvShowImage("Result", image03);
    }

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&image03);

    /* malloc(0) may legally return NULL; report "no robots" explicitly
     * instead of misreading that as an allocation failure. */
    if (count == 0) {
        *positions = NULL;
        return 0;
    }

    VisionPosition* pos_array = (VisionPosition*)malloc(sizeof(VisionPosition) * count);
    if (pos_array == NULL) {
        /* Reconstructed failure branch — see NOTE(review) above. */
        fprintf(stderr, "Failed to allocate position array.\n");
        return -1;
    }

    int c = 0;
    for (i = 0; i < count; i++) {
        pos_array[c].x = centers[i].center.x;
        pos_array[c].y = centers[i].center.y;
        c++;
    }

    *positions = pos_array;
    return count;
}
Also available in: Unified diff