#include <vector>
#include <iostream>
#include <string>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/core/ocl.hpp>

using std::cout;
using std::cerr;
using std::vector;
using std::string;

using cv::Mat;
using cv::Point2f;
using cv::KeyPoint;
using cv::Scalar;
using cv::Ptr;

using cv::FastFeatureDetector;
using cv::SimpleBlobDetector;

using cv::DMatch;
using cv::BFMatcher;
using cv::DrawMatchesFlags;
using cv::Feature2D;
using cv::ORB;
using cv::BRISK;
using cv::AKAZE;
using cv::KAZE;

using cv::xfeatures2d::BriefDescriptorExtractor;
using cv::xfeatures2d::SURF;
using cv::xfeatures2d::SIFT;
using cv::xfeatures2d::DAISY;
using cv::xfeatures2d::FREAK;

// Thresholds used when pruning matches in match()
const double kDistanceCoef = 4.0;
const int kMaxMatchingSize = 50;
const double subsamplingRatio = 0.45;

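// detect_and_compute() selects a detector/descriptor based on the name given on the command
// line. A "fast" or "blob" prefix (e.g. "fastbrief", "blobdaisy") runs the corresponding
// detector first, which is required for the descriptor-only extractors ("brief", "freak",
// "daisy") since their compute() step needs keypoints that were already detected.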
void detect_and_compute(string type, Mat& img, vector<KeyPoint>& kpts, Mat& desc) {
    if (type.find("fast") == 0) {
        type = type.substr(4);
        Ptr<FastFeatureDetector> detector = FastFeatureDetector::create(10, true);
        detector->detect(img, kpts);
    }
    if (type.find("blob") == 0) {
        type = type.substr(4);
        Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create();
        detector->detect(img, kpts);
    }
    if (type == "surf") {
        Ptr<Feature2D> surf = SURF::create(800.0);
        surf->detectAndCompute(img, Mat(), kpts, desc);
    }
    if (type == "sift") {
        Ptr<Feature2D> sift = SIFT::create();
        sift->detectAndCompute(img, Mat(), kpts, desc);
    }
    if (type == "orb") {
        Ptr<ORB> orb = ORB::create();
        orb->detectAndCompute(img, Mat(), kpts, desc);
    }
    if (type == "brisk") {
        Ptr<BRISK> brisk = BRISK::create();
        brisk->detectAndCompute(img, Mat(), kpts, desc);
    }
    if (type == "kaze") {
        Ptr<KAZE> kaze = KAZE::create();
        kaze->detectAndCompute(img, Mat(), kpts, desc);
    }
    if (type == "akaze") {
        Ptr<AKAZE> akaze = AKAZE::create();
        akaze->detectAndCompute(img, Mat(), kpts, desc);
    }
    if (type == "freak") {
        Ptr<FREAK> freak = FREAK::create();
        freak->compute(img, kpts, desc);
    }
    if (type == "daisy") {
        Ptr<DAISY> daisy = DAISY::create();
        daisy->compute(img, kpts, desc);
    }
    if (type == "brief") {
        Ptr<BriefDescriptorExtractor> brief = BriefDescriptorExtractor::create(64);
        brief->compute(img, kpts, desc);
    }
}


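// match() supports two strategies: plain brute-force matching ("bf") and 1-nearest-neighbour
// matching via knnMatch ("knn"), both with cross-checking enabled. The matches are then sorted
// by distance and pruned using kDistanceCoef and kMaxMatchingSize. Note that NORM_L2 is used
// for every descriptor type; for binary descriptors (ORB, BRISK, AKAZE, BRIEF, FREAK),
// NORM_HAMMING would normally be the better choice.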
void match(string type, Mat& desc1, Mat& desc2, vector<DMatch>& matches) {
    matches.clear();
    if (type == "bf") {
        BFMatcher desc_matcher(cv::NORM_L2, true);
        desc_matcher.match(desc1, desc2, matches, Mat());
    }
    if (type == "knn") {
        BFMatcher desc_matcher(cv::NORM_L2, true);
        vector<vector<DMatch>> vmatches;
        desc_matcher.knnMatch(desc1, desc2, vmatches, 1);
        for (int i = 0; i < static_cast<int>(vmatches.size()); ++i) {
            if (!vmatches[i].size()) {
                continue;
            }
            matches.push_back(vmatches[i][0]);
        }
    }
    if (matches.empty()) {
        return;  // nothing to prune
    }
    // Keep only matches whose distance is within kDistanceCoef times the best distance,
    // and cap the total number of matches at kMaxMatchingSize.
    std::sort(matches.begin(), matches.end());
    while (matches.front().distance * kDistanceCoef < matches.back().distance) {
        matches.pop_back();
    }
    while (matches.size() > static_cast<size_t>(kMaxMatchingSize)) {
        matches.pop_back();
    }
}


int main(int argc, char** argv) {
    // The program expects exactly four arguments:
    // - descriptor type: "surf", "sift", "orb", "brisk", "kaze", "akaze", "freak", "daisy" or
    //   "brief". For "brief", "freak" and "daisy" you also need a detector prefix that is
    //   either "blob" or "fast" (e.g. "fastbrief", "blobdaisy").
    // - matcher type: "bf" or "knn".
    // - path to the object (reference) image.
    // - path to the scene image or video.
    if (argc != 5) {
        cerr << "\nError: wrong number of arguments (got " << argc - 1 << ", expected 4).\n";
        cerr << "Examples:\n"
             << argv[0] << " surf knn ../box.png ../box_in_scene.png\n"
             << argv[0] << " fastfreak bf ../box.png ../box_in_scene.png\n"
             << "\nNOTE: Not all of these methods are free; check the licensing conditions!\n\n"
             << std::endl;
        exit(1);
    }

    string desc_type(argv[1]);
    string match_type(argv[2]);

    string img_file1(argv[3]);
    string img_file2(argv[4]);

    Mat img1 = cv::imread(img_file1, cv::IMREAD_COLOR);
    Mat img2 = cv::imread(img_file2, cv::IMREAD_COLOR);

    if (img1.channels() != 1) {
        cv::cvtColor(img1, img1, cv::COLOR_BGR2GRAY);
    }

    if (img2.channels() != 1) {
        cv::cvtColor(img2, img2, cv::COLOR_BGR2GRAY);
    }

    // Read the input video (the scene path is also opened as a video stream)
    cv::VideoCapture cap(img_file2);

    vector<KeyPoint> kpts1;
    vector<KeyPoint> kpts2;

    Mat desc1;
    Mat desc2;

    // Detect keypoints and compute descriptors for the reference (object) image once
    detect_and_compute(desc_type, img1, kpts1, desc1);

    Mat last_T;

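    // Main loop: for each frame, detect and describe features, match them against the
    // reference image, estimate the object-to-scene homography, and display the result.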
    for (;;) {
        // Start the timer for this frame
        double timer = static_cast<double>(cv::getTickCount());

        // Buffers for the current frame and its grayscale version
        Mat curr, curr_gray;

        // Read the next frame; stop when the stream is exhausted
        bool success = cap.read(curr);
        if (!success) break;

        // Convert the frame to grayscale
        cv::cvtColor(curr, curr_gray, cv::COLOR_BGR2GRAY);

        detect_and_compute(desc_type, curr_gray, kpts2, desc2);

        vector<DMatch> matches;
        match(match_type, desc1, desc2, matches);

        vector<char> match_mask(matches.size(), 1);

        vector<Point2f> obj;
        vector<Point2f> scene;
        for (int i = 0; i < static_cast<int>(matches.size()); ++i) {
            obj.push_back(kpts1[matches[i].queryIdx].pt);
            scene.push_back(kpts2[matches[i].trainIdx].pt);
        }

        // Estimate the object-to-scene homography with RANSAC
        Mat T = cv::findHomography(obj, scene, cv::RANSAC, 4, match_mask);

        // If estimation failed, just use the last known good transform.
        if (T.empty()) last_T.copyTo(T);
        T.copyTo(last_T);

        // Extract translation
        double dx = T.at<double>(0, 2);
        double dy = T.at<double>(1, 2);

        // Extract the rotation angle (a reasonable estimate as long as the homography is
        // close to a similarity transform)
        double da = std::atan2(T.at<double>(1, 0), T.at<double>(0, 0));

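        // Visualization: drawMatches() composes img1 and the current frame side by side, with
        // the frame drawn at an x offset of img1.cols, so the projected object corners need
        // the same offset before being drawn on the composite image.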
        Mat res;
        cv::drawMatches(img1, kpts1, curr_gray, kpts2, matches, res, Scalar::all(-1),
                        Scalar::all(-1), match_mask, DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        // Corners of the object image (img1.cols is the width, img1.rows the height)
        vector<Point2f> obj_corners(4);
        vector<Point2f> scene_corners(4);

        obj_corners[0] = Point2f(0, 0);
        obj_corners[1] = Point2f(static_cast<float>(img1.cols), 0);
        obj_corners[2] = Point2f(static_cast<float>(img1.cols), static_cast<float>(img1.rows));
        obj_corners[3] = Point2f(0, static_cast<float>(img1.rows));

        // Project the object corners into the scene
        cv::perspectiveTransform(obj_corners, scene_corners, T);

        // Draw lines between the corners (the mapped object in the scene)
        cv::line(res, scene_corners[0] + Point2f(img1.cols, 0), scene_corners[1] + Point2f(img1.cols, 0), Scalar(0, 255, 0), 4);
        cv::line(res, scene_corners[1] + Point2f(img1.cols, 0), scene_corners[2] + Point2f(img1.cols, 0), Scalar(0, 255, 0), 4);
        cv::line(res, scene_corners[2] + Point2f(img1.cols, 0), scene_corners[3] + Point2f(img1.cols, 0), Scalar(0, 255, 0), 4);
        cv::line(res, scene_corners[3] + Point2f(img1.cols, 0), scene_corners[0] + Point2f(img1.cols, 0), Scalar(0, 255, 0), 4);

        // Frames per second for this iteration
        int fps = static_cast<int>(cv::getTickFrequency() / (cv::getTickCount() - timer));

        // Display the descriptor type on the frame
        cv::putText(res, "Descriptor Type : " + desc_type, cv::Point(10, 20),
                    cv::FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 2);

        // Display the FPS on the frame
        cv::putText(res, "FPS : " + std::to_string(fps), cv::Point(10, 50),
                    cv::FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 2);

        // Display the number of good matches on the frame
        cv::putText(res, "Good Matches : " + std::to_string(matches.size()), cv::Point(10, 80),
                    cv::FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 2);

        // Display the estimated transformation on the frame
        string dX = std::to_string(cvRound(dx));
        string dY = std::to_string(cvRound(dy));
        string dA = std::to_string(cvRound(da));
        cv::putText(res, "Transformations = [dx] : " + dX + " [dy] : " + dY + " [da] : " + dA,
                    cv::Point(10, 110), cv::FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 2);

        cv::imshow("result", res);
        cv::waitKey(0);  // press any key to advance to the next frame
    }

    return 0;
}
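
// Example build command, as an illustration only (assumes the file is saved as
// feature_matching.cpp and that OpenCV was built with the contrib modules providing
// xfeatures2d; adjust the file name and flags for your setup):
//
//   g++ -std=c++11 feature_matching.cpp -o feature_matching `pkg-config --cflags --libs opencv4`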