1 #include <iostream>
2 #include <fstream>
3 #include <string>
4 #include "opencv2/opencv_modules.hpp"
5 #include <opencv2/core/utility.hpp>
6 #include "opencv2/imgcodecs.hpp"
7 #include "opencv2/highgui.hpp"
8 #include "opencv2/stitching/detail/autocalib.hpp"
9 #include "opencv2/stitching/detail/blenders.hpp"
10 #include "opencv2/stitching/detail/timelapsers.hpp"
11 #include "opencv2/stitching/detail/camera.hpp"
12 #include "opencv2/stitching/detail/exposure_compensate.hpp"
13 #include "opencv2/stitching/detail/matchers.hpp"
14 #include "opencv2/stitching/detail/motion_estimators.hpp"
15 #include "opencv2/stitching/detail/seam_finders.hpp"
16 #include "opencv2/stitching/detail/warpers.hpp"
17 #include "opencv2/stitching/warpers.hpp"
18 #ifdef HAVE_OPENCV_XFEATURES2D
19 #include "opencv2/xfeatures2d/nonfree.hpp"
20 #endif
21 #define ENABLE_LOG 1
22 #define LOG(msg) std::cout << msg
23 #define LOGLN(msg) std::cout << msg << std::endl
24 using namespace std;
25 using namespace cv;
26 using namespace cv::detail;
27 // Default command line args
28
29 /*#if 1
30 #define DLL_API __declspec(dllexport)
31 #else
32 #define DLL_API __declspec(dllimport)
33 #endif
34 */
35
36 /*extern "C" { //因为python一般只支持c的接口
37 typedef struct ImageBase {
38 int w; //图像的宽
39 int h; //图像的高
40 int c; //通道数
41 unsigned char *data; //我们要写python和c++交互的数据结构,0-255的单字符指针
42 }ImageMeta;
43 //typedef ImageBase ImageMeta;
44
45 DLL_API int Stitch(ImageMeta *im1, ImageMeta *im2);//函数导出,要改
46
47 };
48 */
49 vector<String> img_names;
50 int num_images;
51 bool preview = false;
52 bool try_cuda = false;
53 double work_megapix = 0.6;
54 double seam_megapix = 0.1;
55 double compose_megapix = -1;
56 float conf_thresh = 1.f;
57 #ifdef HAVE_OPENCV_XFEATURES2D
58 string features_type = "surf";
59 #else
60 string features_type = "orb";
61 #endif
62 string matcher_type = "homography";
63 string estimator_type = "homography";
64 string ba_cost_func = "ray";
65 string ba_refine_mask = "xxxxx";
66 bool do_wave_correct = true;
67 WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
68 bool save_graph = false;
69 std::string save_graph_to;
70 string warp_type = "spherical";
71 int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
72 int expos_comp_nr_feeds = 1;
73 int expos_comp_nr_filtering = 2;
74 int expos_comp_block_size = 32;
75 float match_conf = 0.3f;
76 string seam_find_type = "gc_color";
77 int blend_type = Blender::MULTI_BAND;
78 int timelapse_type = Timelapser::AS_IS;//延时摄影
79 float blend_strength = 5;
80 string result_name = "D:/result.jpg";
81 bool timelapse = false;//首先定义timelapse的默认布尔类型为False
82 int range_width = -1;
83
84 /*static int parseCmdArgs(int argc, char** argv)
85 {
86 if (argc == 1)
87 {
88 printUsage();
89 return -1;
90 }
91 for (int i = 1; i < argc; ++i)
92 {
93 if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
94 {
95 printUsage();
96 return -1;
97 }
98 else if (string(argv[i]) == "--preview")
99 {
100 preview = true;
101 }
102 else if (string(argv[i]) == "--try_cuda")
103 {
104 if (string(argv[i + 1]) == "no")
105 try_cuda = false;
106 else if (string(argv[i + 1]) == "yes")
107 try_cuda = true;
108 else
109 {
110 cout << "Bad --try_cuda flag value\n";
111 return -1;
112 }
113 i++;
114 }
115 else if (string(argv[i]) == "--work_megapix")
116 {
117 work_megapix = atof(argv[i + 1]);
118 i++;
119 }
120 else if (string(argv[i]) == "--seam_megapix")
121 {
122 seam_megapix = atof(argv[i + 1]);
123 i++;
124 }
125 else if (string(argv[i]) == "--compose_megapix")
126 {
127 compose_megapix = atof(argv[i + 1]);
128 i++;
129 }
130 else if (string(argv[i]) == "--result")
131 {
132 result_name = argv[i + 1];
133 i++;
134 }
135 else if (string(argv[i]) == "--features")
136 {
137 features_type = argv[i + 1];
138 if (features_type == "orb")
139 match_conf = 0.3f;
140 i++;
141 }
142 else if (string(argv[i]) == "--matcher")
143 {
144 if (string(argv[i + 1]) == "homography" || string(argv[i + 1]) == "affine")
145 matcher_type = argv[i + 1];
146 else
147 {
148 cout << "Bad --matcher flag value\n";
149 return -1;
150 }
151 i++;
152 }
153 else if (string(argv[i]) == "--estimator")
154 {
155 if (string(argv[i + 1]) == "homography" || string(argv[i + 1]) == "affine")
156 estimator_type = argv[i + 1];
157 else
158 {
159 cout << "Bad --estimator flag value\n";
160 return -1;
161 }
162 i++;
163 }
164 else if (string(argv[i]) == "--match_conf")
165 {
166 match_conf = static_cast<float>(atof(argv[i + 1]));
167 i++;
168 }
169 else if (string(argv[i]) == "--conf_thresh")
170 {
171 conf_thresh = static_cast<float>(atof(argv[i + 1]));
172 i++;
173 }
174 else if (string(argv[i]) == "--ba")
175 {
176 ba_cost_func = argv[i + 1];
177 i++;
178 }
179 else if (string(argv[i]) == "--ba_refine_mask")
180 {
181 ba_refine_mask = argv[i + 1];
182 if (ba_refine_mask.size() != 5)
183 {
184 cout << "Incorrect refinement mask length.\n";
185 return -1;
186 }
187 i++;
188 }
189 else if (string(argv[i]) == "--wave_correct")
190 {
191 if (string(argv[i + 1]) == "no")
192 do_wave_correct = false;
193 else if (string(argv[i + 1]) == "horiz")
194 {
195 do_wave_correct = true;
196 wave_correct = detail::WAVE_CORRECT_HORIZ;
197 }
198 else if (string(argv[i + 1]) == "vert")
199 {
200 do_wave_correct = true;
201 wave_correct = detail::WAVE_CORRECT_VERT;
202 }
203 else
204 {
205 cout << "Bad --wave_correct flag value\n";
206 return -1;
207 }
208 i++;
209 }
210 else if (string(argv[i]) == "--save_graph")
211 {
212 save_graph = true;
213 save_graph_to = argv[i + 1];
214 i++;
215 }
216 else if (string(argv[i]) == "--warp")
217 {
218 warp_type = string(argv[i + 1]);
219 i++;
220 }
221 else if (string(argv[i]) == "--expos_comp")
222 {
223 if (string(argv[i + 1]) == "no")
224 expos_comp_type = ExposureCompensator::NO;
225 else if (string(argv[i + 1]) == "gain")
226 expos_comp_type = ExposureCompensator::GAIN;
227 else if (string(argv[i + 1]) == "gain_blocks")
228 expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
229 else if (string(argv[i + 1]) == "channels")
230 expos_comp_type = ExposureCompensator::CHANNELS;
231 else if (string(argv[i + 1]) == "channels_blocks")
232 expos_comp_type = ExposureCompensator::CHANNELS_BLOCKS;
233 else
234 {
235 cout << "Bad exposure compensation method\n";
236 return -1;
237 }
238 i++;
239 }
240 else if (string(argv[i]) == "--expos_comp_nr_feeds")
241 {
242 expos_comp_nr_feeds = atoi(argv[i + 1]);
243 i++;
244 }
245 else if (string(argv[i]) == "--expos_comp_nr_filtering")
246 {
247 expos_comp_nr_filtering = atoi(argv[i + 1]);
248 i++;
249 }
250 else if (string(argv[i]) == "--expos_comp_block_size")
251 {
252 expos_comp_block_size = atoi(argv[i + 1]);
253 i++;
254 }
255 else if (string(argv[i]) == "--seam")
256 {
257 if (string(argv[i + 1]) == "no" ||
258 string(argv[i + 1]) == "voronoi" ||
259 string(argv[i + 1]) == "gc_color" ||
260 string(argv[i + 1]) == "gc_colorgrad" ||
261 string(argv[i + 1]) == "dp_color" ||
262 string(argv[i + 1]) == "dp_colorgrad")
263 seam_find_type = argv[i + 1];
264 else
265 {
266 cout << "Bad seam finding method\n";
267 return -1;
268 }
269 i++;
270 }
271 else if (string(argv[i]) == "--blend")
272 {
273 if (string(argv[i + 1]) == "no")
274 blend_type = Blender::NO;
275 else if (string(argv[i + 1]) == "feather")
276 blend_type = Blender::FEATHER;
277 else if (string(argv[i + 1]) == "multiband")
278 blend_type = Blender::MULTI_BAND;
279 else
280 {
281 cout << "Bad blending method\n";
282 return -1;
283 }
284 i++;
285 }
286 else if (string(argv[i]) == "--timelapse")
287 {
288 timelapse = true;
289 if (string(argv[i + 1]) == "as_is")
290 timelapse_type = Timelapser::AS_IS;
291 else if (string(argv[i + 1]) == "crop")
292 timelapse_type = Timelapser::CROP;
293 else
294 {
295 cout << "Bad timelapse method\n";
296 return -1;
297 }
298 i++;
299 }
300 else if (string(argv[i]) == "--rangewidth")
301 {
302 range_width = atoi(argv[i + 1]);
303 i++;
304 }
305 else if (string(argv[i]) == "--blend_strength")
306 {
307 blend_strength = static_cast<float>(atof(argv[i + 1]));
308 i++;
309 }
310 else if (string(argv[i]) == "--output")
311 {
312 result_name = argv[i + 1];
313 i++;
314 }
315 else
316 img_names.push_back(argv[i]);
317 }
318 if (preview)
319 {
320 compose_megapix = 0.6;
321 }
322 return 0;
323 }*/
324
325 int main()
326 //vector<Mat> img_list
327 {//一个int数;一个图片类型的列表
328 //predict先判断长度 然后长度作为一个参数传给
329 //preview = true;
330 //try_cuda = true;
331 //preview = true;
332 //result = 'D:/result.jpg';
333 //work_megapix = -1;
334 //features_type = "orb";
335 Mat img1, img2;
336 img1 = imread("D:/1Hill.jpg");
337 img2 = imread("D:/2Hill.jpg");
338 vector<Mat> ALLimages(2);
339 ALLimages[0] = img1.clone();
340 ALLimages[1] = img2.clone();
341 //img_names.push_back("D:/1Hill.jpg");
342 //img_names.push_back("D:/2Hill.jpg");//??
343 //img_names.push_back("D:/3Hill.jpg");//??
344 num_images = 2;
345 #if ENABLE_LOG
346 int64 app_start_time = getTickCount();
347 #endif
348 #if 0
349 cv::setBreakOnError(true);
350 #endif
351 //int retval = parseCmdArgs(argc, argv);
352 //if (retval)
353 //return retval;
354 // Check if have enough images
355 //int num_images = static_cast<int>(img_names.size());
356 if (num_images < 2)
357 {
358 LOGLN("Need more images");
359 return -1;
360 }
361 double work_scale = 1, seam_scale = 1, compose_scale = 1;
362 bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
363 LOGLN("Finding features...");
364 #if ENABLE_LOG
365 int64 t = getTickCount();
366 #endif
367 Ptr<Feature2D> finder;
368 if (features_type == "orb")
369 {
370 finder = ORB::create();
371 }
372 else if (features_type == "akaze")
373 {
374 finder = AKAZE::create();
375 }
376 #ifdef HAVE_OPENCV_XFEATURES2D
377 else if (features_type == "surf")
378 {
379 finder = xfeatures2d::SURF::create();
380 }
381 else if (features_type == "sift") {
382 finder = xfeatures2d::SIFT::create();
383 }
384 #endif
385 else
386 {
387 cout << "Unknown 2D features type: '" << features_type << "'.\n";
388 return -1;
389 }
390 Mat full_img, img;
391 vector<ImageFeatures> features(num_images);
392 vector<Mat> images(num_images);
393 vector<Size> full_img_sizes(num_images);
394 double seam_work_aspect = 1;
395 for (int i = 0; i < num_images; ++i)
396 {
397 full_img = ALLimages[i];
398
399 //获取一张图。imread_img -------->Mat
400 //先把一边调通了再去组合调试,分治
401 //full_img = img_list;
402 //python传进来n张图片的base64,可以转成读取后的图片。
403
404 //先在c中定义图像HWC结构数组数组转一次 Mat, dll返回Mat结果,Mat转一次结构体
405 //main输入 Mat1,Mat2
406 //dll返回数组,python转化成cv2image,然后输出image2base64
407
408 //full_image里面是读取的imread_img类型
409 //base64的size容易确定
410 //先在predict前提取到图片的整个Mat传给DLL
411 full_img_sizes[i] = full_img.size();//结果:full_img_sizes = [(500,300),(200,100)]
412 if (full_img.empty())
413 {
414 //LOGLN("Can't open image " << img_names[i]);//访问了空指针,和img_names有关
415 return -2;
416 }
417 if (work_megapix < 0)
418 {
419 img = full_img;
420 work_scale = 1;
421 is_work_scale_set = true;
422 }
423 else
424 {
425 if (!is_work_scale_set)
426 {
427 work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
428 is_work_scale_set = true;
429 }
430 resize(full_img, img, Size(), work_scale, work_scale, INTER_LINEAR_EXACT);
431 }
432 if (!is_seam_scale_set)
433 {
434 seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
435 seam_work_aspect = seam_scale / work_scale;
436 is_seam_scale_set = true;
437 }
438 computeImageFeatures(finder, img, features[i]);
439 features[i].img_idx = i;
440 LOGLN("Features in image #" << i + 1 << ": " << features[i].keypoints.size());
441 resize(full_img, img, Size(), seam_scale, seam_scale, INTER_LINEAR_EXACT);
442 images[i] = img.clone();
443 //循环是为了找到每张图的特征,然后把图片copy到images里
444 }
445 full_img.release();
446 img.release();
447 LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
448 LOG("Pairwise matching");
449 #if ENABLE_LOG
450 t = getTickCount();
451 #endif
452 vector<MatchesInfo> pairwise_matches;
453 Ptr<FeaturesMatcher> matcher;
454 if (matcher_type == "affine")
455 matcher = makePtr<AffineBestOf2NearestMatcher>(false, try_cuda, match_conf);
456 else if (range_width == -1)
457 matcher = makePtr<BestOf2NearestMatcher>(try_cuda, match_conf);
458 else
459 matcher = makePtr<BestOf2NearestRangeMatcher>(range_width, try_cuda, match_conf);
460 (*matcher)(features, pairwise_matches);
461 matcher->collectGarbage();
462 LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
463 // Check if we should save matches graph
464 //if (save_graph)
465 //{
466 // LOGLN("Saving matches graph...");
467 // ofstream f(save_graph_to.c_str());
468 // f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
469 //}
470 // Leave only images we are sure are from the same panorama
471 vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
472 if (indices.size() != 2)//判断两个图片的相关性
473 return -1;
474
475 if (num_images < 2)
476 {
477 LOGLN("Need more images");
478 return -1;
479 }
480 Ptr<Estimator> estimator;
481 if (estimator_type == "affine")
482 estimator = makePtr<AffineBasedEstimator>();
483 else
484 estimator = makePtr<HomographyBasedEstimator>();
485 vector<CameraParams> cameras;
486 if (!(*estimator)(features, pairwise_matches, cameras))
487 {
488 cout << "Homography estimation failed.\n";
489 return -1;
490 }
491 for (size_t i = 0; i < cameras.size(); ++i)
492 {
493 Mat R;
494 cameras[i].R.convertTo(R, CV_32F);
495 cameras[i].R = R;
496 //LOGLN("Initial camera intrinsics #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R);
497 }
498 Ptr<detail::BundleAdjusterBase> adjuster;
499 if (ba_cost_func == "reproj") adjuster = makePtr<detail::BundleAdjusterReproj>();
500 else if (ba_cost_func == "ray") adjuster = makePtr<detail::BundleAdjusterRay>();
501 else if (ba_cost_func == "affine") adjuster = makePtr<detail::BundleAdjusterAffinePartial>();
502 else if (ba_cost_func == "no") adjuster = makePtr<NoBundleAdjuster>();
503 else
504 {
505 cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
506 return -1;
507 }
508 adjuster->setConfThresh(conf_thresh);
509 Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
510 if (ba_refine_mask[0] == 'x') refine_mask(0, 0) = 1;
511 if (ba_refine_mask[1] == 'x') refine_mask(0, 1) = 1;
512 if (ba_refine_mask[2] == 'x') refine_mask(0, 2) = 1;
513 if (ba_refine_mask[3] == 'x') refine_mask(1, 1) = 1;
514 if (ba_refine_mask[4] == 'x') refine_mask(1, 2) = 1;
515 adjuster->setRefinementMask(refine_mask);
516 if (!(*adjuster)(features, pairwise_matches, cameras))
517 {
518 cout << "Camera parameters adjusting failed.\n";
519 return -1;
520 }
521 // Find median focal length
522 vector<double> focals;
523 for (size_t i = 0; i < cameras.size(); ++i)
524 {
525 //LOGLN("Camera #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R);
526 focals.push_back(cameras[i].focal);
527 }
528 sort(focals.begin(), focals.end());
529 float warped_image_scale;
530 if (focals.size() % 2 == 1)
531 warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
532 else
533 warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
534 if (do_wave_correct)
535 {
536 vector<Mat> rmats;
537 for (size_t i = 0; i < cameras.size(); ++i)
538 rmats.push_back(cameras[i].R.clone());
539 waveCorrect(rmats, wave_correct);
540 for (size_t i = 0; i < cameras.size(); ++i)
541 cameras[i].R = rmats[i];
542 }
543 LOGLN("Warping images (auxiliary)... ");
544 #if ENABLE_LOG
545 t = getTickCount();
546 #endif
547 vector<Point> corners(num_images);
548 vector<UMat> masks_warped(num_images);
549 vector<UMat> images_warped(num_images);
550 vector<Size> sizes(num_images);
551 vector<UMat> masks(num_images);
552 // Prepare images masks
553 for (int i = 0; i < num_images; ++i)
554 {
555 masks[i].create(images[i].size(), CV_8U);
556 masks[i].setTo(Scalar::all(255));
557 }
558 // Warp images and their masks
559 Ptr<WarperCreator> warper_creator;
560 #ifdef HAVE_OPENCV_CUDAWARPING
561 if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
562 {
563 if (warp_type == "plane")
564 warper_creator = makePtr<cv::PlaneWarperGpu>();
565 else if (warp_type == "cylindrical")
566 warper_creator = makePtr<cv::CylindricalWarperGpu>();
567 else if (warp_type == "spherical")
568 warper_creator = makePtr<cv::SphericalWarperGpu>();
569 }
570 else
571 #endif
572 {
573 if (warp_type == "plane")
574 warper_creator = makePtr<cv::PlaneWarper>();
575 else if (warp_type == "affine")
576 warper_creator = makePtr<cv::AffineWarper>();
577 else if (warp_type == "cylindrical")
578 warper_creator = makePtr<cv::CylindricalWarper>();
579 else if (warp_type == "spherical")
580 warper_creator = makePtr<cv::SphericalWarper>();
581 else if (warp_type == "fisheye")
582 warper_creator = makePtr<cv::FisheyeWarper>();
583 else if (warp_type == "stereographic")
584 warper_creator = makePtr<cv::StereographicWarper>();
585 else if (warp_type == "compressedPlaneA2B1")
586 warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
587 else if (warp_type == "compressedPlaneA1.5B1")
588 warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
589 else if (warp_type == "compressedPlanePortraitA2B1")
590 warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
591 else if (warp_type == "compressedPlanePortraitA1.5B1")
592 warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
593 else if (warp_type == "paniniA2B1")
594 warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
595 else if (warp_type == "paniniA1.5B1")
596 warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
597 else if (warp_type == "paniniPortraitA2B1")
598 warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
599 else if (warp_type == "paniniPortraitA1.5B1")
600 warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
601 else if (warp_type == "mercator")
602 warper_creator = makePtr<cv::MercatorWarper>();
603 else if (warp_type == "transverseMercator")
604 warper_creator = makePtr<cv::TransverseMercatorWarper>();
605 }
606 if (!warper_creator)
607 {
608 cout << "Can't create the following warper '" << warp_type << "'\n";
609 return 1;
610 }
611 Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
612 for (int i = 0; i < num_images; ++i)
613 {
614 Mat_<float> K;
615 cameras[i].K().convertTo(K, CV_32F);
616 float swa = (float)seam_work_aspect;
617 K(0, 0) *= swa; K(0, 2) *= swa;
618 K(1, 1) *= swa; K(1, 2) *= swa;
619 corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
620 sizes[i] = images_warped[i].size();
621 warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
622 }
623 vector<UMat> images_warped_f(num_images);
624 for (int i = 0; i < num_images; ++i)
625 images_warped[i].convertTo(images_warped_f[i], CV_32F);
626 LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
627 LOGLN("Compensating exposure...");
628 #if ENABLE_LOG
629 t = getTickCount();
630 #endif
631 Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
632 if (dynamic_cast<GainCompensator*>(compensator.get()))
633 {
634 GainCompensator* gcompensator = dynamic_cast<GainCompensator*>(compensator.get());
635 gcompensator->setNrFeeds(expos_comp_nr_feeds);
636 }
637 if (dynamic_cast<ChannelsCompensator*>(compensator.get()))
638 {
639 ChannelsCompensator* ccompensator = dynamic_cast<ChannelsCompensator*>(compensator.get());
640 ccompensator->setNrFeeds(expos_comp_nr_feeds);
641 }
642 if (dynamic_cast<BlocksCompensator*>(compensator.get()))
643 {
644 BlocksCompensator* bcompensator = dynamic_cast<BlocksCompensator*>(compensator.get());
645 bcompensator->setNrFeeds(expos_comp_nr_feeds);
646 bcompensator->setNrGainsFilteringIterations(expos_comp_nr_filtering);
647 bcompensator->setBlockSize(expos_comp_block_size, expos_comp_block_size);
648 }
649 compensator->feed(corners, images_warped, masks_warped);
650 LOGLN("Compensating exposure, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
651 LOGLN("Finding seams...");
652 #if ENABLE_LOG
653 t = getTickCount();
654 #endif
655 Ptr<SeamFinder> seam_finder;
656 if (seam_find_type == "no")
657 seam_finder = makePtr<detail::NoSeamFinder>();
658 else if (seam_find_type == "voronoi")
659 seam_finder = makePtr<detail::VoronoiSeamFinder>();
660 else if (seam_find_type == "gc_color")
661 {
662 #ifdef HAVE_OPENCV_CUDALEGACY
663 if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
664 seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR);
665 else
666 #endif
667 seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
668 }
669 else if (seam_find_type == "gc_colorgrad")
670 {
671 #ifdef HAVE_OPENCV_CUDALEGACY
672 if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
673 seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
674 else
675 #endif
676 seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
677 }
678 else if (seam_find_type == "dp_color")
679 seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR);
680 else if (seam_find_type == "dp_colorgrad")
681 seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR_GRAD);
682 if (!seam_finder)
683 {
684 cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
685 return 1;
686 }
687 seam_finder->find(images_warped_f, corners, masks_warped);
688 LOGLN("Finding seams, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
689 // Release unused memory
690 images.clear();
691 images_warped.clear();
692 images_warped_f.clear();
693 masks.clear();
694 LOGLN("Compositing...");
695 #if ENABLE_LOG
696 t = getTickCount();
697 #endif
698 Mat img_warped, img_warped_s;
699 Mat dilated_mask, seam_mask, mask, mask_warped;
700 Ptr<Blender> blender;
701 Ptr<Timelapser> timelapser;
702 //double compose_seam_aspect = 1;
703 double compose_work_aspect = 1;
704 for (int img_idx = 0; img_idx < num_images; ++img_idx)
705 {
706 //LOGLN("Compositing image #" << indices[img_idx] + 1);
707 // Read image and resize it if necessary
708 full_img = ALLimages[img_idx];
709 if (!is_compose_scale_set)
710 {
711 if (compose_megapix > 0)
712 compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
713 is_compose_scale_set = true;
714 // Compute relative scales
715 //compose_seam_aspect = compose_scale / seam_scale;
716 compose_work_aspect = compose_scale / work_scale;
717 // Update warped image scale
718 warped_image_scale *= static_cast<float>(compose_work_aspect);
719 warper = warper_creator->create(warped_image_scale);
720 // Update corners and sizes
721 for (int i = 0; i < num_images; ++i)
722 {
723 // Update intrinsics
724 cameras[i].focal *= compose_work_aspect;
725 cameras[i].ppx *= compose_work_aspect;
726 cameras[i].ppy *= compose_work_aspect;
727 // Update corner and size
728 Size sz = full_img_sizes[i];
729 Mat K;
730 cameras[i].K().convertTo(K, CV_32F);
731 Rect roi = warper->warpRoi(sz, K, cameras[i].R);
732 corners[i] = roi.tl();
733 sizes[i] = roi.size();
734 }
735 }
736 if (abs(compose_scale - 1) > 1e-1)//没用
737 resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
738 else
739 img = full_img;
740 full_img.release();
741 Size img_size = img.size();
742 Mat K;
743 cameras[img_idx].K().convertTo(K, CV_32F);
744 // Warp the current image
745 warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
746 // Warp the current image mask
747 mask.create(img_size, CV_8U);
748 mask.setTo(Scalar::all(255));
749 warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
750 // Compensate exposure
751 compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
752 img_warped.convertTo(img_warped_s, CV_16S);
753 img_warped.release();
754 img.release();
755 mask.release();
756 dilate(masks_warped[img_idx], dilated_mask, Mat());
757 resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
758 mask_warped = seam_mask & mask_warped;
759 if (!blender && !timelapse)//blender是False,timelapse也是False,这里运行了!
760 {//做multiband
761 blender = Blender::createDefault(blend_type, try_cuda);
762 Size dst_sz = resultRoi(corners, sizes).size();
763 float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
764 if (blend_width < 1.f)
765 blender = Blender::createDefault(Blender::NO, try_cuda);
766 else if (blend_type == Blender::MULTI_BAND)
767 {
768 MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
769 mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
770 LOGLN("Multi-band blender, number of bands: " << mb->numBands());
771 }
772 else if (blend_type == Blender::FEATHER)//未运行
773 {
774 FeatherBlender* fb = dynamic_cast<FeatherBlender*>(blender.get());
775 fb->setSharpness(1.f / blend_width);
776 LOGLN("Feather blender, sharpness: " << fb->sharpness());
777 }
778 blender->prepare(corners, sizes);
779 }
780 else if (!timelapser && timelapse)//timelapse是假,timelapser是什么??没运行
781 {
782 timelapser = Timelapser::createDefault(timelapse_type);
783 timelapser->initialize(corners, sizes);
784 cout << "----------------------------运行---------------------------------" << endl;
785 }
786 // Blend the current image
787 if (timelapse)//默认是假
788 {
789 cout << "----------------------------运行2---------------------------------" << endl;
790 }
791 else
792 {//这里运行了两次,因为在循环体中,图片有两张
793 blender->feed(img_warped_s, mask_warped, corners[img_idx]);
794 cout << "----------------------------运行3---------------------------------" << endl;
795 }
796 }
797 if (!timelapse)//运行了
798 {
799 Mat result, result_mask;
800 blender->blend(result, result_mask);
801 LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
802 imwrite(result_name, result);
803 }
804 LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
805 return 0;
806 }
807