From 659ffaddb4aa2cd1ae94603e514711dfe171a971 Mon Sep 17 00:00:00 2001 From: Brian Wignall Date: Thu, 26 Dec 2019 06:45:03 -0500 Subject: [PATCH] Fix spelling typos --- apps/createsamples/utility.cpp | 4 ++-- apps/traincascade/HOGfeatures.h | 4 ++-- .../js_contours_hierarchy/js_contours_hierarchy.markdown | 2 +- .../py_feature_homography/py_feature_homography.markdown | 4 ++-- .../py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown | 2 +- .../py_contour_features/py_contour_features.markdown | 2 +- .../py_contours_hierarchy/py_contours_hierarchy.markdown | 2 +- .../calib3d/real_time_pose/real_time_pose.markdown | 2 +- .../porting_anisotropic_segmentation.markdown | 4 ++-- .../gapi/face_beautification/face_beautification.markdown | 2 +- .../interactive_face_detection.markdown | 2 +- .../clojure_dev_intro/clojure_dev_intro.markdown | 2 +- .../cascade_classifier/cascade_classifier.markdown | 2 +- doc/tutorials/objdetect/traincascade.markdown | 2 +- doc/tutorials/videoio/kinect_openni.markdown | 2 +- modules/calib3d/include/opencv2/calib3d.hpp | 2 +- modules/calib3d/src/calibration.cpp | 2 +- modules/calib3d/src/chessboard.cpp | 4 ++-- modules/calib3d/src/chessboard.hpp | 2 +- modules/calib3d/test/test_chesscorners_badarg.cpp | 2 +- modules/core/include/opencv2/core/core_c.h | 2 +- modules/core/include/opencv2/core/hal/intrin_avx.hpp | 2 +- modules/core/include/opencv2/core/hal/intrin_cpp.hpp | 2 +- modules/core/include/opencv2/core/hal/intrin_msa.hpp | 2 +- modules/core/include/opencv2/core/hal/intrin_wasm.hpp | 2 +- modules/core/include/opencv2/core/hal/msa_macros.h | 4 ++-- modules/core/include/opencv2/core/opencl/opencl_info.hpp | 2 +- modules/core/include/opencv2/core/optim.hpp | 2 +- modules/core/include/opencv2/core/vsx_utils.hpp | 4 ++-- modules/core/src/array.cpp | 4 ++-- modules/core/src/convert_scale.simd.hpp | 4 ++-- modules/core/src/downhill_simplex.cpp | 2 +- modules/core/src/system.cpp | 6 +++--- modules/dnn/include/opencv2/dnn/all_layers.hpp | 2 +- 
modules/dnn/src/cuda/slice.cu | 2 +- modules/dnn/src/cuda4dnn/csl/cudnn/convolution.hpp | 2 +- modules/dnn/src/cuda4dnn/csl/memory.hpp | 2 +- modules/dnn/src/cuda4dnn/csl/pointer.hpp | 2 +- modules/dnn/src/cuda4dnn/csl/tensor.hpp | 6 +++--- modules/dnn/src/cuda4dnn/csl/tensor_ops.hpp | 6 +++--- modules/dnn/src/cuda4dnn/primitives/pooling.hpp | 2 +- modules/dnn/src/ie_ngraph.cpp | 4 ++-- modules/dnn/src/onnx/opencv-onnx.proto | 6 +++--- modules/dnn/src/op_cuda.hpp | 2 +- modules/dnn/src/vkcom/src/context.cpp | 2 +- modules/features2d/include/opencv2/features2d.hpp | 2 +- modules/features2d/src/kaze/nldiffusion_functions.h | 2 +- modules/features2d/src/mser.cpp | 2 +- modules/flann/include/opencv2/flann/simplex_downhill.h | 2 +- modules/gapi/doc/00-root.markdown | 2 +- modules/gapi/doc/01-background.markdown | 2 +- modules/gapi/doc/slides/gapi_overview.org | 2 +- modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp | 2 +- modules/gapi/include/opencv2/gapi/fluid/gfluidkernel.hpp | 2 +- modules/gapi/include/opencv2/gapi/gcompiled.hpp | 2 +- modules/gapi/include/opencv2/gapi/gcomputation.hpp | 4 ++-- modules/gapi/include/opencv2/gapi/gtype_traits.hpp | 2 +- modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp | 2 +- modules/gapi/include/opencv2/gapi/streaming/cap.hpp | 2 +- modules/gapi/samples/draw_example.cpp | 2 +- modules/gapi/src/api/ft_render.cpp | 6 +++--- modules/gapi/src/api/render_ocv.cpp | 4 ++-- modules/gapi/src/backends/fluid/gfluidimgproc.cpp | 2 +- .../gapi/src/backends/fluid/gfluidimgproc_func.dispatch.cpp | 2 +- modules/gapi/src/backends/fluid/gfluidutils.hpp | 2 +- modules/gapi/src/backends/ie/giebackend.cpp | 2 +- modules/gapi/src/backends/ocl/goclimgproc.hpp | 2 +- modules/gapi/src/compiler/passes/islands.cpp | 4 ++-- modules/gapi/src/executor/gstreamingexecutor.cpp | 2 +- modules/gapi/test/common/gapi_core_tests.hpp | 2 +- modules/gapi/test/common/gapi_core_tests_inl.hpp | 4 ++-- modules/gapi/test/common/gapi_imgproc_tests.hpp | 2 +- 
modules/gapi/test/gapi_async_test.cpp | 2 +- modules/gapi/test/gapi_smoke_test.cpp | 2 +- modules/imgproc/include/opencv2/imgproc/imgproc_c.h | 2 +- modules/imgproc/src/approx.cpp | 2 +- modules/imgproc/src/shapedescr.cpp | 4 ++-- modules/imgproc/test/test_approxpoly.cpp | 2 +- modules/imgproc/test/test_intersection.cpp | 2 +- modules/java/android_sdk/CMakeLists.txt | 4 ++-- .../java/org/opencv/android/CameraBridgeViewBase.java | 2 +- modules/js/perf/README.md | 2 +- modules/ml/include/opencv2/ml.hpp | 2 +- modules/python/test/test_algorithm_rw.py | 2 +- modules/python/test/test_cuda.py | 2 +- modules/python/test/test_persistence.py | 2 +- .../stitching/include/opencv2/stitching/detail/matchers.hpp | 4 ++-- modules/ts/include/opencv2/ts/ts_gtest.h | 2 +- modules/ts/misc/run_android.py | 4 ++-- modules/ts/src/ts.cpp | 2 +- modules/video/src/bgfg_KNN.cpp | 4 ++-- modules/video/src/bgfg_gaussmix2.cpp | 2 +- .../videoio/include/opencv2/videoio/legacy/constants_c.h | 4 ++-- modules/videoio/src/cap_aravis.cpp | 2 +- modules/videoio/src/cap_avfoundation.mm | 2 +- modules/videoio/src/cap_avfoundation_mac.mm | 4 ++-- modules/videoio/src/cap_gstreamer.cpp | 2 +- platforms/ios/cmake/Modules/Platform/iOS.cmake | 2 +- platforms/linux/mips.toolchain.cmake | 2 +- platforms/linux/mips32r5el-gnu.toolchain.cmake | 2 +- platforms/linux/mips64r6el-gnu.toolchain.cmake | 2 +- samples/cpp/CMakeLists.txt | 2 +- samples/cpp/delaunay2.cpp | 2 +- samples/cpp/logistic_regression.cpp | 4 ++-- samples/cpp/pca.cpp | 2 +- .../calib3d/real_time_pose_estimation/src/Model.h | 2 +- samples/cpp/videocapture_openni.cpp | 2 +- samples/opencl/opencl-opencv-interop.cpp | 2 +- samples/python/deconvolution.py | 2 +- samples/winrt/ImageManipulations/Constants.cpp | 2 +- 110 files changed, 142 insertions(+), 142 deletions(-) diff --git a/apps/createsamples/utility.cpp b/apps/createsamples/utility.cpp index 919ad2dcc471..5176f148365d 100644 --- a/apps/createsamples/utility.cpp +++ 
b/apps/createsamples/utility.cpp @@ -1078,8 +1078,8 @@ void cvCreateTrainingSamples( const char* filename, icvPlaceDistortedSample( sample, inverse, maxintensitydev, maxxangle, maxyangle, maxzangle, 0 /* nonzero means placing image without cut offs */, - 0.0 /* nozero adds random shifting */, - 0.0 /* nozero adds random scaling */, + 0.0 /* nonzero adds random shifting */, + 0.0 /* nonzero adds random scaling */, &data ); if( showsamples ) diff --git a/apps/traincascade/HOGfeatures.h b/apps/traincascade/HOGfeatures.h index cdf758777dd1..3d4092eab988 100644 --- a/apps/traincascade/HOGfeatures.h +++ b/apps/traincascade/HOGfeatures.h @@ -45,7 +45,7 @@ class CvHOGEvaluator : public CvFeatureEvaluator }; std::vector features; - cv::Mat normSum; //for nomalization calculation (L1 or L2) + cv::Mat normSum; //for normalization calculation (L1 or L2) std::vector hist; }; @@ -70,7 +70,7 @@ inline float CvHOGEvaluator::Feature::calc( const std::vector& _hists, const float *pnormSum = _normSum.ptr((int)y); normFactor = (float)(pnormSum[fastRect[0].p0] - pnormSum[fastRect[1].p1] - pnormSum[fastRect[2].p2] + pnormSum[fastRect[3].p3]); - res = (res > 0.001f) ? ( res / (normFactor + 0.001f) ) : 0.f; //for cutting negative values, which apper due to floating precision + res = (res > 0.001f) ? ( res / (normFactor + 0.001f) ) : 0.f; //for cutting negative values, which appear due to floating precision return res; } diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown index 5dc807742b01..c98628e48648 100644 --- a/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown +++ b/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown @@ -145,7 +145,7 @@ no child, parent is contour-3. So array is [-1,-1,-1,3]. And this is the final guy, Mr.Perfect. 
It retrieves all the contours and creates a full family hierarchy list. **It even tells, who is the grandpa, father, son, grandson and even beyond... :)**. -For examle, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the +For example, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the result given by OpenCV and analyze it. Again, red letters give the contour number and green letters give the hierarchy order. diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index f2c0cdddb502..ed76d638f527 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -17,7 +17,7 @@ In short, we found locations of some parts of an object in another cluttered ima is sufficient to find the object exactly on the trainImage. For that, we can use a function from calib3d module, ie **cv.findHomography()**. If we pass the set -of points from both the images, it will find the perpective transformation of that object. Then we +of points from both the images, it will find the perspective transformation of that object. Then we can use **cv.perspectiveTransform()** to find the object. It needs atleast four correct points to find the transformation. @@ -68,7 +68,7 @@ Now we set a condition that atleast 10 matches (defined by MIN_MATCH_COUNT) are find the object. Otherwise simply show a message saying not enough matches are present. If enough matches are found, we extract the locations of matched keypoints in both the images. They -are passed to find the perpective transformation. Once we get this 3x3 transformation matrix, we use +are passed to find the perspective transformation. 
Once we get this 3x3 transformation matrix, we use it to transform the corners of queryImage to corresponding points in trainImage. Then we draw it. @code{.py} if len(good)>MIN_MATCH_COUNT: diff --git a/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown b/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown index b440f463386b..1229581ce685 100644 --- a/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown +++ b/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown @@ -28,7 +28,7 @@ If it is a greater than a threshold value, it is considered as a corner. If we p ![image](images/shitomasi_space.png) From the figure, you can see that only when \f$\lambda_1\f$ and \f$\lambda_2\f$ are above a minimum value, -\f$\lambda_{min}\f$, it is conidered as a corner(green region). +\f$\lambda_{min}\f$, it is considered as a corner(green region). Code ---- diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown index 35cb667e1a2f..5af1e5a1e054 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown @@ -144,7 +144,7 @@ cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2) ### 7.b. Rotated Rectangle Here, bounding rectangle is drawn with minimum area, so it considers the rotation also. The function -used is **cv.minAreaRect()**. It returns a Box2D structure which contains following detals - ( +used is **cv.minAreaRect()**. It returns a Box2D structure which contains following details - ( center (x,y), (width, height), angle of rotation ). But to draw this rectangle, we need 4 corners of the rectangle. 
It is obtained by the function **cv.boxPoints()** @code{.py} diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown index 831754d21266..2619ea2a7095 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown @@ -185,7 +185,7 @@ array([[[ 3, -1, 1, -1], And this is the final guy, Mr.Perfect. It retrieves all the contours and creates a full family hierarchy list. **It even tells, who is the grandpa, father, son, grandson and even beyond... :)**. -For examle, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the +For example, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the result given by OpenCV and analyze it. Again, red letters give the contour number and green letters give the hierarchy order. diff --git a/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown b/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown index e05e6e11ac66..f61fa7a8b4ea 100644 --- a/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown +++ b/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown @@ -381,7 +381,7 @@ Here is explained in detail the code for the real time application: as not, there are false correspondences or also called *outliers*. The [Random Sample Consensus](http://en.wikipedia.org/wiki/RANSAC) or *Ransac* is a non-deterministic iterative method which estimate parameters of a mathematical model from observed data producing an - approximate result as the number of iterations increase. After appyling *Ransac* all the *outliers* + approximate result as the number of iterations increase. 
After applying *Ransac* all the *outliers* will be eliminated to then estimate the camera pose with a certain probability to obtain a good solution. diff --git a/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown b/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown index a3f03c986f9b..2912c6fba530 100644 --- a/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown +++ b/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown @@ -153,7 +153,7 @@ file name before running the application, e.g.: $ GRAPH_DUMP_PATH=segm.dot ./bin/example_tutorial_porting_anisotropic_image_segmentation_gapi -Now this file can be visalized with a `dot` command like this: +Now this file can be visualized with a `dot` command like this: $ dot segm.dot -Tpng -o segm.png @@ -368,7 +368,7 @@ visualization like this: ![Anisotropic image segmentation graph with OpenCV & Fluid kernels](pics/segm_fluid.gif) -This graph doesn't differ structually from its previous version (in +This graph doesn't differ structurally from its previous version (in terms of operations and data objects), though a changed layout (on the left side of the dump) is easily noticeable. diff --git a/doc/tutorials/gapi/face_beautification/face_beautification.markdown b/doc/tutorials/gapi/face_beautification/face_beautification.markdown index e219ceed9e74..9e56db0a54b0 100644 --- a/doc/tutorials/gapi/face_beautification/face_beautification.markdown +++ b/doc/tutorials/gapi/face_beautification/face_beautification.markdown @@ -427,7 +427,7 @@ the ROI, which will lead to accuracy improvement. Unfortunately, another problem occurs if we do that: if the rectangular ROI is near the border, a describing square will probably go out of the frame --- that leads to errors of the landmarks detector. 
-To aviod such a mistake, we have to implement an algorithm that, firstly, +To avoid such a mistake, we have to implement an algorithm that, firstly, describes every rectangle by a square, then counts the farthest coordinates turned up to be outside of the frame and, finally, pads the source image by borders (e.g. single-colored) with the size counted. It will be safe to take diff --git a/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown b/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown index 755fcc45fef7..e5ca466da79c 100644 --- a/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown +++ b/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown @@ -145,7 +145,7 @@ description requires three parameters: regular "functions" which take and return data. Here network `Faces` (a detector) takes a cv::GMat and returns a cv::GMat, while network `AgeGender` is known to provide two outputs (age and gender - blobs, respecitvely) -- so its has a `std::tuple<>` as a return + blobs, respectively) -- so its has a `std::tuple<>` as a return type. 3. A topology name -- can be any non-empty string, G-API is using these names to distinguish networks inside. 
Names should be unique diff --git a/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown b/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown index 93e24a69f724..f8b121cc721b 100644 --- a/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown +++ b/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown @@ -499,7 +499,7 @@ using the following OpenCV methods: - the imwrite static method from the Highgui class to write an image to a file - the GaussianBlur static method from the Imgproc class to apply to blur the original image -We're also going to use the Mat class which is returned from the imread method and accpeted as the +We're also going to use the Mat class which is returned from the imread method and accepted as the main argument to both the GaussianBlur and the imwrite methods. ### Add an image to the project diff --git a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown index 33d5a95ff4c7..43fa143748d0 100644 --- a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown +++ b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown @@ -10,7 +10,7 @@ In this tutorial, - We will see the basics of face detection and eye detection using the Haar Feature-based Cascade Classifiers - We will use the @ref cv::CascadeClassifier class to detect objects in a video stream. Particularly, we will use the functions: - - @ref cv::CascadeClassifier::load to load a .xml classifier file. It can be either a Haar or a LBP classifer + - @ref cv::CascadeClassifier::load to load a .xml classifier file. It can be either a Haar or a LBP classifier - @ref cv::CascadeClassifier::detectMultiScale to perform the detection. 
Theory diff --git a/doc/tutorials/objdetect/traincascade.markdown b/doc/tutorials/objdetect/traincascade.markdown index 0dd4e41f4665..a31234085291 100644 --- a/doc/tutorials/objdetect/traincascade.markdown +++ b/doc/tutorials/objdetect/traincascade.markdown @@ -168,7 +168,7 @@ Command line arguments of opencv_traincascade application grouped by purposes: - `-w ` : Width of training samples (in pixels). Must have exactly the same value as used during training samples creation (opencv_createsamples utility). - `-h ` : Height of training samples (in pixels). Must have exactly the same value as used during training samples creation (opencv_createsamples utility). -- Boosted classifer parameters: +- Boosted classifier parameters: - `-bt <{DAB, RAB, LB, GAB(default)}>` : Type of boosted classifiers: DAB - Discrete AdaBoost, RAB - Real AdaBoost, LB - LogitBoost, GAB - Gentle AdaBoost. - `-minHitRate ` : Minimal desired hit rate for each stage of the classifier. Overall hit rate may be estimated as (min_hit_rate ^ number_of_stages), @cite Viola04 §4.1. - `-maxFalseAlarmRate ` : Maximal desired false alarm rate for each stage of the classifier. Overall false alarm rate may be estimated as (max_false_alarm_rate ^ number_of_stages), @cite Viola04 §4.1. diff --git a/doc/tutorials/videoio/kinect_openni.markdown b/doc/tutorials/videoio/kinect_openni.markdown index b815970f7ad8..0734ea6c5873 100644 --- a/doc/tutorials/videoio/kinect_openni.markdown +++ b/doc/tutorials/videoio/kinect_openni.markdown @@ -43,7 +43,7 @@ VideoCapture can retrieve the following data: - CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3) - CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1) - CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1) - - CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not ocluded, not shaded etc.) + - CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded etc.) 
(CV_8UC1) -# data given from BGR image generator: diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index ced32fee9bc2..9644e246a32f 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -1321,7 +1321,7 @@ struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters GridType gridType; CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING. - CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from predicion. Used by CALIB_CB_CLUSTERING. + CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING. }; #ifndef DISABLE_OPENCV_3_COMPATIBILITY diff --git a/modules/calib3d/src/calibration.cpp b/modules/calib3d/src/calibration.cpp index d9f6444156da..b99417c10f55 100644 --- a/modules/calib3d/src/calibration.cpp +++ b/modules/calib3d/src/calibration.cpp @@ -48,7 +48,7 @@ #include /* - This is stright-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet + This is straight-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet that is (in a large extent) based on the paper: Z. Zhang. "A flexible new technique for camera calibration". IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000. 
diff --git a/modules/calib3d/src/chessboard.cpp b/modules/calib3d/src/chessboard.cpp index 38051b762783..99a804550d52 100644 --- a/modules/calib3d/src/chessboard.cpp +++ b/modules/calib3d/src/chessboard.cpp @@ -2474,7 +2474,7 @@ int Chessboard::Board::validateCorners(const cv::Mat &data,cv::flann::Index &fla std::vector::const_iterator iter1 = points.begin(); for(;iter1 != points.end();++iter1) { - // we do not have to check for NaN because of getCorners(flase) + // we do not have to check for NaN because of getCorners(false) std::vector::const_iterator iter2 = iter1+1; for(;iter2 != points.end();++iter2) if(*iter1 == *iter2) @@ -3007,7 +3007,7 @@ Chessboard::Board Chessboard::detectImpl(const Mat& gray,std::vector &f if(keypoints_seed.empty()) return Chessboard::Board(); - // check how many points are likely a checkerbord corner + // check how many points are likely a checkerboard corner float response = fabs(keypoints_seed.front().response*MIN_RESPONSE_RATIO); std::vector::const_iterator seed_iter = keypoints_seed.begin(); int count = 0; diff --git a/modules/calib3d/src/chessboard.hpp b/modules/calib3d/src/chessboard.hpp index 41c64cf34705..12af4762cd3e 100644 --- a/modules/calib3d/src/chessboard.hpp +++ b/modules/calib3d/src/chessboard.hpp @@ -650,7 +650,7 @@ class Chessboard: public cv::Feature2D bool top(bool check_empty=false); // moves one corner to the top or returns false bool checkCorner()const; // returns true if the current corner belongs to at least one // none empty cell - bool isNaN()const; // returns true if the currnet corner is NaN + bool isNaN()const; // returns true if the current corner is NaN const cv::Point2f* operator*() const; // current corner coordinate cv::Point2f* operator*(); // current corner coordinate diff --git a/modules/calib3d/test/test_chesscorners_badarg.cpp b/modules/calib3d/test/test_chesscorners_badarg.cpp index e58d9d925cee..7c9758b84f94 100644 --- a/modules/calib3d/test/test_chesscorners_badarg.cpp +++ 
b/modules/calib3d/test/test_chesscorners_badarg.cpp @@ -94,7 +94,7 @@ void CV_ChessboardDetectorBadArgTest::run( int /*start_from */) img = cb.clone(); initArgs(); pattern_size = Size(2,2); - errors += run_test_case( Error::StsOutOfRange, "Invlid pattern size" ); + errors += run_test_case( Error::StsOutOfRange, "Invalid pattern size" ); pattern_size = cbg.cornersSize(); cb.convertTo(img, CV_32F); diff --git a/modules/core/include/opencv2/core/core_c.h b/modules/core/include/opencv2/core/core_c.h index 2684151f3f33..09ac1e789a89 100644 --- a/modules/core/include/opencv2/core/core_c.h +++ b/modules/core/include/opencv2/core/core_c.h @@ -1309,7 +1309,7 @@ CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, const CvArr* delta CV_DEFAULT(NULL), double scale CV_DEFAULT(1.) ); -/** Tranposes matrix. Square matrices can be transposed in-place */ +/** Transposes matrix. Square matrices can be transposed in-place */ CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); #define cvT cvTranspose diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp b/modules/core/include/opencv2/core/hal/intrin_avx.hpp index 6fc03b727489..ca315ae284b5 100644 --- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp @@ -569,7 +569,7 @@ inline v_int64x4 v256_blend(const v_int64x4& a, const v_int64x4& b) { return v_int64x4(v256_blend(v_uint64x4(a.val), v_uint64x4(b.val)).val); } // shuffle -// todo: emluate 64bit +// todo: emulate 64bit #define OPENCV_HAL_IMPL_AVX_SHUFFLE(_Tpvec, intrin) \ template \ inline _Tpvec v256_shuffle(const _Tpvec& a) \ diff --git a/modules/core/include/opencv2/core/hal/intrin_cpp.hpp b/modules/core/include/opencv2/core/hal/intrin_cpp.hpp index 67d3155f0086..15ae380e65f0 100644 --- a/modules/core/include/opencv2/core/hal/intrin_cpp.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_cpp.hpp @@ -73,7 +73,7 @@ implemented as a structure based on a one SIMD register. 
- cv::v_uint8x16 and cv::v_int8x16: sixteen 8-bit integer values (unsigned/signed) - char - cv::v_uint16x8 and cv::v_int16x8: eight 16-bit integer values (unsigned/signed) - short -- cv::v_uint32x4 and cv::v_int32x4: four 32-bit integer values (unsgined/signed) - int +- cv::v_uint32x4 and cv::v_int32x4: four 32-bit integer values (unsigned/signed) - int - cv::v_uint64x2 and cv::v_int64x2: two 64-bit integer values (unsigned/signed) - int64 - cv::v_float32x4: four 32-bit floating point values (signed) - float - cv::v_float64x2: two 64-bit floating point values (signed) - double diff --git a/modules/core/include/opencv2/core/hal/intrin_msa.hpp b/modules/core/include/opencv2/core/hal/intrin_msa.hpp index 0db137b03209..76e6bed20024 100755 --- a/modules/core/include/opencv2/core/hal/intrin_msa.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_msa.hpp @@ -1805,7 +1805,7 @@ inline v_float32x4 v_broadcast_element(const v_float32x4& a) return v_setall_f32(v_extract_n(a)); } -////// FP16 suport /////// +////// FP16 support /////// #if CV_FP16 inline v_float32x4 v_load_expand(const float16_t* ptr) { diff --git a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp index 7b3259f4c0a6..b8c250fcc226 100644 --- a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp @@ -94,7 +94,7 @@ struct v_uint16x8 } ushort get0() const { - return (ushort)wasm_i16x8_extract_lane(val, 0); // wasm_u16x8_extract_lane() unimplemeted yet + return (ushort)wasm_i16x8_extract_lane(val, 0); // wasm_u16x8_extract_lane() unimplemented yet } v128_t val; diff --git a/modules/core/include/opencv2/core/hal/msa_macros.h b/modules/core/include/opencv2/core/hal/msa_macros.h index 3ed6e58d3c8b..bd6ddb127aac 100755 --- a/modules/core/include/opencv2/core/hal/msa_macros.h +++ b/modules/core/include/opencv2/core/hal/msa_macros.h @@ -50,7 +50,7 @@ typedef double v1f64 __attribute__ 
((vector_size(8), aligned(8))); #define msa_ld1q_f32(__a) ((v4f32)__builtin_msa_ld_w(__a, 0)) #define msa_ld1q_f64(__a) ((v2f64)__builtin_msa_ld_d(__a, 0)) -/* Store 64bits vector elments values to the given memory address. */ +/* Store 64bits vector elements values to the given memory address. */ #define msa_st1_s8(__a, __b) (*((v8i8*)(__a)) = __b) #define msa_st1_s16(__a, __b) (*((v4i16*)(__a)) = __b) #define msa_st1_s32(__a, __b) (*((v2i32*)(__a)) = __b) @@ -377,7 +377,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8))); }) /* Right shift elements in a 128 bits vector by an immediate value, saturate the results and them in a 64 bits vector. - Input is signed and outpus is unsigned. */ + Input is signed and output is unsigned. */ #define msa_qrshrun_n_s16(__a, __b) \ ({ \ v8i16 __d = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)), (int)(__b)); \ diff --git a/modules/core/include/opencv2/core/opencl/opencl_info.hpp b/modules/core/include/opencv2/core/opencl/opencl_info.hpp index 21b367292553..5e5c846ad059 100644 --- a/modules/core/include/opencv2/core/opencl/opencl_info.hpp +++ b/modules/core/include/opencv2/core/opencl/opencl_info.hpp @@ -62,7 +62,7 @@ static String getDeviceTypeString(const cv::ocl::Device& device) } } - return "unkown"; + return "unknown"; } } // namespace diff --git a/modules/core/include/opencv2/core/optim.hpp b/modules/core/include/opencv2/core/optim.hpp index 70e037f89edc..f61a2b940716 100644 --- a/modules/core/include/opencv2/core/optim.hpp +++ b/modules/core/include/opencv2/core/optim.hpp @@ -165,7 +165,7 @@ class CV_EXPORTS DownhillSolver : public MinProblemSolver /** @brief Sets the initial step that will be used in downhill simplex algorithm. 
- Step, together with initial point (givin in DownhillSolver::minimize) are two `n`-dimensional + Step, together with initial point (given in DownhillSolver::minimize) are two `n`-dimensional vectors that are used to determine the shape of initial simplex. Roughly said, initial point determines the position of a simplex (it will become simplex's centroid), while step determines the spread (size in each dimension) of a simplex. To be more precise, if \f$s,x_0\in\mathbb{R}^n\f$ are diff --git a/modules/core/include/opencv2/core/vsx_utils.hpp b/modules/core/include/opencv2/core/vsx_utils.hpp index 91669bff31e1..d7c714060720 100644 --- a/modules/core/include/opencv2/core/vsx_utils.hpp +++ b/modules/core/include/opencv2/core/vsx_utils.hpp @@ -317,7 +317,7 @@ VSX_IMPL_1RG(vec_udword2, wi, vec_float4, wf, xvcvspuxds, vec_ctulo) * Also there's already an open bug https://bugs.llvm.org/show_bug.cgi?id=31837 * * So we're not able to use inline asm and only use built-in functions that CLANG supports - * and use __builtin_convertvector if clang missng any of vector conversions built-in functions + * and use __builtin_convertvector if clang missing any of vector conversions built-in functions * * todo: clang asm template bug is fixed, need to reconsider the current workarounds. */ @@ -491,7 +491,7 @@ VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo) // Only for Eigen! 
/* * changing behavior of conversion intrinsics for gcc has effect on Eigen - * so we redfine old behavior again only on gcc, clang + * so we redefine old behavior again only on gcc, clang */ #if !defined(__clang__) || __clang_major__ > 4 // ignoring second arg since Eigen only truncates toward zero diff --git a/modules/core/src/array.cpp b/modules/core/src/array.cpp index ce6e4e83cf2d..057dd4ba885c 100644 --- a/modules/core/src/array.cpp +++ b/modules/core/src/array.cpp @@ -250,7 +250,7 @@ cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, for( int i = dims - 1; i >= 0; i-- ) { if( sizes[i] < 0 ) - CV_Error( CV_StsBadSize, "one of dimesion sizes is non-positive" ); + CV_Error( CV_StsBadSize, "one of dimension sizes is non-positive" ); mat->dim[i].size = sizes[i]; if( step > INT_MAX ) CV_Error( CV_StsOutOfRange, "The array is too big" ); @@ -545,7 +545,7 @@ cvCreateSparseMat( int dims, const int* sizes, int type ) for( i = 0; i < dims; i++ ) { if( sizes[i] <= 0 ) - CV_Error( CV_StsBadSize, "one of dimesion sizes is non-positive" ); + CV_Error( CV_StsBadSize, "one of dimension sizes is non-positive" ); } CvSparseMat* arr = (CvSparseMat*)cvAlloc(sizeof(*arr)+MAX(0,dims-CV_MAX_DIM)*sizeof(arr->size[0])); diff --git a/modules/core/src/convert_scale.simd.hpp b/modules/core/src/convert_scale.simd.hpp index 56a28dd68c85..2c6d55462be1 100644 --- a/modules/core/src/convert_scale.simd.hpp +++ b/modules/core/src/convert_scale.simd.hpp @@ -53,7 +53,7 @@ cvtabs_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, } } -// variant for convrsions 16f <-> ... w/o unrolling +// variant for conversions 16f <-> ... w/o unrolling template inline void cvtabs1_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size, float a, float b ) @@ -123,7 +123,7 @@ cvt_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, } } -// variant for convrsions 16f <-> ... w/o unrolling +// variant for conversions 16f <-> ... 
w/o unrolling template inline void cvt1_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size, float a, float b ) diff --git a/modules/core/src/downhill_simplex.cpp b/modules/core/src/downhill_simplex.cpp index 15d6469465f4..5013d4c5063a 100644 --- a/modules/core/src/downhill_simplex.cpp +++ b/modules/core/src/downhill_simplex.cpp @@ -77,7 +77,7 @@ Replaced y(1,ndim,0.0) ------> y(1,ndim+1,0.0) *********************************************************************************************************************************** -The code below was used in tesing the source code. +The code below was used in testing the source code. Created by @SareeAlnaghy #include diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 104a8c7c9b30..872019dd9e10 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -1519,7 +1519,7 @@ class TlsStorage { TlsAbstraction* tls = getTlsAbstraction(); if (NULL == tls) - return; // TLS signleton is not available (terminated) + return; // TLS singleton is not available (terminated) ThreadData *pTD = tlsValue == NULL ? 
(ThreadData*)tls->getData() : (ThreadData*)tlsValue; if (pTD == NULL) return; // no OpenCV TLS data for this thread @@ -1610,7 +1610,7 @@ class TlsStorage TlsAbstraction* tls = getTlsAbstraction(); if (NULL == tls) - return NULL; // TLS signleton is not available (terminated) + return NULL; // TLS singleton is not available (terminated) ThreadData* threadData = (ThreadData*)tls->getData(); if(threadData && threadData->slots.size() > slotIdx) @@ -1646,7 +1646,7 @@ class TlsStorage TlsAbstraction* tls = getTlsAbstraction(); if (NULL == tls) - return; // TLS signleton is not available (terminated) + return; // TLS singleton is not available (terminated) ThreadData* threadData = (ThreadData*)tls->getData(); if(!threadData) diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp index 4dcca623bf99..2e1366aa07aa 100644 --- a/modules/dnn/include/opencv2/dnn/all_layers.hpp +++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp @@ -134,7 +134,7 @@ CV__DNN_INLINE_NS_BEGIN virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0; /** @deprecated Use flag `produce_cell_output` in LayerParams. - * @brief Specifies either interpret first dimension of input blob as timestamp dimenion either as sample. + * @brief Specifies either interpret first dimension of input blob as timestamp dimension either as sample. * * If flag is set to true then shape of input blob will be interpreted as [`T`, `N`, `[data dims]`] where `T` specifies number of timestamps, `N` is number of independent streams. * In this case each forward() call will iterate through `T` timestamps and update layer's state `T` times. 
diff --git a/modules/dnn/src/cuda/slice.cu b/modules/dnn/src/cuda/slice.cu index 27a166f36bdf..df45efd71955 100644 --- a/modules/dnn/src/cuda/slice.cu +++ b/modules/dnn/src/cuda/slice.cu @@ -84,7 +84,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { * Reasoning: * ---------- * Suppose an item's indices in the output tensor is [o1, o2, ...]. The indices in the input - * tensor will be [o1 + off1, o2 + off2, ...]. The rest of the elements in the input are igored. + * tensor will be [o1 + off1, o2 + off2, ...]. The rest of the elements in the input are ignored. * * If the size of the first axis of the input and output tensor is unity, the input and output indices * for all the elements will be of the form be [0, o2 + off2, ...] and [0, o2, ...] respectively. Note that diff --git a/modules/dnn/src/cuda4dnn/csl/cudnn/convolution.hpp b/modules/dnn/src/cuda4dnn/csl/cudnn/convolution.hpp index 679429ba994c..d8ff49818574 100644 --- a/modules/dnn/src/cuda4dnn/csl/cudnn/convolution.hpp +++ b/modules/dnn/src/cuda4dnn/csl/cudnn/convolution.hpp @@ -227,7 +227,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu if (std::is_same::value) CUDA4DNN_CHECK_CUDNN(cudnnSetConvolutionMathType(descriptor, CUDNN_TENSOR_OP_MATH)); } catch (...) 
{ - /* cudnnDestroyConvolutionDescriptor will not fail for a valid desriptor object */ + /* cudnnDestroyConvolutionDescriptor will not fail for a valid descriptor object */ CUDA4DNN_CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(descriptor)); throw; } diff --git a/modules/dnn/src/cuda4dnn/csl/memory.hpp b/modules/dnn/src/cuda4dnn/csl/memory.hpp index 2ffa32ffcade..05c8446beeab 100644 --- a/modules/dnn/src/cuda4dnn/csl/memory.hpp +++ b/modules/dnn/src/cuda4dnn/csl/memory.hpp @@ -266,7 +266,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { /** page-locks \p size_in_bytes bytes of memory starting from \p ptr_ * - * Pre-conditons: + * Pre-conditions: * - host memory should be unregistered */ MemoryLockGuard(void* ptr_, std::size_t size_in_bytes) { diff --git a/modules/dnn/src/cuda4dnn/csl/pointer.hpp b/modules/dnn/src/cuda4dnn/csl/pointer.hpp index 0d891126575b..45bf94bf0a3b 100644 --- a/modules/dnn/src/cuda4dnn/csl/pointer.hpp +++ b/modules/dnn/src/cuda4dnn/csl/pointer.hpp @@ -33,7 +33,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { * * A `DevicePtr` can implicitly convert to `DevicePtr`. 
* - * Specalizations: + * Specializations: * - DevicePtr/DevicePtr do not support pointer arithmetic (but relational operators are provided) * - any device pointer pointing to mutable memory is implicitly convertible to DevicePtr * - any device pointer is implicitly convertible to DevicePtr diff --git a/modules/dnn/src/cuda4dnn/csl/tensor.hpp b/modules/dnn/src/cuda4dnn/csl/tensor.hpp index b01d80320808..5ba9865f0470 100644 --- a/modules/dnn/src/cuda4dnn/csl/tensor.hpp +++ b/modules/dnn/src/cuda4dnn/csl/tensor.hpp @@ -67,7 +67,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { */ template class Tensor { - static_assert(std::is_standard_layout::value, "T must staisfy StandardLayoutType"); + static_assert(std::is_standard_layout::value, "T must satisfy StandardLayoutType"); public: using value_type = typename ManagedPtr::element_type; @@ -553,7 +553,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { * - [start, end) represents a forward range containing the length of the axes in order * - the number of axis lengths must be less than or equal to the rank * - at most one axis length is allowed for length deduction - * - the lengths provided must ensure that the total number of elements remains unchnged + * - the lengths provided must ensure that the total number of elements remains unchanged * * Exception Guarantee: Strong */ @@ -898,7 +898,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { * - [start, end) represents a forward range containing length of the axes in order starting from axis zero * - the number of axis lengths must be less than or equal to the tensor rank * - at most one axis length is allowed for length deduction - * - the lengths provided must ensure that the total number of elements remains unchnged + * - the lengths provided must ensure that the total number of elements remains unchanged * * Exception Guarantee: Strong */ diff --git a/modules/dnn/src/cuda4dnn/csl/tensor_ops.hpp 
b/modules/dnn/src/cuda4dnn/csl/tensor_ops.hpp index 1d396745c5d1..fc29d9b1211c 100644 --- a/modules/dnn/src/cuda4dnn/csl/tensor_ops.hpp +++ b/modules/dnn/src/cuda4dnn/csl/tensor_ops.hpp @@ -35,7 +35,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { * Pre-conditions: * - \p dest and \p src must have the same shape * - * Exception Gaurantee: Basic + * Exception Guarantee: Basic */ template inline void copy(const Stream& stream, TensorSpan dest, TensorView src) { @@ -50,7 +50,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { * - \p A and \p B must meet the mathematical requirements for matrix multiplication * - \p result must be large enough to hold the result * - * Exception Gaurantee: Basic + * Exception Guarantee: Basic */ template inline void gemm(const cublas::Handle& handle, T beta, TensorSpan result, T alpha, bool transa, TensorView A, bool transb, TensorView B) { @@ -108,7 +108,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { * Pre-conditions: * - \p A and \p result must be compatible tensors * - * Exception Gaurantee: Basic + * Exception Guarantee: Basic */ template inline void softmax(const cudnn::Handle& handle, TensorSpan output, TensorView input, int channel_axis, bool log) { diff --git a/modules/dnn/src/cuda4dnn/primitives/pooling.hpp b/modules/dnn/src/cuda4dnn/primitives/pooling.hpp index 544d8110ccf8..bd8a73c2f214 100644 --- a/modules/dnn/src/cuda4dnn/primitives/pooling.hpp +++ b/modules/dnn/src/cuda4dnn/primitives/pooling.hpp @@ -103,7 +103,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { CV_Assert(pooling_order == pads_end.size()); /* cuDNN rounds down by default; hence, if ceilMode is false, we do nothing - * otherwise, we add extra padding towards the end so that the convolution arithmetic yeilds + * otherwise, we add extra padding towards the end so that the convolution arithmetic yields * the correct output size without having to deal with fancy fractional sizes */ auto 
pads_end_modified = pads_end; diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp index 6b5c611c9a0e..2688428dc310 100644 --- a/modules/dnn/src/ie_ngraph.cpp +++ b/modules/dnn/src/ie_ngraph.cpp @@ -622,7 +622,7 @@ void InfEngineNgraphNet::forward(const std::vector >& outBlo try { wrapper->outProms[processedOutputs].setException(std::current_exception()); } catch(...) { - CV_LOG_ERROR(NULL, "DNN: Exception occured during async inference exception propagation"); + CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation"); } } } @@ -635,7 +635,7 @@ void InfEngineNgraphNet::forward(const std::vector >& outBlo try { wrapper->outProms[processedOutputs].setException(e); } catch(...) { - CV_LOG_ERROR(NULL, "DNN: Exception occured during async inference exception propagation"); + CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation"); } } } diff --git a/modules/dnn/src/onnx/opencv-onnx.proto b/modules/dnn/src/onnx/opencv-onnx.proto index b8b616a0d8ac..b24220adb9a3 100644 --- a/modules/dnn/src/onnx/opencv-onnx.proto +++ b/modules/dnn/src/onnx/opencv-onnx.proto @@ -116,7 +116,7 @@ message AttributeProto { // The type field MUST be present for this version of the IR. // For 0.0.1 versions of the IR, this field was not defined, and - // implementations needed to use has_field hueristics to determine + // implementations needed to use has_field heuristics to determine // which value field was in use. For IR_VERSION 0.0.2 or later, this // field MUST be set and match the f|i|s|t|... field in use. This // change was made to accommodate proto3 implementations. 
@@ -323,7 +323,7 @@ message TensorProto { // For float and complex64 values // Complex64 tensors are encoded as a single array of floats, // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the + // and the corresponding imaginary component appearing in the // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // is encoded as [1.0, 2.0 ,3.0 ,4.0] // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. @@ -373,7 +373,7 @@ message TensorProto { // For double // Complex64 tensors are encoded as a single array of doubles, // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the + // and the corresponding imaginary component appearing in the // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // is encoded as [1.0, 2.0 ,3.0 ,4.0] // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 diff --git a/modules/dnn/src/op_cuda.hpp b/modules/dnn/src/op_cuda.hpp index 1cf3890fe0f0..ccb7877e88d7 100644 --- a/modules/dnn/src/op_cuda.hpp +++ b/modules/dnn/src/op_cuda.hpp @@ -350,7 +350,7 @@ namespace cv { namespace dnn { private: /* The same tensor memory can be reused by different layers whenever possible. - * Hence, it is possible for different backend warppers to point to the same memory. + * Hence, it is possible for different backend wrappers to point to the same memory. * However, it may use only a part of that memory and have a different shape. 
* * We store the common information such as device tensor and its corresponding host memory in diff --git a/modules/dnn/src/vkcom/src/context.cpp b/modules/dnn/src/vkcom/src/context.cpp index 01f8eda66822..5a6376f1126c 100644 --- a/modules/dnn/src/vkcom/src/context.cpp +++ b/modules/dnn/src/vkcom/src/context.cpp @@ -243,7 +243,7 @@ Context::Context() queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queueCreateInfo.queueFamilyIndex = kQueueFamilyIndex; queueCreateInfo.queueCount = 1; // create one queue in this family. We don't need more. - float queuePriorities = 1.0; // we only have one queue, so this is not that imporant. + float queuePriorities = 1.0; // we only have one queue, so this is not that important. queueCreateInfo.pQueuePriorities = &queuePriorities; VkDeviceCreateInfo deviceCreateInfo = {}; diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 7b9f3a6dd91c..24f0af516341 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -398,7 +398,7 @@ code which is distributed under GPL. 
class CV_EXPORTS_W MSER : public Feature2D { public: - /** @brief Full consturctor for %MSER detector + /** @brief Full constructor for %MSER detector @param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$ @param _min_area prune the area which smaller than minArea diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h index 97c36a2094d0..4254edc6a0ae 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.h +++ b/modules/features2d/src/kaze/nldiffusion_functions.h @@ -36,7 +36,7 @@ void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int // Nonlinear diffusion filtering scalar step void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); -// For non-maxima suppresion +// For non-maxima suppression bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); // Image downsampling diff --git a/modules/features2d/src/mser.cpp b/modules/features2d/src/mser.cpp index 85187f7c811a..a37b4ea4822f 100755 --- a/modules/features2d/src/mser.cpp +++ b/modules/features2d/src/mser.cpp @@ -983,7 +983,7 @@ extractMSER_8uC3( const Mat& src, double s = (double)(lr->size-lr->sizei)/(lr->dt-lr->di); if ( s < lr->s ) { - // skip the first one and check stablity + // skip the first one and check stability if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) ) { if ( lr->tmsr == NULL ) diff --git a/modules/flann/include/opencv2/flann/simplex_downhill.h b/modules/flann/include/opencv2/flann/simplex_downhill.h index 9fd84f488cc0..20b7e03c9202 100644 --- a/modules/flann/include/opencv2/flann/simplex_downhill.h +++ b/modules/flann/include/opencv2/flann/simplex_downhill.h @@ -131,7 +131,7 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL ) } if (val_r~); - Operation identifier (a string); -- Metadata callback -- desribe what is the output value format(s), +- Metadata callback 
-- describe what is the output value format(s), given the input and arguments. - Use ~OpType::on(...)~ to use a new kernel ~OpType~ to construct graphs. #+LaTeX: {\footnotesize diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp index 079534a98ea7..85a90ce797af 100644 --- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp +++ b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp @@ -124,7 +124,7 @@ class GAPI_EXPORTS GCPUKernel F m_f; }; -// FIXME: This is an ugly ad-hoc imlpementation. TODO: refactor +// FIXME: This is an ugly ad-hoc implementation. TODO: refactor namespace detail { diff --git a/modules/gapi/include/opencv2/gapi/fluid/gfluidkernel.hpp b/modules/gapi/include/opencv2/gapi/fluid/gfluidkernel.hpp index 4c6b91356130..3f7a0f811c15 100644 --- a/modules/gapi/include/opencv2/gapi/fluid/gfluidkernel.hpp +++ b/modules/gapi/include/opencv2/gapi/fluid/gfluidkernel.hpp @@ -39,7 +39,7 @@ namespace fluid */ GAPI_EXPORTS cv::gapi::GBackend backend(); /** @} */ -} // namespace flud +} // namespace fluid } // namespace gapi diff --git a/modules/gapi/include/opencv2/gapi/gcompiled.hpp b/modules/gapi/include/opencv2/gapi/gcompiled.hpp index 0a411126c9fa..b08451af2675 100644 --- a/modules/gapi/include/opencv2/gapi/gcompiled.hpp +++ b/modules/gapi/include/opencv2/gapi/gcompiled.hpp @@ -148,7 +148,7 @@ class GAPI_EXPORTS GCompiled * @param outs vector of output cv::Mat objects to produce by the * computation. * - * Numbers of elements in ins/outs vectos must match numbers of + * Numbers of elements in ins/outs vectors must match numbers of * inputs/outputs which were used to define the source GComputation. 
*/ void operator() (const std::vector &ins, // Compatibility overload diff --git a/modules/gapi/include/opencv2/gapi/gcomputation.hpp b/modules/gapi/include/opencv2/gapi/gcomputation.hpp index 678b22d47c02..1ff874ae7783 100644 --- a/modules/gapi/include/opencv2/gapi/gcomputation.hpp +++ b/modules/gapi/include/opencv2/gapi/gcomputation.hpp @@ -314,7 +314,7 @@ class GAPI_EXPORTS GComputation * @param args compilation arguments for underlying compilation * process. * - * Numbers of elements in ins/outs vectos must match numbers of + * Numbers of elements in ins/outs vectors must match numbers of * inputs/outputs which were used to define this GComputation. */ void apply(const std::vector& ins, // Compatibility overload @@ -373,7 +373,7 @@ class GAPI_EXPORTS GComputation // template // GCompiled compile(const Ts&... metas, GCompileArgs &&args) // - // But not all compilers can hande this (and seems they shouldn't be able to). + // But not all compilers can handle this (and seems they shouldn't be able to). // FIXME: SFINAE looks ugly in the generated documentation /** * @overload diff --git a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp index 8cea478813fe..a1373f3f8517 100644 --- a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp +++ b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp @@ -101,7 +101,7 @@ namespace detail template<> struct GTypeOf { using type = cv::GScalar; }; template struct GTypeOf > { using type = cv::GArray; }; // FIXME: This is not quite correct since IStreamSource may produce not only Mat but also Scalar - // and vector data. TODO: Extend the type dispatchig on these types too. + // and vector data. TODO: Extend the type dispatching on these types too. 
template<> struct GTypeOf { using type = cv::GMat;}; template using g_type_of_t = typename GTypeOf::type; diff --git a/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp b/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp index a51fa8ff5df8..b729171ae15c 100644 --- a/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp +++ b/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp @@ -94,7 +94,7 @@ class GAPI_EXPORTS GOCLKernel F m_f; }; -// FIXME: This is an ugly ad-hoc imlpementation. TODO: refactor +// FIXME: This is an ugly ad-hoc implementation. TODO: refactor namespace detail { diff --git a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp index 4168bcca2b0a..faa555063afe 100644 --- a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp +++ b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp @@ -35,7 +35,7 @@ namespace wip { * This class implements IStreamSource interface. * Its constructor takes the same parameters as cv::VideoCapture does. * - * Please make sure that videoio OpenCV module is avaiable before using + * Please make sure that videoio OpenCV module is available before using * this in your application (G-API doesn't depend on it directly). 
* * @note stream sources are passed to G-API via shared pointers, so diff --git a/modules/gapi/samples/draw_example.cpp b/modules/gapi/samples/draw_example.cpp index 9826da6a7b75..4c4ee8e5834f 100644 --- a/modules/gapi/samples/draw_example.cpp +++ b/modules/gapi/samples/draw_example.cpp @@ -7,7 +7,7 @@ int main(int argc, char *argv[]) { if (argc < 2) { - std::cerr << "Filename requried" << std::endl; + std::cerr << "Filename required" << std::endl; return 1; } diff --git a/modules/gapi/src/api/ft_render.cpp b/modules/gapi/src/api/ft_render.cpp index 9d9f6b3370ca..7561dff83328 100644 --- a/modules/gapi/src/api/ft_render.cpp +++ b/modules/gapi/src/api/ft_render.cpp @@ -61,13 +61,13 @@ cv::Size cv::gapi::wip::draw::FTTextRender::Priv::getTextSize(const std::wstring // or decrement (for right-to-left writing) the pen position after a // glyph has been rendered when processing text // - // widht (bitmap->width) - The width of glyph + // width (bitmap->width) - The width of glyph // // - // Algorihm to compute size of the text bounding box: + // Algorithm to compute size of the text bounding box: // // 1) Go through all symbols and shift pen position and save glyph parameters (left, advance, width) - // If left + pen postion < 0 set left to 0. For example it's maybe happened + // If left + pen position < 0 set left to 0. 
For example it may happen if we print first letter 'J' or any other letter with negative 'left' // We want to render glyph in pen position + left, so we must't allow it to be negative // diff --git a/modules/gapi/src/api/render_ocv.cpp b/modules/gapi/src/api/render_ocv.cpp index 4aa2388f7cbf..74a0b07a8aaa 100644 --- a/modules/gapi/src/api/render_ocv.cpp +++ b/modules/gapi/src/api/render_ocv.cpp @@ -184,7 +184,7 @@ void drawPrimitivesOCV(cv::Mat& in, cv::Point org(0, mask.rows - baseline); cv::putText(mask, tp.text, org, tp.ff, tp.fs, 255, tp.thick); - // Org is bottom left point, trasform it to top left point for blendImage + // Org is bottom left point, transform it to top left point for blendImage cv::Point tl(tp.org.x, tp.org.y - mask.size().height + baseline); blendTextMask(in, mask, tl, tp.color); @@ -208,7 +208,7 @@ void drawPrimitivesOCV(cv::Mat& in, cv::Point org(0, mask.rows - baseline); ftpr->putText(mask, ftp.text, org, ftp.fh); - // Org is bottom left point, trasform it to top left point for blendImage + // Org is bottom left point, transform it to top left point for blendImage cv::Point tl(ftp.org.x, ftp.org.y - mask.size().height + baseline); blendTextMask(in, mask, tl, color); diff --git a/modules/gapi/src/backends/fluid/gfluidimgproc.cpp b/modules/gapi/src/backends/fluid/gfluidimgproc.cpp index d1445675fa06..ee0dd019690c 100644 --- a/modules/gapi/src/backends/fluid/gfluidimgproc.cpp +++ b/modules/gapi/src/backends/fluid/gfluidimgproc.cpp @@ -1823,7 +1823,7 @@ GAPI_FLUID_KERNEL(GFluidBayerGR2RGB, cv::gapi::imgproc::GBayerGR2RGB, false) } }; -} // namespace fliud +} // namespace fluid } // namespace gapi } // namespace cv diff --git a/modules/gapi/src/backends/fluid/gfluidimgproc_func.dispatch.cpp b/modules/gapi/src/backends/fluid/gfluidimgproc_func.dispatch.cpp index 7b6dfb11f2e1..7854d3e988fa 100644 --- a/modules/gapi/src/backends/fluid/gfluidimgproc_func.dispatch.cpp +++ b/modules/gapi/src/backends/fluid/gfluidimgproc_func.dispatch.cpp @@
-209,7 +209,7 @@ RUN_MEDBLUR3X3_IMPL( float) #undef RUN_MEDBLUR3X3_IMPL -} // namespace fliud +} // namespace fluid } // namespace gapi } // namespace cv diff --git a/modules/gapi/src/backends/fluid/gfluidutils.hpp b/modules/gapi/src/backends/fluid/gfluidutils.hpp index fa15ee9e6267..4da16f2dee13 100644 --- a/modules/gapi/src/backends/fluid/gfluidutils.hpp +++ b/modules/gapi/src/backends/fluid/gfluidutils.hpp @@ -25,7 +25,7 @@ using cv::gapi::own::rintd; //-------------------------------- // -// Macros for mappig of data types +// Macros for mapping of data types // //-------------------------------- diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index d754a27810b1..df6e347ff5da 100644 --- a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -185,7 +185,7 @@ struct IEUnit { // The practice shows that not all inputs and not all outputs // are mandatory to specify in IE model. // So what we're concerned here about is: - // if opeation's (not topology's) input/output number is + // if operation's (not topology's) input/output number is // greater than 1, then we do care about input/output layer // names. Otherwise, names are picked up automatically. // TODO: Probably this check could be done at the API entry point? (gnet) diff --git a/modules/gapi/src/backends/ocl/goclimgproc.hpp b/modules/gapi/src/backends/ocl/goclimgproc.hpp index 864f5fef7011..f692ed5dc26b 100644 --- a/modules/gapi/src/backends/ocl/goclimgproc.hpp +++ b/modules/gapi/src/backends/ocl/goclimgproc.hpp @@ -15,7 +15,7 @@ namespace cv { namespace gimpl { -// NB: This is what a "Kernel Package" from the origianl Wiki doc should be. +// NB: This is what a "Kernel Package" from the original Wiki doc should be. 
void loadOCLImgProc(std::map &kmap); }} diff --git a/modules/gapi/src/compiler/passes/islands.cpp b/modules/gapi/src/compiler/passes/islands.cpp index dd14ab19095b..1ca99311c025 100644 --- a/modules/gapi/src/compiler/passes/islands.cpp +++ b/modules/gapi/src/compiler/passes/islands.cpp @@ -32,7 +32,7 @@ namespace { // // In this case, Data object is part of Island A if and only if: // - Data object's producer is part of Island A, - // - AND any of Data obejct's consumers is part of Island A. + // - AND any of Data object's consumers is part of Island A. // // Op["island0"] --> Data[ ? ] --> Op["island0"] // : @@ -147,7 +147,7 @@ void cv::gimpl::passes::checkIslands(ade::passes::PassContext &ctx) // Run the recursive traversal process as described in 5/a-d. // This process is like a flood-fill traversal for island. - // If there's to distint successful flood-fills happened for the same island + // If there are two distinct successful flood-fills for the same island // name, there are two islands with this name. std::stack stack; stack.push(tagged_nh); diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp index 4f4c11048784..ba19ae17b77d 100644 --- a/modules/gapi/src/executor/gstreamingexecutor.cpp +++ b/modules/gapi/src/executor/gstreamingexecutor.cpp @@ -198,7 +198,7 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs) // "Stop" is received. // // Queue reader is the class which encapsulates all this logic and -// provies threads with a managed storage and an easy API to obtain +// provides threads with a managed storage and an easy API to obtain // data.
class QueueReader { diff --git a/modules/gapi/test/common/gapi_core_tests.hpp b/modules/gapi/test/common/gapi_core_tests.hpp index d1bfa6aa9dbf..8f6f573112c0 100644 --- a/modules/gapi/test/common/gapi_core_tests.hpp +++ b/modules/gapi/test/common/gapi_core_tests.hpp @@ -67,7 +67,7 @@ inline std::ostream& operator<<(std::ostream& os, bitwiseOp op) // initMatsRandU - function that is used to initialize input/output data // FIXTURE_API(mathOp,bool,double,bool) - test-specific parameters (types) // 4 - number of test-specific parameters -// opType, testWithScalar, scale, doReverseOp - test-spcific parameters (names) +// opType, testWithScalar, scale, doReverseOp - test-specific parameters (names) // // We get: // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function diff --git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp index 601d39d77775..64d842ddfaa6 100644 --- a/modules/gapi/test/common/gapi_core_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp @@ -294,7 +294,7 @@ TEST_P(Polar2CartTest, AccuracyTest) // expect of single-precision elementary functions implementation. // // However, good idea is making such threshold configurable: parameter - // of this test - which a specific test istantiation could setup. + // of this test - which a specific test instantiation could setup. // // Note that test instantiation for the OpenCV back-end could even let // the threshold equal to zero, as CV back-end calls the same kernel. @@ -340,7 +340,7 @@ TEST_P(Cart2PolarTest, AccuracyTest) // expect of single-precision elementary functions implementation. // // However, good idea is making such threshold configurable: parameter - // of this test - which a specific test istantiation could setup. + // of this test - which a specific test instantiation could setup. 
// // Note that test instantiation for the OpenCV back-end could even let // the threshold equal to zero, as CV back-end calls the same kernel. diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp index 11104a455b74..57cc1195f239 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -19,7 +19,7 @@ namespace opencv_test // initMatrixRandN - function that is used to initialize input/output data // FIXTURE_API(CompareMats,int,int) - test-specific parameters (types) // 3 - number of test-specific parameters -// cmpF, kernSize, borderType - test-spcific parameters (names) +// cmpF, kernSize, borderType - test-specific parameters (names) // // We get: // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function diff --git a/modules/gapi/test/gapi_async_test.cpp b/modules/gapi/test/gapi_async_test.cpp index 9702119b528d..38a3bbb5ec6c 100644 --- a/modules/gapi/test/gapi_async_test.cpp +++ b/modules/gapi/test/gapi_async_test.cpp @@ -426,7 +426,7 @@ struct output_args_lifetime : ::testing::Test{ static constexpr const int num_of_requests = 20; }; TYPED_TEST_CASE_P(output_args_lifetime); -//There are intentionaly no actual checks (asserts and verify) in output_args_lifetime tests. +//There are intentionally no actual checks (asserts and verify) in output_args_lifetime tests. //They are more of example use-cases than real tests. (ASAN/valgrind can still catch issues here) TYPED_TEST_P(output_args_lifetime, callback){ diff --git a/modules/gapi/test/gapi_smoke_test.cpp b/modules/gapi/test/gapi_smoke_test.cpp index 9ac47f6d7478..ae068a473b75 100644 --- a/modules/gapi/test/gapi_smoke_test.cpp +++ b/modules/gapi/test/gapi_smoke_test.cpp @@ -64,7 +64,7 @@ TEST(GAPI, Mat_Recreate) EXPECT_EQ(m3.at(0, 0), m4.at(0, 0)); // cv::Mat::create must be NOOP if we don't change the meta, - // even if the origianl mat is created from handle. 
+ // even if the original mat is created from handle. m4.create(3, 3, CV_8U); EXPECT_EQ(m3.rows, m4.rows); EXPECT_EQ(m3.cols, m4.cols); diff --git a/modules/imgproc/include/opencv2/imgproc/imgproc_c.h b/modules/imgproc/include/opencv2/imgproc/imgproc_c.h index 9f7131f7b838..86dc119fdd37 100644 --- a/modules/imgproc/include/opencv2/imgproc/imgproc_c.h +++ b/modules/imgproc/include/opencv2/imgproc/imgproc_c.h @@ -1151,7 +1151,7 @@ CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); /** @brief Returns the polygon points which make up the given ellipse. The ellipse is define by the box of size 'axes' rotated 'angle' around the 'center'. A partial -sweep of the ellipse arc can be done by spcifying arc_start and arc_end to be something other than +sweep of the ellipse arc can be done by specifying arc_start and arc_end to be something other than 0 and 360, respectively. The input array 'pts' must be large enough to hold the result. The total number of points stored into 'pts' is returned by this function. @see cv::ellipse2Poly diff --git a/modules/imgproc/src/approx.cpp b/modules/imgproc/src/approx.cpp index 195cd1639923..e581fb09e742 100644 --- a/modules/imgproc/src/approx.cpp +++ b/modules/imgproc/src/approx.cpp @@ -630,7 +630,7 @@ approxPolyDP_( const Point_* src_contour, int count0, Point_* dst_contour, WRITE_PT( src_contour[count-1] ); // last stage: do final clean-up of the approximated contour - - // remove extra points on the [almost] stright lines. + // remove extra points on the [almost] straight lines. is_closed = is_closed0; count = new_count; pos = is_closed ? 
count - 1 : 0; diff --git a/modules/imgproc/src/shapedescr.cpp b/modules/imgproc/src/shapedescr.cpp index 8ba4b41424a5..a8c64d478992 100644 --- a/modules/imgproc/src/shapedescr.cpp +++ b/modules/imgproc/src/shapedescr.cpp @@ -776,7 +776,7 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points ) namespace cv { -// Calculates bounding rectagnle of a point set or retrieves already calculated +// Calculates bounding rectangle of a point set or retrieves already calculated static Rect pointSetBoundingRect( const Mat& points ) { int npoints = points.checkVector(2); @@ -1392,7 +1392,7 @@ cvFitEllipse2( const CvArr* array ) return cvBox2D(cv::fitEllipse(points)); } -/* Calculates bounding rectagnle of a point set or retrieves already calculated */ +/* Calculates bounding rectangle of a point set or retrieves already calculated */ CV_IMPL CvRect cvBoundingRect( CvArr* array, int update ) { diff --git a/modules/imgproc/test/test_approxpoly.cpp b/modules/imgproc/test/test_approxpoly.cpp index 77be6bbf393f..f5b7248fafd4 100644 --- a/modules/imgproc/test/test_approxpoly.cpp +++ b/modules/imgproc/test/test_approxpoly.cpp @@ -325,7 +325,7 @@ void CV_ApproxPolyTest::run( int /*start_from*/ ) if( DstSeq == NULL ) { ts->printf( cvtest::TS::LOG, - "cvApproxPoly returned NULL for contour #%d, espilon = %g\n", i, Eps ); + "cvApproxPoly returned NULL for contour #%d, epsilon = %g\n", i, Eps ); code = cvtest::TS::FAIL_INVALID_OUTPUT; goto _exit_; } // if( DstSeq == NULL ) diff --git a/modules/imgproc/test/test_intersection.cpp b/modules/imgproc/test/test_intersection.cpp index 0e419c4702c9..93909b3a9ee9 100644 --- a/modules/imgproc/test/test_intersection.cpp +++ b/modules/imgproc/test/test_intersection.cpp @@ -60,7 +60,7 @@ namespace opencv_test { namespace { // 6 - partial intersection, rectangle on top of different size // 7 - full intersection, rectangle fully enclosed in the other // 8 - partial intersection, rectangle corner just touching. 
point contact -// 9 - partial intersetion. rectangle side by side, line contact +// 9 - partial intersection. rectangle side by side, line contact static void compare(const std::vector& test, const std::vector& target) { diff --git a/modules/java/android_sdk/CMakeLists.txt b/modules/java/android_sdk/CMakeLists.txt index fef286afbb67..1f50f84339ed 100644 --- a/modules/java/android_sdk/CMakeLists.txt +++ b/modules/java/android_sdk/CMakeLists.txt @@ -44,7 +44,7 @@ foreach(file ${seed_project_files_rel}) endforeach() list(APPEND depends gen_opencv_java_source "${OPENCV_DEPHELPER}/gen_opencv_java_source") -ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Andoid SDK) source files" ${depends}) +ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Android SDK) source files" ${depends}) file(REMOVE "${OPENCV_DEPHELPER}/${the_module}_android_source_copy") # force rebuild after CMake run set(depends ${the_module}_android_source_copy "${OPENCV_DEPHELPER}/${the_module}_android_source_copy") @@ -134,7 +134,7 @@ foreach(file ${__files_rel}) endforeach() list(APPEND depends gen_opencv_java_source "${OPENCV_DEPHELPER}/gen_opencv_java_source") -ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Andoid SDK) source files" ${depends}) +ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Android SDK) source files" ${depends}) file(REMOVE "${OPENCV_DEPHELPER}/${the_module}_android_source_copy") # force rebuild after CMake run set(depends ${the_module}_android_source_copy "${OPENCV_DEPHELPER}/${the_module}_android_source_copy") diff --git a/modules/java/generator/android/java/org/opencv/android/CameraBridgeViewBase.java b/modules/java/generator/android/java/org/opencv/android/CameraBridgeViewBase.java index 4ee14e008f52..07c059b7d97d 100644 --- a/modules/java/generator/android/java/org/opencv/android/CameraBridgeViewBase.java +++ 
b/modules/java/generator/android/java/org/opencv/android/CameraBridgeViewBase.java @@ -248,7 +248,7 @@ public void enableView() { /** * This method is provided for clients, so they can disable camera connection and stop - * the delivery of frames even though the surface view itself is not destroyed and still stays on the scren + * the delivery of frames even though the surface view itself is not destroyed and still stays on the screen */ public void disableView() { synchronized(mSyncObject) { diff --git a/modules/js/perf/README.md b/modules/js/perf/README.md index c851bd51dd6f..2389aab2c684 100644 --- a/modules/js/perf/README.md +++ b/modules/js/perf/README.md @@ -32,4 +32,4 @@ To run performance tests, please launch a local web server in /bin fo Navigate the web browser to the kernel page you want to test, like http://localhost:8080/perf/imgproc/cvtcolor.html. -You can input the paramater, and then click the `Run` button to run the specific case, or it will run all the cases. +You can input the parameter, and then click the `Run` button to run the specific case, or it will run all the cases. diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp index a92b8f9a90ea..7fdb460eb3ce 100644 --- a/modules/ml/include/opencv2/ml.hpp +++ b/modules/ml/include/opencv2/ml.hpp @@ -1683,7 +1683,7 @@ class CV_EXPORTS_W LogisticRegression : public StatModel /** @brief This function returns the trained parameters arranged across rows. - For a two class classifcation problem, it returns a row matrix. It returns learnt parameters of + For a two class classification problem, it returns a row matrix. It returns learnt parameters of the Logistic Regression as a matrix of type CV_32F. 
*/ CV_WRAP virtual Mat get_learnt_thetas() const = 0; diff --git a/modules/python/test/test_algorithm_rw.py b/modules/python/test/test_algorithm_rw.py index c925a99e7b37..29351869be48 100644 --- a/modules/python/test/test_algorithm_rw.py +++ b/modules/python/test/test_algorithm_rw.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -"""Algorithm serializaion test.""" +"""Algorithm serialization test.""" import tempfile import os import cv2 as cv diff --git a/modules/python/test/test_cuda.py b/modules/python/test/test_cuda.py index abe4a1706a0b..4d6249143583 100644 --- a/modules/python/test/test_cuda.py +++ b/modules/python/test/test_cuda.py @@ -181,7 +181,7 @@ def test_cudacodec(self): self.assertTrue('GpuMat' in str(type(gpu_mat)), msg=type(gpu_mat)) #TODO: print(cv.utils.dumpInputArray(gpu_mat)) # - no support for GpuMat - # not checking output, therefore sepearate tests for different signatures is unecessary + # not checking output, therefore separate tests for different signatures is unnecessary ret, _gpu_mat2 = reader.nextFrame(gpu_mat) #TODO: self.assertTrue(gpu_mat == gpu_mat2) self.assertTrue(ret) diff --git a/modules/python/test/test_persistence.py b/modules/python/test/test_persistence.py index 4c1ec8ee0ae1..cfa4225ebeb5 100644 --- a/modules/python/test/test_persistence.py +++ b/modules/python/test/test_persistence.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -""""Core serializaion tests.""" +""""Core serialization tests.""" import tempfile import os import cv2 as cv diff --git a/modules/stitching/include/opencv2/stitching/detail/matchers.hpp b/modules/stitching/include/opencv2/stitching/detail/matchers.hpp index c933a36d0742..ef4684fd0502 100644 --- a/modules/stitching/include/opencv2/stitching/detail/matchers.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/matchers.hpp @@ -215,14 +215,14 @@ finds two best matches for each feature and leaves the best one only if the ratio between descriptor distances is greater than the threshold match_conf. 
Unlike cv::detail::BestOf2NearestMatcher this matcher uses affine -transformation (affine trasformation estimate will be placed in matches_info). +transformation (affine transformation estimate will be placed in matches_info). @sa cv::detail::FeaturesMatcher cv::detail::BestOf2NearestMatcher */ class CV_EXPORTS_W AffineBestOf2NearestMatcher : public BestOf2NearestMatcher { public: - /** @brief Constructs a "best of 2 nearest" matcher that expects affine trasformation + /** @brief Constructs a "best of 2 nearest" matcher that expects affine transformation between images @param full_affine whether to use full affine transformation with 6 degress of freedom or reduced diff --git a/modules/ts/include/opencv2/ts/ts_gtest.h b/modules/ts/include/opencv2/ts/ts_gtest.h index cd62024d5856..0cffe61fea2e 100644 --- a/modules/ts/include/opencv2/ts/ts_gtest.h +++ b/modules/ts/include/opencv2/ts/ts_gtest.h @@ -11367,7 +11367,7 @@ void UniversalTersePrint(const T& value, ::std::ostream* os) { // NUL-terminated string. template void UniversalPrint(const T& value, ::std::ostream* os) { - // A workarond for the bug in VC++ 7.1 that prevents us from instantiating + // A workaround for the bug in VC++ 7.1 that prevents us from instantiating // UniversalPrinter with T directly. 
typedef T T1; UniversalPrinter::Print(value, os); diff --git a/modules/ts/misc/run_android.py b/modules/ts/misc/run_android.py index 84f290728893..4aa2b0dd782e 100644 --- a/modules/ts/misc/run_android.py +++ b/modules/ts/misc/run_android.py @@ -94,11 +94,11 @@ def dump(self, exe): # get test instrumentation info instrumentation_tag = [t for t in tags if t.startswith("instrumentation ")] if not instrumentation_tag: - raise Err("Can not find instrumentation detials in: %s", exe) + raise Err("Can not find instrumentation details in: %s", exe) res.pkg_runner = re.search(r"^[ ]+A: android:name\(0x[0-9a-f]{8}\)=\"(?P.*?)\" \(Raw: \"(?P=runner)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("runner") res.pkg_target = re.search(r"^[ ]+A: android:targetPackage\(0x[0-9a-f]{8}\)=\"(?P.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("pkg") if not res.pkg_name or not res.pkg_runner or not res.pkg_target: - raise Err("Can not find instrumentation detials in: %s", exe) + raise Err("Can not find instrumentation details in: %s", exe) return res diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index 98f29f33c532..d710b2d0c64e 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -452,7 +452,7 @@ int BadArgTest::run_test_case( int expected_code, const string& _descr ) { thrown = true; if (e.code != expected_code && - e.code != cv::Error::StsError && e.code != cv::Error::StsAssert // Exact error codes support will be dropped. Checks should provide proper text messages intead. + e.code != cv::Error::StsError && e.code != cv::Error::StsAssert // Exact error codes support will be dropped. Checks should provide proper text messages instead. 
) { ts->printf(TS::LOG, "%s (test case #%d): the error code %d is different from the expected %d\n", diff --git a/modules/video/src/bgfg_KNN.cpp b/modules/video/src/bgfg_KNN.cpp index 39cd6457e927..1ddf1b7d519d 100755 --- a/modules/video/src/bgfg_KNN.cpp +++ b/modules/video/src/bgfg_KNN.cpp @@ -110,7 +110,7 @@ class BackgroundSubtractorKNNImpl CV_FINAL : public BackgroundSubtractorKNN //set parameters // N - the number of samples stored in memory per model nN = defaultNsamples; - //kNN - k nearest neighbour - number on NN for detcting background - default K=[0.1*nN] + //kNN - k nearest neighbour - number on NN for detecting background - default K=[0.1*nN] nkNN=MAX(1,cvRound(0.1*nN*3+0.40)); //Tb - Threshold Tb*kernelwidth @@ -292,7 +292,7 @@ class BackgroundSubtractorKNNImpl CV_FINAL : public BackgroundSubtractorKNN //less important parameters - things you might change but be careful //////////////////////// int nN;//totlal number of samples - int nkNN;//number on NN for detcting background - default K=[0.1*nN] + int nkNN;//number on NN for detecting background - default K=[0.1*nN] //shadow detection parameters bool bShadowDetection;//default 1 - do shadow detection diff --git a/modules/video/src/bgfg_gaussmix2.cpp b/modules/video/src/bgfg_gaussmix2.cpp index 4241670f1c39..69e4baf657b3 100644 --- a/modules/video/src/bgfg_gaussmix2.cpp +++ b/modules/video/src/bgfg_gaussmix2.cpp @@ -181,7 +181,7 @@ class BackgroundSubtractorMOG2Impl CV_FINAL : public BackgroundSubtractorMOG2 //! computes a background image which are the mean of all background gaussians virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE; - //! re-initiaization method + //! 
re-initialization method void initialize(Size _frameSize, int _frameType) { frameSize = _frameSize; diff --git a/modules/videoio/include/opencv2/videoio/legacy/constants_c.h b/modules/videoio/include/opencv2/videoio/legacy/constants_c.h index 44450027d581..d484353a8ba6 100644 --- a/modules/videoio/include/opencv2/videoio/legacy/constants_c.h +++ b/modules/videoio/include/opencv2/videoio/legacy/constants_c.h @@ -225,8 +225,8 @@ enum CV_CAP_PROP_XI_COOLING = 466, // Start camera cooling. CV_CAP_PROP_XI_TARGET_TEMP = 467, // Set sensor target temperature for cooling. CV_CAP_PROP_XI_CHIP_TEMP = 468, // Camera sensor temperature - CV_CAP_PROP_XI_HOUS_TEMP = 469, // Camera housing tepmerature - CV_CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, // Camera housing back side tepmerature + CV_CAP_PROP_XI_HOUS_TEMP = 469, // Camera housing temperature + CV_CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, // Camera housing back side temperature CV_CAP_PROP_XI_SENSOR_BOARD_TEMP = 596, // Camera sensor board temperature CV_CAP_PROP_XI_CMS = 470, // Mode of color management system. CV_CAP_PROP_XI_APPLY_CMS = 471, // Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE). 
diff --git a/modules/videoio/src/cap_aravis.cpp b/modules/videoio/src/cap_aravis.cpp index 65dc4532a54d..85ac38121e84 100644 --- a/modules/videoio/src/cap_aravis.cpp +++ b/modules/videoio/src/cap_aravis.cpp @@ -300,7 +300,7 @@ bool CvCaptureCAM_Aravis::grabFrame() size_t buffer_size; framebuffer = (void*)arv_buffer_get_data (arv_buffer, &buffer_size); - // retrieve image size properites + // retrieve image size properties arv_buffer_get_image_region (arv_buffer, &xoffset, &yoffset, &width, &height); // retrieve image ID set by camera diff --git a/modules/videoio/src/cap_avfoundation.mm b/modules/videoio/src/cap_avfoundation.mm index 70caaee5323b..f1d1af3eec79 100644 --- a/modules/videoio/src/cap_avfoundation.mm +++ b/modules/videoio/src/cap_avfoundation.mm @@ -1298,7 +1298,7 @@ -(int) updateImage { colorSpace, kCGImageAlphaLast|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault); - //CGImage -> CVPixelBufferRef coversion + //CGImage -> CVPixelBufferRef conversion CVPixelBufferRef pixelBuffer = NULL; CFDataRef cfData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage)); int status = CVPixelBufferCreateWithBytes(NULL, diff --git a/modules/videoio/src/cap_avfoundation_mac.mm b/modules/videoio/src/cap_avfoundation_mac.mm index 0c61a08a65ef..230e6d743de5 100644 --- a/modules/videoio/src/cap_avfoundation_mac.mm +++ b/modules/videoio/src/cap_avfoundation_mac.mm @@ -814,7 +814,7 @@ -(int) updateImage { if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) { // For CV_CAP_MODE_BGR, read frames as BGRA (AV Foundation's YUV->RGB conversion is slightly faster than OpenCV's CV_YUV2BGR_YV12) // kCVPixelFormatType_32ABGR is reportedly faster on OS X, but OpenCV doesn't have a CV_ABGR2BGR conversion. - // kCVPixelFormatType_24RGB is significanly slower than kCVPixelFormatType_32BGRA. + // kCVPixelFormatType_24RGB is significantly slower than kCVPixelFormatType_32BGRA. 
pixelFormat = kCVPixelFormatType_32BGRA; mFormat = CV_8UC3; } else if (mMode == CV_CAP_MODE_GRAY) { @@ -1332,7 +1332,7 @@ static void releaseCallback( void *releaseRefCon, const void * ) { colorSpace, kCGImageAlphaLast|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault); - //CGImage -> CVPixelBufferRef coversion + //CGImage -> CVPixelBufferRef conversion CVPixelBufferRef pixelBuffer = NULL; CFDataRef cfData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage)); int status = CVPixelBufferCreateWithBytes(NULL, diff --git a/modules/videoio/src/cap_gstreamer.cpp b/modules/videoio/src/cap_gstreamer.cpp index 3d3cc934a36c..18d6101ece84 100644 --- a/modules/videoio/src/cap_gstreamer.cpp +++ b/modules/videoio/src/cap_gstreamer.cpp @@ -953,7 +953,7 @@ bool GStreamerCapture::open(const String &filename_) * \return property value * * There are two ways the properties can be retrieved. For seek-based properties we can query the pipeline. - * For frame-based properties, we use the caps of the lasst receivef sample. This means that some properties + * For frame-based properties, we use the caps of the last received sample. 
This means that some properties * are not available until a first frame was received */ double GStreamerCapture::getProperty(int propId) const diff --git a/platforms/ios/cmake/Modules/Platform/iOS.cmake b/platforms/ios/cmake/Modules/Platform/iOS.cmake index 5e2bbc56720d..207f41f4555a 100644 --- a/platforms/ios/cmake/Modules/Platform/iOS.cmake +++ b/platforms/ios/cmake/Modules/Platform/iOS.cmake @@ -46,7 +46,7 @@ if (APPLE_FRAMEWORK AND BUILD_SHARED_LIBS) set (CMAKE_INSTALL_NAME_DIR "@rpath") endif() -# Hidden visibilty is required for cxx on iOS +# Hidden visibility is required for cxx on iOS set (no_warn "-Wno-unused-function -Wno-overloaded-virtual") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${no_warn}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -fvisibility=hidden -fvisibility-inlines-hidden ${no_warn}") diff --git a/platforms/linux/mips.toolchain.cmake b/platforms/linux/mips.toolchain.cmake index 425d5dfcf969..b6b4609050ae 100755 --- a/platforms/linux/mips.toolchain.cmake +++ b/platforms/linux/mips.toolchain.cmake @@ -4,7 +4,7 @@ # Toolchains with 'img' in the name are for MIPS R6 instruction sets. # It is recommended to use cmake-gui application for build scripts configuration and generation: # 1. Run cmake-gui -# 2. Specifiy toolchain file for cross-compiling, mips32r5el-gnu.toolchian.cmake or mips64r6el-gnu.toolchain.cmake +# 2. Specify toolchain file for cross-compiling, mips32r5el-gnu.toolchain.cmake or mips64r6el-gnu.toolchain.cmake # can be selected. # 3. Configure and Generate makefiles. # 4. make -j4 & make install diff --git a/platforms/linux/mips32r5el-gnu.toolchain.cmake b/platforms/linux/mips32r5el-gnu.toolchain.cmake index 1937270f82bb..d93d4f2c945f 100755 --- a/platforms/linux/mips32r5el-gnu.toolchain.cmake +++ b/platforms/linux/mips32r5el-gnu.toolchain.cmake @@ -4,7 +4,7 @@ # Toolchains with 'img' in the name are for MIPS R6 instruction sets. # It is recommended to use cmake-gui for build scripts configuration and generation: # 1. 
Run cmake-gui -# 2. Specifiy toolchain file mips32r5el-gnu.toolchian.cmake for cross-compiling. +# 2. Specify toolchain file mips32r5el-gnu.toolchain.cmake for cross-compiling. # 3. Configure and Generate makefiles. # 4. make -j4 & make install # ---------------------------------------------------------------------------------------------- diff --git a/platforms/linux/mips64r6el-gnu.toolchain.cmake b/platforms/linux/mips64r6el-gnu.toolchain.cmake index b022240a722f..ce63668665c7 100755 --- a/platforms/linux/mips64r6el-gnu.toolchain.cmake +++ b/platforms/linux/mips64r6el-gnu.toolchain.cmake @@ -4,7 +4,7 @@ # Toolchains with 'img' in the name are for MIPS R6 instruction sets. # It is recommended to use cmake-gui for build scripts configuration and generation: # 1. Run cmake-gui -# 2. Specifiy toolchain file mips64r6el-gnu.toolchain.cmake for cross-compiling. +# 2. Specify toolchain file mips64r6el-gnu.toolchain.cmake for cross-compiling. # 3. Configure and Generate makefiles. # 4. make -j4 & make install # ---------------------------------------------------------------------------------------------- diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 33cf490a2a3b..617629df2ece 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -47,7 +47,7 @@ foreach(sample_filename ${cpp_samples}) target_compile_definitions(${tgt} PRIVATE HAVE_OPENGL) endif() if(sample_filename MATCHES "simd_") - # disabled intentionally - demonstation purposes only + # disabled intentionally - demonstration purposes only #target_include_directories(${tgt} PRIVATE "${CMAKE_CURRENT_LIST_DIR}") #target_compile_definitions(${tgt} PRIVATE OPENCV_SIMD_CONFIG_HEADER=opencv_simd_config_custom.hpp) #target_compile_definitions(${tgt} PRIVATE OPENCV_SIMD_CONFIG_INCLUDE_DIR=1) diff --git a/samples/cpp/delaunay2.cpp b/samples/cpp/delaunay2.cpp index 925477b4a0c0..26f10bd66814 100644 --- a/samples/cpp/delaunay2.cpp +++ b/samples/cpp/delaunay2.cpp @@ -12,7 +12,7 @@ 
static void help() "It draws a random set of points in an image and then delaunay triangulates them.\n" "Usage: \n" "./delaunay \n" - "\nThis program builds the traingulation interactively, you may stop this process by\n" + "\nThis program builds the triangulation interactively, you may stop this process by\n" "hitting any key.\n"; } diff --git a/samples/cpp/logistic_regression.cpp b/samples/cpp/logistic_regression.cpp index 365b32e5238c..4338b61f7bce 100644 --- a/samples/cpp/logistic_regression.cpp +++ b/samples/cpp/logistic_regression.cpp @@ -157,7 +157,7 @@ int main() cout << responses.t() << endl; cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses) << "%" << endl; - // save the classfier + // save the classifier const String saveFilename = "NewLR_Trained.xml"; cout << "saving the classifier to " << saveFilename << endl; lr1->save(saveFilename); @@ -167,7 +167,7 @@ int main() Ptr lr2 = StatModel::load(saveFilename); // predict using loaded classifier - cout << "predicting the dataset using the loaded classfier..."; + cout << "predicting the dataset using the loaded classifier..."; Mat responses2; lr2->predict(data_test, responses2); cout << "done!" << endl; diff --git a/samples/cpp/pca.cpp b/samples/cpp/pca.cpp index ba42700f189f..a5a1c54a9252 100644 --- a/samples/cpp/pca.cpp +++ b/samples/cpp/pca.cpp @@ -10,7 +10,7 @@ * This program demonstrates how to use OpenCV PCA with a * specified amount of variance to retain. The effect * is illustrated further by using a trackbar to -* change the value for retained varaince. +* change the value for retained variance. * * The program takes as input a text file with each line * begin the full path to an image. 
PCA will be performed diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h index 7380af5d0e07..92f004d4d908 100644 --- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h +++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h @@ -36,7 +36,7 @@ class Model void load(const std::string &path); private: - /** The current number of correspondecnes */ + /** The current number of correspondences */ int n_correspondences_; /** The list of 2D points on the model surface */ std::vector list_keypoints_; diff --git a/samples/cpp/videocapture_openni.cpp b/samples/cpp/videocapture_openni.cpp index e8a87193e12a..0b67d92f61eb 100644 --- a/samples/cpp/videocapture_openni.cpp +++ b/samples/cpp/videocapture_openni.cpp @@ -17,7 +17,7 @@ static void help() " CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)\n" " CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)\n" " CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)\n" - " CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not ocluded, not shaded etc.) (CV_8UC1)\n" + " CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded etc.) (CV_8UC1)\n" "2.) Data given from RGB image generator\n" " CAP_OPENNI_BGR_IMAGE - color image (CV_8UC3)\n" " CAP_OPENNI_GRAY_IMAGE - gray image (CV_8UC1)\n" diff --git a/samples/opencl/opencl-opencv-interop.cpp b/samples/opencl/opencl-opencv-interop.cpp index f648f78bf8bb..e889623463e4 100644 --- a/samples/opencl/opencl-opencv-interop.cpp +++ b/samples/opencl/opencl-opencv-interop.cpp @@ -3,7 +3,7 @@ // This will loop through frames of video either from input media file // or camera device and do processing of these data in OpenCL and then // in OpenCV. In OpenCL it does inversion of pixels in left half of frame and -// in OpenCV it does bluring in the right half of frame. 
+// in OpenCV it does blurring in the right half of frame. */ #include #include diff --git a/samples/python/deconvolution.py b/samples/python/deconvolution.py index bf136c522fcf..b276ca8cfbf9 100755 --- a/samples/python/deconvolution.py +++ b/samples/python/deconvolution.py @@ -15,7 +15,7 @@ Use sliders to adjust PSF paramitiers. Keys: - SPACE - switch btw linear/cirular PSF + SPACE - switch btw linear/circular PSF ESC - exit Examples: diff --git a/samples/winrt/ImageManipulations/Constants.cpp b/samples/winrt/ImageManipulations/Constants.cpp index a26634272bb7..c819b7680931 100644 --- a/samples/winrt/ImageManipulations/Constants.cpp +++ b/samples/winrt/ImageManipulations/Constants.cpp @@ -17,6 +17,6 @@ using namespace SDKSample; Platform::Array^ MainPage::scenariosInner = ref new Platform::Array { // The format here is the following: - // { "Description for the sample", "Fully quaified name for the class that implements the scenario" } + // { "Description for the sample", "Fully qualified name for the class that implements the scenario" } { "Enumerate cameras and add a video effect", "SDKSample.MediaCapture.AdvancedCapture" }, };