
Commit c78a8df: fix 4.x links
1 parent: be110d0

134 files changed, +334 -332 lines changed


README.md (+1 -1)

@@ -4,7 +4,7 @@

 * Homepage: <https://opencv.org>
 * Courses: <https://opencv.org/courses>
-* Docs: <https://docs.opencv.org/master/>
+* Docs: <https://docs.opencv.org/4.x/>
 * Q&A forum: <https://forum.opencv.org>
 * previous forum (read only): <http://answers.opencv.org>
 * Issue tracking: <https://github.com/opencv/opencv/issues>

apps/opencv_stitching_tool/opencv_stitching/blender.py (+2 -2)

@@ -36,11 +36,11 @@ def prepare(self, corners, sizes):
         self.blender.prepare(dst_sz)

     def feed(self, img, mask, corner):
-        """https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#a64837308bcf4e414a6219beff6cbe37a""" # noqa
+        """https://docs.opencv.org/4.x/d6/d4a/classcv_1_1detail_1_1Blender.html#a64837308bcf4e414a6219beff6cbe37a""" # noqa
         self.blender.feed(cv.UMat(img.astype(np.int16)), mask, corner)

     def blend(self):
-        """https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#aa0a91ce0d6046d3a63e0123cbb1b5c00""" # noqa
+        """https://docs.opencv.org/4.x/d6/d4a/classcv_1_1detail_1_1Blender.html#aa0a91ce0d6046d3a63e0123cbb1b5c00""" # noqa
         result = None
         result_mask = None
         result, result_mask = self.blender.blend(result, result_mask)
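These two docstrings only point at the cv.detail Blender API that blender.py wraps. Below is a minimal sketch of the prepare/feed/blend sequence; it is not part of this commit, and the MultiBandBlender type, destination ROI, and dummy tiles are illustrative assumptions.

import cv2 as cv
import numpy as np

# Hedged sketch of the flow wrapped by blender.py (assumed blender type and sizes).
blender = cv.detail_MultiBandBlender()
blender.prepare((0, 0, 200, 100))                   # destination ROI as (x, y, w, h)
img = np.full((100, 100, 3), 128, dtype=np.uint8)   # dummy warped tile
mask = np.full((100, 100), 255, dtype=np.uint8)     # dummy full mask
blender.feed(cv.UMat(img.astype(np.int16)), mask, (0, 0))
blender.feed(cv.UMat(img.astype(np.int16)), mask, (100, 0))
result, result_mask = blender.blend(None, None)     # stitched mosaic and its mask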

apps/opencv_stitching_tool/opencv_stitching/camera_adjuster.py (+1 -1)

@@ -6,7 +6,7 @@


 class CameraAdjuster:
-    """https://docs.opencv.org/master/d5/d56/classcv_1_1detail_1_1BundleAdjusterBase.html""" # noqa
+    """https://docs.opencv.org/4.x/d5/d56/classcv_1_1detail_1_1BundleAdjusterBase.html""" # noqa

     CAMERA_ADJUSTER_CHOICES = OrderedDict()
     CAMERA_ADJUSTER_CHOICES['ray'] = cv.detail_BundleAdjusterRay

apps/opencv_stitching_tool/opencv_stitching/camera_wave_corrector.py (+1 -1)

@@ -4,7 +4,7 @@


 class WaveCorrector:
-    """https://docs.opencv.org/master/d7/d74/group__stitching__rotation.html#ga83b24d4c3e93584986a56d9e43b9cf7f""" # noqa
+    """https://docs.opencv.org/4.x/d7/d74/group__stitching__rotation.html#ga83b24d4c3e93584986a56d9e43b9cf7f""" # noqa
     WAVE_CORRECT_CHOICES = OrderedDict()
     WAVE_CORRECT_CHOICES['horiz'] = cv.detail.WAVE_CORRECT_HORIZ
     WAVE_CORRECT_CHOICES['vert'] = cv.detail.WAVE_CORRECT_VERT

apps/opencv_stitching_tool/opencv_stitching/exposure_error_compensator.py (+2 -2)

@@ -32,9 +32,9 @@ def __init__(self,
         )

     def feed(self, *args):
-        """https://docs.opencv.org/master/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#ae6b0cc69a7bc53818ddea53eddb6bdba""" # noqa
+        """https://docs.opencv.org/4.x/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#ae6b0cc69a7bc53818ddea53eddb6bdba""" # noqa
         self.compensator.feed(*args)

     def apply(self, *args):
-        """https://docs.opencv.org/master/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#a473eaf1e585804c08d77c91e004f93aa""" # noqa
+        """https://docs.opencv.org/4.x/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#a473eaf1e585804c08d77c91e004f93aa""" # noqa
         return self.compensator.apply(*args)

apps/opencv_stitching_tool/opencv_stitching/feature_matcher.py (+3 -3)

@@ -15,13 +15,13 @@ def __init__(self,
                  **kwargs):

         if matcher_type == "affine":
-            """https://docs.opencv.org/master/d3/dda/classcv_1_1detail_1_1AffineBestOf2NearestMatcher.html""" # noqa
+            """https://docs.opencv.org/4.x/d3/dda/classcv_1_1detail_1_1AffineBestOf2NearestMatcher.html""" # noqa
             self.matcher = cv.detail_AffineBestOf2NearestMatcher(**kwargs)
         elif range_width == -1:
-            """https://docs.opencv.org/master/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html""" # noqa
+            """https://docs.opencv.org/4.x/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html""" # noqa
             self.matcher = cv.detail.BestOf2NearestMatcher_create(**kwargs)
         else:
-            """https://docs.opencv.org/master/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html""" # noqa
+            """https://docs.opencv.org/4.x/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html""" # noqa
             self.matcher = cv.detail.BestOf2NearestRangeMatcher_create(
                 range_width, **kwargs
             )
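The three branches above only differ in which cv.detail matcher they instantiate. As a standalone sketch (not part of this commit; constructor defaults and the range width value are assumptions), the same objects can be created directly:

import cv2 as cv

# Hedged sketch of the matcher choices made in FeatureMatcher.__init__ above.
affine_matcher = cv.detail_AffineBestOf2NearestMatcher()        # matcher_type == "affine"
default_matcher = cv.detail.BestOf2NearestMatcher_create()      # range_width == -1
range_matcher = cv.detail.BestOf2NearestRangeMatcher_create(5)  # assumed range_width of 5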

apps/opencv_stitching_tool/opencv_stitching/seam_finder.py (+2 -2)

@@ -6,7 +6,7 @@


 class SeamFinder:
-    """https://docs.opencv.org/master/d7/d09/classcv_1_1detail_1_1SeamFinder.html""" # noqa
+    """https://docs.opencv.org/4.x/d7/d09/classcv_1_1detail_1_1SeamFinder.html""" # noqa
     SEAM_FINDER_CHOICES = OrderedDict()
     SEAM_FINDER_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
     SEAM_FINDER_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')

@@ -19,7 +19,7 @@ def __init__(self, finder=DEFAULT_SEAM_FINDER):
         self.finder = SeamFinder.SEAM_FINDER_CHOICES[finder]

     def find(self, imgs, corners, masks):
-        """https://docs.opencv.org/master/d0/dd5/classcv_1_1detail_1_1DpSeamFinder.html#a7914624907986f7a94dd424209a8a609""" # noqa
+        """https://docs.opencv.org/4.x/d0/dd5/classcv_1_1detail_1_1DpSeamFinder.html#a7914624907986f7a94dd424209a8a609""" # noqa
         imgs_float = [img.astype(np.float32) for img in imgs]
         return self.finder.find(imgs_float, corners, masks)

apps/opencv_stitching_tool/opencv_stitching/test/SAMPLE_IMAGES_TO_DOWNLOAD.txt (+1 -1)

@@ -1,4 +1,4 @@
-https://github.com/opencv/opencv_extra/tree/master/testdata/stitching
+https://github.com/opencv/opencv_extra/tree/4.x/testdata/stitching

 s1.jpg s2.jpg
 boat1.jpg boat2.jpg boat3.jpg boat4.jpg boat5.jpg boat6.jpg

apps/opencv_stitching_tool/opencv_stitching/test/stitching_detailed.py (+1 -1)

@@ -317,7 +317,7 @@ def main():
     sizes = []
     blender = None
     timelapser = None
-    # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
+    # https://github.com/opencv/opencv/blob/4.x/samples/cpp/stitching_detailed.cpp#L725 ?
     for idx, name in enumerate(img_names):
         full_img = cv.imread(name)
         if not is_compose_scale_set:

apps/opencv_stitching_tool/opencv_stitching/timelapser.py (+1 -1)

@@ -26,7 +26,7 @@ def __init__(self, timelapse=DEFAULT_TIMELAPSE):
         )

     def initialize(self, *args):
-        """https://docs.opencv.org/master/dd/dac/classcv_1_1detail_1_1Timelapser.html#aaf0f7c4128009f02473332a0c41f6345""" # noqa
+        """https://docs.opencv.org/4.x/dd/dac/classcv_1_1detail_1_1Timelapser.html#aaf0f7c4128009f02473332a0c41f6345""" # noqa
         self.timelapser.initialize(*args)

     def process_and_save_frame(self, img_name, img, corner):

apps/opencv_stitching_tool/opencv_stitching/warper.py (+1 -1)

@@ -55,7 +55,7 @@ def warp_roi(self, width, height, camera, scale=None, aspect=1):

     def update_scale(self, scale):
         if scale is not None and scale != self.scale:
-            self.warper = cv.PyRotationWarper(self.warper_type, scale) # setScale not working: https://docs.opencv.org/master/d5/d76/classcv_1_1PyRotationWarper.html#a90b000bb75f95294f9b0b6ec9859eb55
+            self.warper = cv.PyRotationWarper(self.warper_type, scale) # setScale not working: https://docs.opencv.org/4.x/d5/d76/classcv_1_1PyRotationWarper.html#a90b000bb75f95294f9b0b6ec9859eb55
             self.scale = scale

     @staticmethod
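The in-line comment in this hunk records that setScale is not usable from the Python bindings, so update_scale simply rebuilds the warper. A minimal sketch of that workaround follows; it is not part of this commit, and 'spherical' is an assumed warper type.

import cv2 as cv

# Hedged sketch of the workaround noted above: recreate the PyRotationWarper
# with the new scale instead of calling setScale on the existing instance.
warper = cv.PyRotationWarper('spherical', 1.0)
warper = cv.PyRotationWarper('spherical', 2.0)  # new scale -> fresh warper instance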

doc/js_tutorials/js_assets/js_image_classification.html (+1 -1)

@@ -116,7 +116,7 @@ <h2>Model Info:</h2>
 needSoftmax = false;

 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>

 <script id="codeSnippet1" type="text/code-snippet">

doc/js_tutorials/js_assets/js_image_classification_model_info.json (+5 -5)

@@ -6,7 +6,7 @@
 "std": "1",
 "swapRB": "false",
 "needSoftmax": "false",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
 "modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel",
 "configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt"
 },

@@ -16,7 +16,7 @@
 "std": "0.007843",
 "swapRB": "false",
 "needSoftmax": "true",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
 "modelUrl": "https://drive.google.com/open?id=0B7ubpZO7HnlCcHlfNmJkU2VPelE",
 "configUrl": "https://raw.githubusercontent.com/shicai/DenseNet-Caffe/master/DenseNet_121.prototxt"
 },

@@ -26,7 +26,7 @@
 "std": "1",
 "swapRB": "false",
 "needSoftmax": "false",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
 "modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
 "configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt"
 },

@@ -36,7 +36,7 @@
 "std": "1",
 "swapRB": "false",
 "needSoftmax": "false",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
 "modelUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel",
 "configUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/deploy.prototxt"
 },

@@ -46,7 +46,7 @@
 "std": "1",
 "swapRB": "false",
 "needSoftmax": "false",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
 "modelUrl": "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel",
 "configUrl": "https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/f02f8769e64494bcd3d7e97d5d747ac275825721/VGG_ILSVRC_19_layers_deploy.prototxt"
 }

doc/js_tutorials/js_assets/js_image_classification_webnn_polyfill.html (+1 -1)

@@ -117,7 +117,7 @@ <h2>Model Info:</h2>
 needSoftmax = false;

 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>

 <script id="codeSnippet1" type="text/code-snippet">

doc/js_tutorials/js_assets/js_image_classification_with_camera.html (+1 -1)

@@ -116,7 +116,7 @@ <h2>Model Info:</h2>
 needSoftmax = false;

 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>

 <script id="codeSnippet1" type="text/code-snippet">

doc/js_tutorials/js_assets/js_object_detection.html (+1 -1)

@@ -94,7 +94,7 @@ <h2>Model Info:</h2>
 outType = "SSD";

 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt";
 </script>

 <script id="codeSnippet1" type="text/code-snippet">

doc/js_tutorials/js_assets/js_object_detection_model_info.json (+3 -3)

@@ -7,7 +7,7 @@
 "std": "0.007843",
 "swapRB": "false",
 "outType": "SSD",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt",
 "modelUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/mobilenet_iter_73000.caffemodel",
 "configUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/deploy.prototxt"
 },

@@ -18,7 +18,7 @@
 "std": "1",
 "swapRB": "false",
 "outType": "SSD",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt",
 "modelUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download",
 "configUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download"
 }

@@ -31,7 +31,7 @@
 "std": "0.00392",
 "swapRB": "false",
 "outType": "YOLO",
-"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_yolov3.txt",
+"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_yolov3.txt",
 "modelUrl": "https://pjreddie.com/media/files/yolov2-tiny.weights",
 "configUrl": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-tiny.cfg"
 }

doc/js_tutorials/js_assets/js_object_detection_with_camera.html (+1 -1)

@@ -94,7 +94,7 @@ <h2>Model Info:</h2>
 outType = "SSD";

 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt";
 </script>

 <script id="codeSnippet1" type="text/code-snippet">

doc/js_tutorials/js_assets/webnn-electron/js_image_classification_webnn_electron.html (+1 -1)

@@ -116,7 +116,7 @@ <h2>Model Info:</h2>
 needSoftmax = false;

 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>

 <script id="codeSnippet1" type="text/code-snippet">

doc/js_tutorials/js_setup/js_nodejs/js_nodejs.markdown (+1 -1)

@@ -333,7 +333,7 @@ function installDOM(){
 ### Execute it ###

 - Save the file as `exampleNodeCanvasData.js`.
-- Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/master/data/haarcascades).
+- Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/4.x/data/haarcascades).
 - Make sure a sample image file `lena.jpg` exists in project's directory. It should display people's faces for this example to make sense. The following image is known to work:

 ![image](lena.jpg)

doc/js_tutorials/js_setup/js_usage/js_usage.markdown (+3 -1)

@@ -4,7 +4,9 @@ Using OpenCV.js {#tutorial_js_usage}
 Steps
 -----

-In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page. You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentations at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (For example, [https://docs.opencv.org/3.4.0/opencv.js](https://docs.opencv.org/3.4.0/opencv.js). Use `master` if you want the latest build). You can also build your own copy by following the tutorial on Build Opencv.js.
+In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page.
+You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentations at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (For example, [https://docs.opencv.org/4.5.0/opencv.js](https://docs.opencv.org/4.5.0/opencv.js). Use `4.x` if you want the latest build).
+You can also build your own copy by following the tutorial @ref tutorial_js_setup.

 ### Create a web page

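The updated paragraph above points readers at the prebuilt script URL. As a small illustration that is not part of this commit (the output filename is an assumption), the 4.x build can be fetched with:

import urllib.request

# Hedged sketch: download the prebuilt opencv.js referenced in the tutorial text.
urllib.request.urlretrieve("https://docs.opencv.org/4.x/opencv.js", "opencv.js")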
doc/py_tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.markdown (+1 -1)

@@ -36,7 +36,7 @@ gives us a feature vector containing 64 values. This is the feature vector we us

 Finally, as in the previous case, we start by splitting our big dataset into individual cells. For
 every digit, 250 cells are reserved for training data and remaining 250 data is reserved for
-testing. Full code is given below, you also can download it from [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
+testing. Full code is given below, you also can download it from [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):

 @include samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py

doc/tutorials/app/intelperc.markdown (+1 -1)

@@ -83,5 +83,5 @@ there are two flags that should be used to set/get property of the needed genera
 flag value is assumed by default if neither of the two possible values of the property is set.

 For more information please refer to the example of usage
-[videocapture_realsense.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/videocapture_realsense.cpp)
+[videocapture_realsense.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/videocapture_realsense.cpp)
 in opencv/samples/cpp folder.

doc/tutorials/app/kinect_openni.markdown (+1 -1)

@@ -140,5 +140,5 @@ property. The following properties of cameras available through OpenNI interface
 - CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION

 For more information please refer to the example of usage
-[videocapture_openni.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/videocapture_openni.cpp) in
+[videocapture_openni.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/videocapture_openni.cpp) in
 opencv/samples/cpp folder.

doc/tutorials/app/orbbec_astra.markdown (+1 -1)

@@ -165,5 +165,5 @@ but the depth data makes it easy.
 ![Depth frame](images/astra_depth.png)

 The complete implementation can be found in
-[orbbec_astra.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp)
+[orbbec_astra.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp)
 in `samples/cpp/tutorial_code/videoio` directory.

doc/tutorials/app/trackbar.markdown (+3 -3)

@@ -37,19 +37,19 @@ Let's modify the program made in the tutorial @ref tutorial_adding_images. We wi

 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
 @include cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp
 @end_toggle

 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java)
 @include java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java
 @end_toggle

 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py)
 @include python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py
 @end_toggle
