/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
43
#include "precomp.hpp"
44
#include "opencv2/core/hal/intrin.hpp"
61
// Computes one pair of 3x3 Sobel responses (vx, vy) from the eight
// neighbourhood samples around a pixel. v00..v22 are the 3x3 window in
// row-major order; the centre sample v11 is not needed (its Sobel weight
// is zero in both kernels). T is either a scalar (short) or a SIMD
// vector type (v_int16x8) — the arithmetic is identical for both.
template <typename T> static inline
void spatialGradientKernel( T& vx, T& vy,
                            const T& v00, const T& v01, const T& v02,
                            const T& v10,               const T& v12,
                            const T& v20, const T& v21, const T& v22 )
{
    // vx = (v22 - v00) + (v02 - v20) + 2 * (v12 - v10)
    // vy = (v22 - v00) + (v20 - v02) + 2 * (v21 - v01)

    T tmp_add = v22 - v00,   // diagonal term shared by vx and vy
      tmp_sub = v02 - v20,   // anti-diagonal term; opposite sign in vx vs vy
      tmp_x   = v12 - v10,   // horizontal term, weight 2 (added twice)
      tmp_y   = v21 - v01;   // vertical term, weight 2 (added twice)

    vx = tmp_add + tmp_sub + tmp_x + tmp_x;
    vy = tmp_add - tmp_sub + tmp_y + tmp_y;
}
78
// Computes the first-order x and y image derivatives of an 8-bit
// single-channel image using a 3x3 Sobel operator, writing CV_16SC1
// results to _dx and _dy in one pass.
//
// @param _src        input image; must be non-empty CV_8UC1
// @param _dx         output gradient in x, CV_16SC1, same size as src
// @param _dy         output gradient in y, CV_16SC1, same size as src
// @param ksize       Sobel aperture; only 3 is supported (asserted)
// @param borderType  BORDER_DEFAULT (reflect-101) or BORDER_REPLICATE
void spatialGradient( InputArray _src, OutputArray _dx, OutputArray _dy,
                      int ksize, int borderType )
{
    // Prepare InputArray src
    Mat src = _src.getMat();
    CV_Assert( !src.empty() );
    CV_Assert( src.type() == CV_8UC1 );
    CV_Assert( borderType == BORDER_DEFAULT || borderType == BORDER_REPLICATE );

    // Prepare OutputArrays dx, dy
    _dx.create( src.size(), CV_16SC1 );
    _dy.create( src.size(), CV_16SC1 );
    Mat dx = _dx.getMat(),
        dy = _dy.getMat();

    // TODO: Allow for other kernel sizes
    CV_Assert(ksize == 3);

    // Get dimensions
    const int H = src.rows,
              W = src.cols;

    // Row, column indices
    int i = 0,
        j = 0;

    // Handle border types: the offsets below turn an out-of-range
    // neighbour index into the index dictated by the border mode.
    int i_top    = 0,     // Case for H == 1 && W == 1 && BORDER_REPLICATE
        i_bottom = H - 1, // i index used in place of row H
        j_offl   = 0,     // j offset from 0th pixel to reach -1st pixel
        j_offr   = 0;     // j offset from W-1th pixel to reach Wth pixel

    if ( borderType == BORDER_DEFAULT ) // Equiv. to BORDER_REFLECT_101
    {
        if ( H > 1 )
        {
            i_top    = 1;
            i_bottom = H - 2;
        }
        if ( W > 1 )
        {
            j_offl = 1;
            j_offr = -1;
        }
    }

    // Pointer to row vectors
    uchar *p_src, *c_src, *n_src; // previous, current, next row
    short *c_dx, *c_dy;           // output rows for dx, dy

    // Extent already covered by the vector path; the scalar pass below
    // resumes from here. Both stay 0 when SIMD is unavailable.
    int i_start = 0;
    int j_start = 0;
#if CV_SIMD128 && CV_SSE2
    if ( hasSIMD128() )
    {
        uchar *m_src;       // row i+2: two output rows are produced per pass
        short *n_dx, *n_dy; // output row i+1

        // Characters in variable names have the following meanings:
        // u: unsigned char
        // s: signed int
        //
        // [row][column]
        // m: offset -1
        // n: offset  0
        // p: offset  1
        // Example: umn is offset -1 in row and offset 0 in column
        for ( i = 0; i < H - 1; i += 2 )
        {
            if ( i == 0 ) p_src = src.ptr<uchar>(i_top);
            else          p_src = src.ptr<uchar>(i-1);

            c_src = src.ptr<uchar>(i);
            n_src = src.ptr<uchar>(i+1);

            if ( i == H - 2 ) m_src = src.ptr<uchar>(i_bottom);
            else              m_src = src.ptr<uchar>(i+2);

            c_dx = dx.ptr<short>(i);
            c_dy = dy.ptr<short>(i);
            n_dx = dx.ptr<short>(i+1);
            n_dy = dy.ptr<short>(i+1);

            // Mask selecting only the last byte lane; used to stitch the
            // two shifted loads into the centre-column vector.
            v_uint8x16 v_select_m = v_uint8x16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 255);

            // Process rest of columns 16-column chunks at a time
            for ( j = 1; j < W - 16; j += 16 )
            {
                // Load top row for 3x3 Sobel filter
                v_uint8x16 v_um = v_load(&p_src[j-1]);
                v_uint8x16 v_up = v_load(&p_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_uint8x16 v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                                       v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_uint16x8 v_um1, v_um2, v_un1, v_un2, v_up1, v_up2;
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s1m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s1m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s1n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s1n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s1p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s1p2 = v_reinterpret_as_s16(v_up2);

                // Load second row for 3x3 Sobel filter
                v_um = v_load(&c_src[j-1]);
                v_up = v_load(&c_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s2m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s2m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s2n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s2n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s2p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s2p2 = v_reinterpret_as_s16(v_up2);

                // Load third row for 3x3 Sobel filter
                v_um = v_load(&n_src[j-1]);
                v_up = v_load(&n_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s3m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s3m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s3n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s3n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s3p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s3p2 = v_reinterpret_as_s16(v_up2);

                // dx & dy for rows 1, 2, 3
                v_int16x8 v_sdx1, v_sdy1;
                spatialGradientKernel<v_int16x8>( v_sdx1, v_sdy1,
                                                  v_s1m1, v_s1n1, v_s1p1,
                                                  v_s2m1,         v_s2p1,
                                                  v_s3m1, v_s3n1, v_s3p1 );

                v_int16x8 v_sdx2, v_sdy2;
                spatialGradientKernel<v_int16x8>( v_sdx2, v_sdy2,
                                                  v_s1m2, v_s1n2, v_s1p2,
                                                  v_s2m2,         v_s2p2,
                                                  v_s3m2, v_s3n2, v_s3p2 );

                // Store row i results
                v_store(&c_dx[j],   v_sdx1);
                v_store(&c_dx[j+8], v_sdx2);
                v_store(&c_dy[j],   v_sdy1);
                v_store(&c_dy[j+8], v_sdy2);

                // Load fourth row for 3x3 Sobel filter
                v_um = v_load(&m_src[j-1]);
                v_up = v_load(&m_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s4m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s4m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s4n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s4n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s4p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s4p2 = v_reinterpret_as_s16(v_up2);

                // dx & dy for rows 2, 3, 4 (reuses the already-expanded
                // middle rows, so each source row is loaded only once)
                spatialGradientKernel<v_int16x8>( v_sdx1, v_sdy1,
                                                  v_s2m1, v_s2n1, v_s2p1,
                                                  v_s3m1,         v_s3p1,
                                                  v_s4m1, v_s4n1, v_s4p1 );

                spatialGradientKernel<v_int16x8>( v_sdx2, v_sdy2,
                                                  v_s2m2, v_s2n2, v_s2p2,
                                                  v_s3m2,         v_s3p2,
                                                  v_s4m2, v_s4n2, v_s4p2 );

                // Store row i+1 results
                v_store(&n_dx[j],   v_sdx1);
                v_store(&n_dx[j+8], v_sdx2);
                v_store(&n_dy[j],   v_sdy1);
                v_store(&n_dy[j+8], v_sdy2);
            }
        }
        // Record how far the vector pass got so the scalar pass can
        // finish the remaining rows/columns without redoing work.
        i_start = i;
        j_start = j;
    }
#endif

    // Scalar pass: borders plus everything the vector pass skipped.
    int j_p, j_n;
    uchar v00, v01, v02, v10, v11, v12, v20, v21, v22;
    for ( i = 0; i < H; i++ )
    {
        if ( i == 0 ) p_src = src.ptr<uchar>(i_top);
        else          p_src = src.ptr<uchar>(i-1);

        c_src = src.ptr<uchar>(i);

        if ( i == H - 1 ) n_src = src.ptr<uchar>(i_bottom);
        else              n_src = src.ptr<uchar>(i+1);

        c_dx = dx.ptr<short>(i);
        c_dy = dy.ptr<short>(i);

        // Process left-most column
        j = 0;
        j_p = j + j_offl; // border-mapped index of column -1
        j_n = 1;
        if ( j_n >= W ) j_n = j + j_offr;
        v00 = p_src[j_p]; v01 = p_src[j]; v02 = p_src[j_n];
        v10 = c_src[j_p]; v11 = c_src[j]; v12 = c_src[j_n];
        v20 = n_src[j_p]; v21 = n_src[j]; v22 = n_src[j_n];
        spatialGradientKernel<short>( c_dx[0], c_dy[0], v00, v01, v02, v10,
                                      v12, v20, v21, v22 );
        v00 = v01; v10 = v11; v20 = v21;
        v01 = v02; v11 = v12; v21 = v22;

        // Process middle columns: rows already handled by the vector pass
        // resume at j_start, all other rows start at column 1.
        j = i >= i_start ? 1 : j_start;
        j_p = j - 1;
        v00 = p_src[j_p]; v01 = p_src[j];
        v10 = c_src[j_p]; v11 = c_src[j];
        v20 = n_src[j_p]; v21 = n_src[j];

        for ( ; j < W - 1; j++ )
        {
            // Get values for next column
            j_n = j + 1; v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n];
            spatialGradientKernel<short>( c_dx[j], c_dy[j], v00, v01, v02, v10,
                                          v12, v20, v21, v22 );

            // Move values back one column for next iteration
            v00 = v01; v10 = v11; v20 = v21;
            v01 = v02; v11 = v12; v21 = v22;
        }

        // Process right-most column. Guarded so a single-column image
        // (W == 1, already fully handled above) cannot write past row end.
        if ( j < W )
        {
            j_n = j + j_offr; v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n];
            spatialGradientKernel<short>( c_dx[j], c_dy[j], v00, v01, v02, v10,
                                          v12, v20, v21, v22 );
        }
    }
}