/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"
#include "test_precomp.hpp"
// http://www.christian-seiler.de/projekte/fpmath/
// x87 FPU control-word management for the host reference path: forces 24-bit
// (single) precision so CPU results are comparable to the GPU's 32-bit floats.
// NOTE(review): this region is corrupted — stray line-number artifacts (bare
// integers) remain, and lines appear missing (function braces, `#endif`s, and
// presumably the Win32 save of the old control word and the GCC
// `_FPU_SETCW(fpu_cw)` apply step — TODO confirm against the original source).
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
56
// glibc <fpu_control.h> control-word storage (saved / modified values).
fpu_control_t fpu_oldcw, fpu_cw;
57
#elif defined(_WIN32) && !defined(_WIN64)
58
// MSVC _controlfp_s control-word storage (saved / modified values).
unsigned int fpu_oldcw, fpu_cw;
62
// Constructor: captures the current FPU control word, then switches the x87
// precision control to single precision for the lifetime of the object.
FpuControl::FpuControl()
64
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
65
// Save the current control word so the destructor can restore it.
_FPU_GETCW(fpu_oldcw);
66
// Clear all precision-control bits, then select single precision.
fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
68
#elif defined(_WIN32) && !defined(_WIN64)
69
// Query the current control word (mask 0 = read-only, no change applied).
_controlfp_s(&fpu_cw, 0, 0);
71
// Set 24-bit precision (only the precision-control bits, per _MCW_PC mask).
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
75
// Destructor: restores the FPU control word captured at construction.
FpuControl::~FpuControl()
77
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
78
_FPU_SETCW(fpu_oldcw);
79
#elif defined(_WIN32) && !defined(_WIN64)
80
// Restore the saved precision-control bits.
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
85
// Test-fixture constructor: forwards the test name to the NCVTestProvider base
// and records the cascade file name. The remaining members (presumably src,
// width, height from the parameters) would be set in initializer-list lines
// lost to extraction — TODO confirm against the original source.
TestHaarCascadeApplication::TestHaarCascadeApplication(std::string testName_, NCVTestSourceProvider<Ncv8u> &src_,
86
std::string cascadeName_, Ncv32u width_, Ncv32u height_)
88
NCVTestProvider(testName_),
90
cascadeName(cascadeName_),
97
// Serializes the test configuration (cascade file name plus detection frame
// width/height) to the supplied report stream, one `key=value` line each.
// NOTE(review): the function braces and the return statement are missing from
// this extraction — only the serialized fields are visible.
bool TestHaarCascadeApplication::toString(std::ofstream &strOut)
99
strOut << "cascadeName=" << cascadeName << std::endl;
100
strOut << "width=" << width << std::endl;
101
strOut << "height=" << height << std::endl;
106
// Per-test initialization hook required by the NCVTestProvider interface;
// the body is not visible in this extraction.
bool TestHaarCascadeApplication::init()
111
// Runs one scale of Haar-cascade object detection twice — once through the
// host reference implementation (ncvApplyHaarClassifierCascade_host) and once
// through the CUDA device implementation (ncvApplyHaarClassifierCascade_device)
// — then compares detection counts and sorted pixel-mask contents. Any
// allocation/API failure or mismatch makes the function return false via the
// ncvAssertReturn / ncvAssertCUDAReturn macros.
// NOTE(review): this block is corrupted by extraction — braces, several
// statements/arguments, and stray bare-integer line-number artifacts remain
// interleaved with the code; restore from the original source before building.
bool TestHaarCascadeApplication::process()
116
// --- Query cascade dimensions from the classifier file ---
Ncv32u numStages, numNodes, numFeatures;
118
ncvStat = ncvHaarGetClassifierSize(this->cascadeName, numStages, numNodes, numFeatures);
119
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
121
// --- Host-side (CPU allocator) storage for the cascade ---
NCVVectorAlloc<HaarStage64> h_HaarStages(*this->allocatorCPU.get(), numStages);
122
ncvAssertReturn(h_HaarStages.isMemAllocated(), false);
123
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(*this->allocatorCPU.get(), numNodes);
124
ncvAssertReturn(h_HaarNodes.isMemAllocated(), false);
125
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(*this->allocatorCPU.get(), numFeatures);
126
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), false);
128
// --- Device-side (GPU allocator) storage for the cascade ---
NCVVectorAlloc<HaarStage64> d_HaarStages(*this->allocatorGPU.get(), numStages);
129
ncvAssertReturn(d_HaarStages.isMemAllocated(), false);
130
NCVVectorAlloc<HaarClassifierNode128> d_HaarNodes(*this->allocatorGPU.get(), numNodes);
131
ncvAssertReturn(d_HaarNodes.isMemAllocated(), false);
132
NCVVectorAlloc<HaarFeature64> d_HaarFeatures(*this->allocatorGPU.get(), numFeatures);
133
ncvAssertReturn(d_HaarFeatures.isMemAllocated(), false);
135
// --- Cascade descriptor; ClassifierSize starts at 1x1 (overwritten by the
// loader below — TODO confirm, the loader call takes `haar` by reference) ---
HaarClassifierCascadeDescriptor haar;
136
haar.ClassifierSize.width = haar.ClassifierSize.height = 1;
137
haar.bNeedsTiltedII = false;
138
haar.NumClassifierRootNodes = numNodes;
139
haar.NumClassifierTotalNodes = numNodes;
140
haar.NumFeatures = numFeatures;
141
haar.NumStages = numStages;
143
// Skip real work while the allocator is only counting required sizes.
NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
146
// --- Load the cascade on the host, then upload stages/nodes/features ---
ncvStat = ncvHaarLoadFromFile_host(this->cascadeName, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);
147
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
149
ncvAssertReturn(NCV_SUCCESS == h_HaarStages.copySolid(d_HaarStages, 0), false);
150
ncvAssertReturn(NCV_SUCCESS == h_HaarNodes.copySolid(d_HaarNodes, 0), false);
151
ncvAssertReturn(NCV_SUCCESS == h_HaarFeatures.copySolid(d_HaarFeatures, 0), false);
152
// copySolid on stream 0 is asynchronous here — wait for the uploads to land.
ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
156
// --- Compute source, integral-image, and search ROIs ---
NcvSize32s srcRoi, srcIIRoi, searchRoi;
157
srcRoi.width = this->width;
158
srcRoi.height = this->height;
159
// Integral images are one element wider/taller than the source.
srcIIRoi.width = srcRoi.width + 1;
160
srcIIRoi.height = srcRoi.height + 1;
161
searchRoi.width = srcIIRoi.width - haar.ClassifierSize.width;
162
searchRoi.height = srcIIRoi.height - haar.ClassifierSize.height;
163
// Frame smaller than the classifier window: nothing to search.
if (searchRoi.width <= 0 || searchRoi.height <= 0)
167
NcvSize32u searchRoiU(searchRoi.width, searchRoi.height);
169
// --- Source image buffers (device + host) ---
NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);
170
ncvAssertReturn(d_img.isMemAllocated(), false);
171
NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);
172
ncvAssertReturn(h_img.isMemAllocated(), false);
174
Ncv32u integralWidth = this->width + 1;
175
Ncv32u integralHeight = this->height + 1;
177
// --- Integral and squared-integral image buffers (device + host) ---
NCVMatrixAlloc<Ncv32u> d_integralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);
178
ncvAssertReturn(d_integralImage.isMemAllocated(), false);
179
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);
180
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), false);
181
NCVMatrixAlloc<Ncv32u> h_integralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);
182
ncvAssertReturn(h_integralImage.isMemAllocated(), false);
183
NCVMatrixAlloc<Ncv64u> h_sqIntegralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);
184
ncvAssertReturn(h_sqIntegralImage.isMemAllocated(), false);
186
// --- Per-window stddev and pixel-mask buffers (device + host) ---
NCVMatrixAlloc<Ncv32f> d_rectStdDev(*this->allocatorGPU.get(), this->width, this->height);
187
ncvAssertReturn(d_rectStdDev.isMemAllocated(), false);
188
NCVMatrixAlloc<Ncv32u> d_pixelMask(*this->allocatorGPU.get(), this->width, this->height);
189
ncvAssertReturn(d_pixelMask.isMemAllocated(), false);
190
NCVMatrixAlloc<Ncv32f> h_rectStdDev(*this->allocatorCPU.get(), this->width, this->height);
191
ncvAssertReturn(h_rectStdDev.isMemAllocated(), false);
192
NCVMatrixAlloc<Ncv32u> h_pixelMask(*this->allocatorCPU.get(), this->width, this->height);
193
ncvAssertReturn(h_pixelMask.isMemAllocated(), false);
195
// --- Detection hypothesis vectors, sized for the worst case (one per pixel) ---
NCVVectorAlloc<NcvRect32u> d_hypotheses(*this->allocatorGPU.get(), this->width * this->height);
196
ncvAssertReturn(d_hypotheses.isMemAllocated(), false);
197
NCVVectorAlloc<NcvRect32u> h_hypotheses(*this->allocatorCPU.get(), this->width * this->height);
198
ncvAssertReturn(h_hypotheses.isMemAllocated(), false);
201
// --- Scratch buffer for NPP staging integral computations; one buffer sized
// for the larger of the two requirements is shared by both calls ---
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
202
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &szTmpBufIntegral, this->devProp);
203
ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
204
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &szTmpBufSqIntegral, this->devProp);
205
ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
206
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(*this->allocatorGPU.get(), std::max(szTmpBufIntegral, szTmpBufSqIntegral));
207
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), false);
209
// Detection counts reported by the device and host paths, compared at the end.
Ncv32u detectionsOnThisScale_d = 0;
210
Ncv32u detectionsOnThisScale_h = 0;
214
// --- Fill the host image from the test source provider and upload it ---
ncvAssertReturn(this->src.fill(h_img), false);
215
ncvStat = h_img.copySolid(d_img, 0);
216
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
217
ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
219
// --- Compute the integral image on the device ---
nppStat = nppiStIntegral_8u32u_C1R(d_img.ptr(), d_img.pitch(),
220
d_integralImage.ptr(), d_integralImage.pitch(),
221
NcvSize32u(d_img.width(), d_img.height()),
222
d_tmpIIbuf.ptr(), szTmpBufIntegral, this->devProp);
223
ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
225
// --- Compute the squared integral image on the device ---
nppStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),
226
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
227
NcvSize32u(d_img.width(), d_img.height()),
228
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, this->devProp);
229
ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
231
// --- Per-window standard deviation over the classifier window, shrunk by the
// stddev border on each side. NOTE(review): the first two rect arguments
// (presumably the HAAR_STDDEV_BORDER offsets) are missing from this extraction.
const NcvRect32u rect(
234
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
235
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
236
nppStat = nppiStRectStdDev_32f_C1R(
237
d_integralImage.ptr(), d_integralImage.pitch(),
238
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
239
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
240
NcvSize32u(searchRoi.width, searchRoi.height), rect,
242
ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
244
// --- Download integral image and stddev for the host reference path ---
ncvStat = d_integralImage.copySolid(h_integralImage, 0);
245
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
246
ncvStat = d_rectStdDev.copySolid(h_rectStdDev, 0);
247
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
249
// --- Initialize the host pixel mask: inside the search ROI each element
// packs its coordinates as (row << 16) | col; outside it is marked invalid ---
for (Ncv32u i=0; i<searchRoiU.height; i++)
251
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
253
if (j<searchRoiU.width)
255
h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = (i << 16) | j;
259
h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
263
ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false);
270
// --- Host (reference) cascade application ---
ncvStat = ncvApplyHaarClassifierCascade_host(
271
h_integralImage, h_rectStdDev, h_pixelMask,
272
detectionsOnThisScale_h,
273
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
274
searchRoiU, 1, 1.0f);
275
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
281
// --- Device cascade application (needs the current device's properties).
// NOTE(review): devId's declaration is missing from this extraction. ---
ncvAssertCUDAReturn(cudaGetDevice(&devId), false);
282
cudaDeviceProp _devProp;
283
ncvAssertCUDAReturn(cudaGetDeviceProperties(&_devProp, devId), false);
285
ncvStat = ncvApplyHaarClassifierCascade_device(
286
d_integralImage, d_rectStdDev, d_pixelMask,
287
detectionsOnThisScale_d,
288
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
290
*this->allocatorGPU.get(), *this->allocatorCPU.get(),
292
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
294
// --- Download the device-produced pixel mask and compare against the host ---
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(*this->allocatorCPU.get(), this->width, this->height);
295
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), false);
298
// Tracks whether any comparison has failed yet (true = still clean).
bool bLoopVirgin = true;
302
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
303
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
305
// Detection counts must agree between device and host paths.
if (detectionsOnThisScale_d != detectionsOnThisScale_h)
311
// Sort the device detections so they can be compared element-wise with the
// host mask (host output is assumed already ordered — TODO confirm).
std::sort(h_pixelMask_d.ptr(), h_pixelMask_d.ptr() + detectionsOnThisScale_d);
312
for (Ncv32u i=0; i<detectionsOnThisScale_d && bLoopVirgin; i++)
314
if (h_pixelMask.ptr()[i] != h_pixelMask_d.ptr()[i])
332
bool TestHaarCascadeApplication::deinit()