// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ext_list.hpp"
#include "ext_base.hpp"

#include <vector>
namespace InferenceEngine {
namespace Extensions {
namespace Cpu {
class OneHotImpl: public ExtLayerBase { |
|
15 |
public: |
|
16 |
explicit OneHotImpl(const CNNLayer* layer) { |
|
17 |
try { |
|
18 |
depth = layer->GetParamAsUInt("depth"); |
|
19 |
on_value = layer->GetParamAsFloat("on_value", 1.0f); |
|
20 |
off_value = layer->GetParamAsFloat("off_value", 0.0f); |
|
21 |
axis = layer->GetParamAsInt("axis", -1); |
|
22 |
||
23 |
src_dims = layer->insData[0].lock()->getTensorDesc().getDims(); |
|
24 |
dst_dims = layer->outData[0]->getTensorDesc().getDims(); |
|
25 |
||
26 |
int output_dims_size = dst_dims.size(); |
|
27 |
if (layer->CheckParamPresence("axis") && |
|
28 |
(-1 > axis || axis >= output_dims_size)) { |
|
29 |
THROW_IE_EXCEPTION << "The value of " << layer->name << " layer axis parameter must be between -1 <= axis < "\ |
|
30 |
<< output_dims_size << ", but actually it is " << axis; |
|
31 |
}
|
|
32 |
||
33 |
if (!( ((1 + src_dims.size()) == dst_dims.size()) || |
|
34 |
(src_dims.size() == 1 && dst_dims.size() == 1 && dst_dims[0] == depth && src_dims[0] == 1))) |
|
35 |
THROW_IE_EXCEPTION << layer->name << " Incorrect number of input/output dimensions!"; |
|
36 |
||
37 |
addConfig(layer, { DataConfigurator(ConfLayout::PLN) }, { DataConfigurator(ConfLayout::PLN) }); |
|
38 |
} catch (InferenceEngine::details::InferenceEngineException &ex) { |
|
39 |
errorMsg = ex.what(); |
|
40 |
}
|
|
41 |
}
|
|
42 |
||
43 |
StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override { |
|
44 |
const auto *src_data = inputs[0]->cbuffer().as<const float *>(); |
|
45 |
auto *dst_data = outputs[0]->buffer().as<float *>(); |
|
46 |
std::size_t prefix_size = 1; |
|
47 |
auto input_dims = inputs[0]->getTensorDesc().getDims(); |
|
48 |
||
49 |
std::size_t actual_axis = (axis == -1) ? src_dims.size() : axis; |
|
50 |
for (size_t i = 0; i < actual_axis; ++i) |
|
51 |
prefix_size *= input_dims[i]; |
|
52 |
||
53 |
std::size_t suffix_size = inputs[0]->size() / prefix_size; |
|
54 |
||
55 |
std::size_t dst_offset = 0; |
|
56 |
for (std::size_t prefix_idx = 0; prefix_idx < prefix_size; ++prefix_idx) { |
|
57 |
for (std::size_t depth_idx = 0; depth_idx < depth; ++depth_idx) { |
|
58 |
for (std::size_t suffix_idx = 0; suffix_idx < suffix_size; suffix_idx++) { |
|
59 |
auto src_index = prefix_idx * suffix_size + suffix_idx; |
|
60 |
std::size_t v = static_cast<std::size_t>(src_data[src_index]); |
|
61 |
dst_data[dst_offset++] = (v == depth_idx) ? on_value : off_value; |
|
62 |
}
|
|
63 |
}
|
|
64 |
}
|
|
65 |
return OK; |
|
66 |
}
|
|
67 |
||
68 |
private: |
|
69 |
uint32_t depth; |
|
70 |
float on_value = 1.f; |
|
71 |
float off_value = 0.f; |
|
72 |
int32_t axis = -1; |
|
73 |
SizeVector src_dims; |
|
74 |
SizeVector dst_dims; |
|
75 |
};
|
|
76 |
||
77 |
REG_FACTORY_FOR(ImplFactory<OneHotImpl>, OneHot); |
|
}  // namespace Cpu
}  // namespace Extensions
}  // namespace InferenceEngine