20#include "ir/DataType.h"
// Fragment of asTensorShape(const ir::Shape &shape, bool apply_dim_correction):
// converts an IR shape into an ::arm_compute::TensorShape.
// NOTE(review): the leading integers on these lines are line-number artifacts
// from the scrape that produced this file; the fragment is not compilable as-is.
// Rank-0 (scalar) shapes are promoted to a 1-element 1-D shape.
32 const auto tensor_shape = shape.rank() == 0 ?
ir::Shape{1} : shape;
34 const uint32_t rank = tensor_shape.
rank();
36 ::arm_compute::TensorShape res{};
38 res.set_num_dimensions(rank);
// Each IR axis is remapped through ToARMComputeAxis before being set —
// presumably because ACL orders dimensions opposite to the IR; confirm
// against ToARMComputeAxis's definition.
40 for (uint32_t axis = 0; axis < rank; ++axis)
49 res.set(
ToARMComputeAxis(rank, axis).value(), tensor_shape.
dim(axis), apply_dim_correction);
// Fragment of asTensorCoordinate(const ir::Coordinates &coord): converts IR
// coordinates into ::arm_compute::Coordinates of the same rank.
// (Leading integers are scrape line-number artifacts; the loop body that
// copies each axis was lost in the scrape.)
57 const uint32_t rank = coord.
size();
59 ::arm_compute::Coordinates res{};
61 res.set_num_dimensions(rank);
63 for (uint32_t axis = 0; axis < rank; ++axis)
// Fragment of asDataType(ir::DataType): switch that maps IR element types to
// ACL (::arm_compute) data types; any type without a case reaches the
// trailing throw. (Leading integers are scrape line-number artifacts; the
// switch header and braces were lost in the scrape.)
75 case ir::DataType::FLOAT32:
76 return ::arm_compute::DataType::F32;
77 case ir::DataType::INT32:
78 return ::arm_compute::DataType::S32;
79 case ir::DataType::UINT32:
80 return ::arm_compute::DataType::U32;
81 case ir::DataType::QUANT_UINT8_ASYMM:
82 return ::arm_compute::DataType::QASYMM8;
// BOOL8 falls through to share U8 with UINT8 — presumably because ACL has
// no dedicated boolean type; confirm against ACL's DataType enum.
83 case ir::DataType::BOOL8:
84 case ir::DataType::UINT8:
85 return ::arm_compute::DataType::U8;
86 case ir::DataType::QUANT_INT8_SYMM:
87 return ::arm_compute::DataType::QSYMM8;
88 case ir::DataType::QUANT_INT8_ASYMM:
89 return ::arm_compute::DataType::QASYMM8_SIGNED;
90 case ir::DataType::FLOAT16:
91 return ::arm_compute::DataType::F16;
92 case ir::DataType::INT64:
93 return ::arm_compute::DataType::S64;
94 case ir::DataType::QUANT_INT16_ASYMM:
95 return ::arm_compute::DataType::QASYMM16;
96 case ir::DataType::QUANT_INT8_SYMM_PER_CHANNEL:
97 return ::arm_compute::DataType::QSYMM8_PER_CHANNEL;
99 throw std::runtime_error(
"Not supported internal data type, yet");
// Fragment of asQuantizationInfo(const float scale, const int32_t offset):
// wraps the scale/zero-point pair in an ACL QuantizationInfo.
106 return ::arm_compute::QuantizationInfo(scale,
offset);
// Fragment of asTensorInfo(shape, typeInfo, apply_dim_correction): most of
// the body was lost in the scrape; what remains shows the produced
// TensorInfo is always tagged with the NHWC data layout.
110 bool apply_dim_correction)
115 info.set_data_layout(::arm_compute::DataLayout::NHWC);
// Fragment of asPadStrideInfo(padding, stride): builds an ACL PadStrideInfo
// from explicit padding and stride, using FLOOR dimension rounding. The
// padding arguments between the stride and the rounding mode were lost in
// the scrape.
122 return ::arm_compute::PadStrideInfo{stride.
horizontal,
128 ::arm_compute::DimensionRoundingType::FLOOR};
// Fragment of asActivationLayerInfo(const ir::Activation act_code): maps IR
// fused-activation codes to ACL ActivationLayerInfo. The switch header and
// case labels were lost in the scrape, so which IR code each return serves
// is presumed from the parameter values — confirm against the upstream file.
136 return ::arm_compute::ActivationLayerInfo{};
138 return ::arm_compute::ActivationLayerInfo{
139 ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
// LU_BOUNDED_RELU(1.0, -1.0): clamp to [-1, 1] — presumably RELU1.
141 return ::arm_compute::ActivationLayerInfo{
142 ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
// LU_BOUNDED_RELU(6.0, 0.0): clamp to [0, 6] — presumably RELU6.
144 return ::arm_compute::ActivationLayerInfo{
145 ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
148 return ::arm_compute::ActivationLayerInfo{
149 ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
155 return ::arm_compute::ActivationLayerInfo{
156 ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.0f, 0.0f};
158 throw std::runtime_error{
"Not supported internal activation, yet"};
// Fragment of a second asActivationLayerInfo overload for elementwise
// activations taking explicit alpha/beta parameters. Case labels were lost
// in the scrape; the activation each return serves is inferred only from
// the ACL function used — confirm against the upstream file.
163::arm_compute::ActivationLayerInfo
174 return ::arm_compute::ActivationLayerInfo{
175 ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
176 return ::arm_compute::ActivationLayerInfo{
180 ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, alpha};
185 return ::arm_compute::ActivationLayerInfo{
186 ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, alpha, beta};
189 return ::arm_compute::ActivationLayerInfo{
190 ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, alpha, beta};
196 return ::arm_compute::ActivationLayerInfo{
197 ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
199 return ::arm_compute::ActivationLayerInfo{
200 ::arm_compute::ActivationLayerInfo::ActivationFunction::LEAKY_RELU, alpha};
202 throw std::runtime_error{
"Not supported internal elementwise activation, yet"};
// Fragment of asCoordinates(const ir::Operand &operand, int32_t rank):
// resolves the axis operand to a set via asSet(), then appends each axis to
// an ACL Coordinates in ascending (std::set iteration) order.
209 std::set<uint32_t> axes =
asSet(operand, rank);
211 arm_compute::Coordinates reduce_axes;
212 for (
const int32_t axis : axes)
214 reduce_axes.set(reduce_axes.num_dimensions(), axis);
// Fragment of asSet(const ir::Operand &operand, int32_t rank): iterates the
// elements of a constant axis operand, reading each as INT32 or INT64; any
// other element type throws. The switch header and the code that inserts
// `axis` into `axes` were lost in the scrape.
// NOTE(review): no `break;` is visible between the INT32 and INT64 cases —
// almost certainly dropped by the scrape rather than a real fall-through;
// confirm against the upstream file.
222 std::set<std::uint32_t> axes;
224 for (
size_t i = 0; i < operand.
shape().num_elements(); ++i)
229 case ir::DataType::INT32:
230 axis =
reinterpret_cast<const int32_t *
>(operand.
data()->base())[i];
232 case ir::DataType::INT64:
233 axis =
reinterpret_cast<const int64_t *
>(operand.
data()->base())[i];
236 throw std::runtime_error(
"acl_common::asSet: Not supported data type");
246std::unique_ptr<AclFunction>
asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer)
248 return std::make_unique<AclFunction>(std::move(layer));
246std::unique_ptr<AclFunction>
asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer) {
…}
// Fragment of asRuntimeDataType(::arm_compute::DataType): the inverse of
// asDataType — maps ACL data types back to IR element types; unhandled ACL
// types reach the trailing throw. (Switch header lost in the scrape.)
// Note: BOOL8 is not recoverable here — U8 always maps back to UINT8, which
// the visible code demonstrates; round-tripping BOOL8 therefore loses the
// boolean tag.
255 case ::arm_compute::DataType::F32:
256 return ir::DataType::FLOAT32;
257 case ::arm_compute::DataType::S32:
258 return ir::DataType::INT32;
259 case ::arm_compute::DataType::U32:
260 return ir::DataType::UINT32;
261 case ::arm_compute::DataType::QASYMM8:
262 return ir::DataType::QUANT_UINT8_ASYMM;
263 case ::arm_compute::DataType::QASYMM8_SIGNED:
264 return ir::DataType::QUANT_INT8_ASYMM;
265 case ::arm_compute::DataType::U8:
266 return ir::DataType::UINT8;
267 case ::arm_compute::DataType::QSYMM8:
268 return ir::DataType::QUANT_INT8_SYMM;
269 case ::arm_compute::DataType::F16:
270 return ir::DataType::FLOAT16;
271 case ::arm_compute::DataType::S64:
272 return ir::DataType::INT64;
274 throw std::runtime_error{
"Not supported acl data type, yet"};
// Fragment of convertPoolType(ir::operation::Pool2D::PoolType): maps IR pool
// types to ACL PoolingType. The case labels (presumably AVG, L2_POOL, MAX —
// inferred only from the returned values) were lost in the scrape.
281 switch (pool_type_ir)
284 return arm_compute::PoolingType::AVG;
286 return arm_compute::PoolingType::L2;
288 return arm_compute::PoolingType::MAX;
290 throw std::runtime_error(
"convertPoolType: Not supported operation yet");
// Fragment of convertReduceType(ir::operation::Reduce::ReduceType): maps IR
// reduce types to ACL ReductionOperation. Case labels (presumably MAX, MIN,
// SUM — inferred only from the returned values) were lost in the scrape.
296 switch (reduce_type_ir)
299 return arm_compute::ReductionOperation::MAX;
301 return arm_compute::ReductionOperation::MIN;
303 return arm_compute::ReductionOperation::SUM;
305 throw std::runtime_error(
"convertReduceType: Not supported operation yet");
// Fragment of asPixelValue(const ir::Operand &operand): requires a scalar
// (single-element) constant operand and wraps its value in an ACL
// PixelValue, dispatching on the operand's element type; unsupported types
// reach the trailing throw. (Switch header lost in the scrape.)
312 assert(operand.
shape().num_elements() == 1);
315 case ir::DataType::INT32:
316 return arm_compute::PixelValue(operand.
asScalar<int32_t>());
317 case ir::DataType::INT64:
318 return arm_compute::PixelValue(operand.
asScalar<int64_t>());
// NOTE(review): UINT32 is read via asScalar<uint64_t>() — a width mismatch
// with the declared element type; confirm this is intentional upstream.
319 case ir::DataType::UINT32:
320 return arm_compute::PixelValue(operand.
asScalar<uint64_t>());
321 case ir::DataType::UINT8:
322 return arm_compute::PixelValue(operand.
asScalar<uint8_t>());
323 case ir::DataType::FLOAT32:
324 return arm_compute::PixelValue(operand.
asScalar<
float>());
326 throw std::runtime_error(
"asPixelValue : Not supported datatype yet");
330arm_compute::Size2D
asDilation(uint32_t dilation_width, uint32_t dilation_height)
332 assert(dilation_width != 0);
333 assert(dilation_height != 0);
335 return arm_compute::Size2D(dilation_width, dilation_height);
330arm_compute::Size2D
asDilation(uint32_t dilation_width, uint32_t dilation_height) {
…}
const Dimension & dim(uint32_t axis) const
uint32_t rank(void) const
Class to represent position(offset) of tensor. Assume that the front is higher dimensional....
size_t size() const
Return size of coordinates.
const TypeInfo & typeInfo(void) const
const Shape & shape(void) const
void data(std::shared_ptr< Data > &&data)
bool isConstant(void) const
Get true if Operand is const, otherwise false.
int32_t zero_point() const
__global uchar * offset(const Image *img, int x, int y)
volatile const char info[]
arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis)
::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord)
std::set< uint32_t > asSet(const ir::Operand &operand, int32_t rank)
::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
arm_compute::ReductionOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
arm_compute::Coordinates asCoordinates(const ir::Operand &operand, int32_t rank)
arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
arm_compute::Size2D asDilation(uint32_t dilation_width, uint32_t dilation_height)
::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding, const ir::Stride &stride)
std::unique_ptr< AclFunction > asAclFunction(std::unique_ptr<::arm_compute::IFunction > &&layer)
::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, bool apply_dim_correction)
::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, bool apply_dim_correction)
::arm_compute::DataType asDataType(const ir::DataType type)
::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)