#include "Convert.h"

#include "Swizzle.h"
#include "ir/DataType.h"

#include <cassert>
#include <memory>
#include <set>
#include <stdexcept>

namespace onert
{
namespace backend
{
namespace acl_common
{
::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, bool apply_dim_correction)
{
  // ACL has no scalar shape, so a rank-0 shape is treated as rank-1 with one element
  const auto tensor_shape = shape.rank() == 0 ? ir::Shape{1} : shape;

  const uint32_t rank = tensor_shape.rank();

  ::arm_compute::TensorShape res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    res.set(ToARMComputeAxis(rank, axis).value(), tensor_shape.dim(axis), apply_dim_correction);
  }

  return res;
}
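// Illustration (a sketch, assuming ToARMComputeAxis() from Swizzle.h reverses the
// axis order, i.e. maps IR axis `axis` to ACL dimension `rank - axis - 1`):
//
//   ir::Shape ir_shape{2, 3, 4};                    // IR order: outermost axis first
//   auto acl_shape = asTensorShape(ir_shape, true); // ACL order: innermost axis first
//   // acl_shape[0] == 4, acl_shape[1] == 3, acl_shape[2] == 2
//
// ACL indexes dimension 0 as the innermost (fastest-varying) axis, hence the swizzle.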
::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord)
{
  const uint32_t rank = coord.size();

  ::arm_compute::Coordinates res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    res.set(ToARMComputeAxis(rank, axis).value(), coord[axis]);
  }

  return res;
}
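// For example, an IR coordinate (n, h, w, c) in a rank-4 tensor becomes the ACL
// coordinate (c, w, h, n) under the same axis reversal as asTensorShape() above.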
::arm_compute::DataType asDataType(const ir::DataType type)
{
  switch (type)
  {
    case ir::DataType::FLOAT32:
      return ::arm_compute::DataType::F32;
    case ir::DataType::INT32:
      return ::arm_compute::DataType::S32;
    case ir::DataType::UINT32:
      return ::arm_compute::DataType::U32;
    case ir::DataType::QUANT_UINT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8;
    case ir::DataType::BOOL8:
    case ir::DataType::UINT8:
      return ::arm_compute::DataType::U8;
    case ir::DataType::QUANT_INT8_SYMM:
      return ::arm_compute::DataType::QSYMM8;
    case ir::DataType::QUANT_INT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8_SIGNED;
    case ir::DataType::FLOAT16:
      return ::arm_compute::DataType::F16;
    case ir::DataType::INT64:
      return ::arm_compute::DataType::S64;
    case ir::DataType::QUANT_INT16_ASYMM:
      return ::arm_compute::DataType::QASYMM16;
    case ir::DataType::QUANT_INT8_SYMM_PER_CHANNEL:
      return ::arm_compute::DataType::QSYMM8_PER_CHANNEL;
    default:
      throw std::runtime_error("Not supported internal data type, yet");
  }
}
::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
{
  return ::arm_compute::QuantizationInfo(scale, offset);
}
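// ACL's QuantizationInfo holds the usual affine parameters, where
// real_value = scale * (quantized_value - offset). For example, QASYMM8 data with
// scale 0.5f and offset 128 covers real values in roughly [-64.0, 63.5]:
//
//   auto qinfo = asQuantizationInfo(0.5f, 128);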
::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
                                       bool apply_dim_correction)
{
  ::arm_compute::TensorInfo info(asTensorShape(shape, apply_dim_correction), 1,
                                 asDataType(typeInfo.type()),
                                 asQuantizationInfo(typeInfo.scale(), typeInfo.zero_point()));
  info.set_data_layout(::arm_compute::DataLayout::NHWC);
  return info;
}
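// Typical use is building the ACL-side descriptor for an IR operand (a sketch;
// `operand` is a hypothetical ir::Operand):
//
//   auto info = asTensorInfo(operand.shape(), operand.typeInfo(), true);
//
// The data layout is pinned to NHWC, the layout assumed throughout this backend.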
::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
                                             const ir::Stride &stride)
{
  return ::arm_compute::PadStrideInfo{stride.horizontal,
                                      stride.vertical,
                                      padding.left,
                                      padding.right,
                                      padding.top,
                                      padding.bottom,
                                      ::arm_compute::DimensionRoundingType::FLOOR};
}
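// Example: stride 2x2 with explicit padding {left=0, right=1, top=0, bottom=1}
// becomes PadStrideInfo{2, 2, 0, 1, 0, 1, FLOOR}; ACL takes the horizontal (width)
// parameters before the vertical (height) ones.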
::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
{
  switch (act_code)
  {
    case ir::Activation::NONE:
      return ::arm_compute::ActivationLayerInfo{};
    case ir::Activation::RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
    case ir::Activation::RELU1:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
    case ir::Activation::RELU6:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
    case ir::Activation::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
    case ir::Activation::SIGMOID:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.0f, 0.0f};
    default:
      throw std::runtime_error{"Not supported internal activation, yet"};
  }
}
::arm_compute::ActivationLayerInfo
asActivationLayerInfo(const ir::operation::ElementwiseActivation::Type op_type, float alpha,
                      float beta)
{
  switch (op_type)
  {
    case ir::operation::ElementwiseActivation::Type::RELU:
      if (beta == 0.f)
      {
        if (alpha == ir::operation::ElementwiseActivation::infinity)
        {
          // No bounds at all: plain ReLU
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
        }
        // Upper bound only
        return ::arm_compute::ActivationLayerInfo{
          ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, alpha};
      }
      // Both lower and upper bounds
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, alpha, beta};
    case ir::operation::ElementwiseActivation::Type::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, alpha, beta};
    case ir::operation::ElementwiseActivation::Type::LOGISTIC:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
    case ir::operation::ElementwiseActivation::Type::LEAKY_RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LEAKY_RELU, alpha};
    default:
      throw std::runtime_error{"Not supported internal elementwise activation, yet"};
  }
}
213 std::set<uint32_t> axes =
asSet(operand, rank);
215 arm_compute::Coordinates reduce_axes;
216 for (
const int32_t axis : axes)
218 reduce_axes.set(reduce_axes.num_dimensions(), axis);
226 std::set<std::uint32_t> axes;
228 for (
size_t i = 0; i < operand.
shape().num_elements(); ++i)
233 case ir::DataType::INT32:
234 axis =
reinterpret_cast<const int32_t *
>(operand.
data()->base())[i];
236 case ir::DataType::INT64:
237 axis =
reinterpret_cast<const int64_t *
>(operand.
data()->base())[i];
240 throw std::runtime_error(
"acl_common::asSet: Not supported data type");
std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer)
{
  return std::make_unique<AclFunction>(std::move(layer));
}
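// Typical use once a kernel generator has configured an ACL layer (a sketch;
// NEActivationLayer is just one example of an ::arm_compute::IFunction, and
// `input`, `output`, `act_info` are hypothetical):
//
//   auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
//   fn->configure(input, output, act_info);
//   auto acl_fn = asAclFunction(std::move(fn));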
ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)
{
  switch (data_type)
  {
    case ::arm_compute::DataType::F32:
      return ir::DataType::FLOAT32;
    case ::arm_compute::DataType::S32:
      return ir::DataType::INT32;
    case ::arm_compute::DataType::U32:
      return ir::DataType::UINT32;
    case ::arm_compute::DataType::QASYMM8:
      return ir::DataType::QUANT_UINT8_ASYMM;
    case ::arm_compute::DataType::QASYMM8_SIGNED:
      return ir::DataType::QUANT_INT8_ASYMM;
    case ::arm_compute::DataType::U8:
      return ir::DataType::UINT8;
    case ::arm_compute::DataType::QSYMM8:
      return ir::DataType::QUANT_INT8_SYMM;
    case ::arm_compute::DataType::F16:
      return ir::DataType::FLOAT16;
    case ::arm_compute::DataType::S64:
      return ir::DataType::INT64;
    default:
      throw std::runtime_error{"Not supported acl data type, yet"};
  }
}
arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
{
  switch (pool_type_ir)
  {
    case ir::operation::Pool2D::PoolType::AVG:
      return arm_compute::PoolingType::AVG;
    case ir::operation::Pool2D::PoolType::L2:
      return arm_compute::PoolingType::L2;
    case ir::operation::Pool2D::PoolType::MAX:
      return arm_compute::PoolingType::MAX;
    default:
      throw std::runtime_error("convertPoolType: Not supported operation yet");
  }
}
arm_compute::ReductionOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
{
  switch (reduce_type_ir)
  {
    case ir::operation::Reduce::ReduceType::MAX:
      return arm_compute::ReductionOperation::MAX;
    case ir::operation::Reduce::ReduceType::MIN:
      return arm_compute::ReductionOperation::MIN;
    case ir::operation::Reduce::ReduceType::SUM:
      return arm_compute::ReductionOperation::SUM;
    default:
      throw std::runtime_error("convertReduceType: Not supported operation yet");
  }
}
arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
{
  assert(operand.isConstant());
  assert(operand.shape().num_elements() == 1);
  switch (operand.typeInfo().type())
  {
    case ir::DataType::INT32:
      return arm_compute::PixelValue(operand.asScalar<int32_t>());
    case ir::DataType::INT64:
      return arm_compute::PixelValue(operand.asScalar<int64_t>());
    case ir::DataType::UINT32:
      return arm_compute::PixelValue(operand.asScalar<uint64_t>());
    case ir::DataType::UINT8:
      return arm_compute::PixelValue(operand.asScalar<uint8_t>());
    case ir::DataType::FLOAT32:
      return arm_compute::PixelValue(operand.asScalar<float>());
    default:
      throw std::runtime_error("asPixelValue : Not supported datatype yet");
  }
}
arm_compute::Size2D asDilation(uint32_t dilation_width, uint32_t dilation_height)
{
  assert(dilation_width != 0);
  assert(dilation_height != 0);

  return arm_compute::Size2D(dilation_width, dilation_height);
}

} // namespace acl_common
} // namespace backend
} // namespace onert