#include "KernelGenerator.h"

#include <arm_compute/runtime/CL/CLFunctions.h>

#include "ir/DataType.h"
#include "exec/NopFunction.h"
#include "exec/FunctionSequence.h"
#include "util/logging.h"

using ::onert::backend::acl_common::asAclFunction;

KernelGenerator::KernelGenerator(
  const ir::Graph &graph, const std::shared_ptr<TensorBuilder> &tensor_builder,
  const std::shared_ptr<acl_common::AclTensorRegistry<TensorManager>> &tensor_reg)
  : basic::KernelGeneratorBase{graph}, _ctx(graph.operands()), _operations_ctx(graph.operations()),
    _tensor_builder(tensor_builder), _tensor_reg(tensor_reg)
{
  // DO NOTHING
}

std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex ind)
{
  auto ret = std::make_unique<exec::FunctionSequence>();
  ret->enableDynamicShapeInferer(false);

  const auto &op = _operations_ctx.at(ind);
  op.accept(*this);
  ret->append(releaseFunction());
  return ret;
}
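// Every visit() below follows the same pattern: resolve the operand indexes of
// the node, look up the backend tensors in _tensor_reg, build an arm_compute CL
// function via acl_common::generateLayer, and hand it to _return_fn wrapped by
// asAclFunction. A minimal sketch of what such a helper could look like (an
// illustrative assumption, not the actual acl_common implementation):
//
//   template <typename Layer, typename... Args>
//   std::unique_ptr<arm_compute::IFunction> generateLayer(Args &&...args)
//   {
//     auto layer = std::make_unique<Layer>();        // create the CL function
//     layer->configure(std::forward<Args>(args)...); // bind tensors and params
//     return layer;
//   }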
void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::BatchToSpaceND::INPUT)};
  const auto block_size_index{node.getInputs().at(ir::operation::BatchToSpaceND::BLOCK_SIZE)};

  const auto NNApiInputs = 2;
  if (node.getInputs().size() != NNApiInputs)
  {
    const auto crops_index{node.getInputs().at(ir::operation::BatchToSpaceND::CROPS_DATA)};
    if (!_ctx.at(crops_index).isConstant())
    {
      throw std::runtime_error("Non-constant crops NYI for acl_cl backend BatchToSpaceND");
    }

    auto crops = _ctx.at(crops_index).asVector<int32_t>();
    for (auto &&crop : crops)
    {
      if (crop != 0)
      {
        throw std::runtime_error("Non-zero crops NYI for acl_cl backend BatchToSpaceND");
      }
    }
  }

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  if (!_ctx.at(block_size_index).data())
    throw std::runtime_error("ACL CL does not support dynamic block size for BatchToSpaceND");

  auto block = _ctx.at(block_size_index).asVector<int32_t>();
  int32_t height = block[0];
  int32_t width = block[1];

  auto fn = acl_common::generateLayer<arm_compute::CLBatchToSpaceLayer>(
    ifm_tensor->handle(), width, height, ofm_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::BinaryArithmetic &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto lhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::LHS)};
  const auto rhs_index{node.getInputs().at(ir::operation::BinaryArithmetic::Input::RHS)};

  const auto activation = node.param().activation;
  const auto act_info = acl_common::asActivationLayerInfo(activation);

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
  auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);

  std::unique_ptr<arm_compute::IFunction> fn;
  switch (node.param().arithmetic_type)
  {
    case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
    {
      arm_compute::CLArithmeticAddition::validate(lhs_tensor->info(), rhs_tensor->info(),
                                                  ofm_tensor->info(),
                                                  arm_compute::ConvertPolicy::SATURATE, act_info)
        .throw_if_error();
      fn = acl_common::generateLayer<arm_compute::CLArithmeticAddition>(
        lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(),
        arm_compute::ConvertPolicy::SATURATE, act_info);
      break;
    }
    case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
    {
      arm_compute::CLArithmeticSubtraction::validate(lhs_tensor->info(), rhs_tensor->info(),
                                                     ofm_tensor->info(),
                                                     arm_compute::ConvertPolicy::SATURATE, act_info)
        .throw_if_error();
      fn = acl_common::generateLayer<arm_compute::CLArithmeticSubtraction>(
        lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(),
        arm_compute::ConvertPolicy::SATURATE, act_info);
      break;
    }
    case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
    {
      arm_compute::CLPixelWiseMultiplication::validate(
        lhs_tensor->info(), rhs_tensor->info(), ofm_tensor->info(), 1.0,
        arm_compute::ConvertPolicy::SATURATE, arm_compute::RoundingPolicy::TO_NEAREST_EVEN,
        act_info)
        .throw_if_error();
      fn = acl_common::generateLayer<arm_compute::CLPixelWiseMultiplication>(
        lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), 1.0,
        arm_compute::ConvertPolicy::SATURATE, arm_compute::RoundingPolicy::TO_NEAREST_EVEN,
        act_info);
      break;
    }
    case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
    {
      arm_compute::CLArithmeticDivision::validate(lhs_tensor->info(), rhs_tensor->info(),
                                                  ofm_tensor->info(), act_info)
        .throw_if_error();
      fn = acl_common::generateLayer<arm_compute::CLArithmeticDivision>(
        lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), act_info);
      break;
    }
    default:
      assert(false && "The BinaryArithmetic operation supports only binary arithmetic operations");
      break;
  }

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Conv2D &node)
{
  using ir::operation::Conv2D;

  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
  const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
  const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};

  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
  const auto &ker_shape = _ctx.at(ker_index).shape();
  const auto ker_height = ker_shape.dim(1);
  const auto ker_width = ker_shape.dim(2);

  const auto stride = node.param().stride;
  const auto padding =
    ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, ker_width, ker_height);
  const auto activation = node.param().activation;

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
  auto bias_tensor = _tensor_reg->getAclTensor(bias_index);

  const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
  const auto act_info = acl_common::asActivationLayerInfo(activation);

  auto fn = acl_common::generateLayer<arm_compute::CLConvolutionLayer>(
    _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), ifm_tensor->handle(),
    ker_tensor->handle(), bias_tensor->handle(), ofm_tensor->handle(), conv_info,
    ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);

  _return_fn = asAclFunction(std::move(fn));
}
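// Fused activations are handled in two ways in this backend: layers that accept
// an ActivationLayerInfo (such as the convolution and arithmetic layers above)
// get the activation folded in via act_info, while other visit() methods append
// a separate activation kernel through ActivationBuilder::generate().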
void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
{
  using ir::operation::DepthwiseConv2D;

  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(DepthwiseConv2D::Input::INPUT)};
  const auto ker_index{node.getInputs().at(DepthwiseConv2D::Input::KERNEL)};
  const auto bias_index{node.getInputs().at(DepthwiseConv2D::Input::BIAS)};

  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
  const auto &ker_shape = _ctx.at(ker_index).shape();
  const auto ker_height = ker_shape.dim(1);
  const auto ker_width = ker_shape.dim(2);

  const auto stride = node.param().stride;
  const auto dilation = node.param().dilation;
  const auto padding =
    ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
                         dilation.width_factor, dilation.height_factor);
  const auto multiplier = node.param().multiplier;
  const auto activation = node.param().activation;

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto ker_tensor = _tensor_reg->getAclTensor(ker_index);
  auto bias_tensor = _tensor_reg->getAclTensor(bias_index);

  const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
  const auto act_info = acl_common::asActivationLayerInfo(activation);
  const auto dilation_info = acl_common::asDilation(dilation.width_factor, dilation.height_factor);

  auto fn = acl_common::generateLayer<arm_compute::CLDepthwiseConvolutionLayer>(
    ifm_tensor->handle(), ker_tensor->handle(), bias_tensor->handle(), ofm_tensor->handle(),
    conv_info, multiplier, act_info, dilation_info);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Concat &node)
{
  const auto ofm_index{node.getOutputs().at(0)};

  std::vector<ir::OperandIndex> input_indexes;
  for (const auto &input : node.getInputs())
    input_indexes.emplace_back(input);

  const auto axis = node.param().axis;

  // If all inputs are allocated as subtensors of the output, concat is a no-op.
  bool eliminated = _tensor_builder->areSubTensorsOf(ofm_index, node.getInputs());
  if (eliminated)
  {
    VERBOSE(acl_cl_KernelGenerator_Concat) << "Concat eliminated" << std::endl;
    _return_fn = std::make_unique<exec::NopFunction>();
    return;
  }

  auto output_tensor = _tensor_reg->getAclTensor(ofm_index);
  std::vector<const ::arm_compute::ICLTensor *> input_tensors;
  for (const auto &ifm_ind : input_indexes)
    input_tensors.emplace_back(_tensor_reg->getAclTensor(ifm_ind)->handle());

  std::unique_ptr<::arm_compute::IFunction> fn;
  if (input_indexes.size() < 2)
  {
    ::arm_compute::ICLTensor *input_tensor = _tensor_reg->getAclTensor(input_indexes.at(0))->handle();
    fn = acl_common::generateLayer<arm_compute::CLCopy>(input_tensor, output_tensor->handle());
  }
  else
  {
    const auto rank = _ctx.at(ofm_index).shape().rank();
    const auto fixed_axis = acl_common::ToARMComputeAxis(rank, axis).value();
    fn = acl_common::generateLayer<::arm_compute::CLConcatenateLayer>(
      input_tensors, output_tensor->handle(), fixed_axis);
  }

  _return_fn = asAclFunction(std::move(fn));
}
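// The frontend axis is counted on the IR shape, while ACL stores dimensions in
// reverse order, so concat (and the other axis-based ops below) remap the axis
// with acl_common::ToARMComputeAxis(rank, axis) before configuring the ACL
// layer. Rough idea of that mapping (an illustrative assumption):
//
//   // e.g. rank 4, axis 1  ->  ACL axis 2
//   uint32_t to_acl_axis(uint32_t rank, uint32_t axis) { return rank - axis - 1; }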
void KernelGenerator::visit(const ir::operation::FullyConnected &node)
{
  const auto output_index{node.getOutputs().at(0)};
  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  const auto activation = node.param().activation;
  if (node.param().weights_format == ir::FullyConnectedWeightsFormat::Shuffled16x1Float32)
    throw std::runtime_error(
      "KernelGenerator(acl_cl): FullyConnected 16x1Float32 weights is not supported.");

  auto fn = acl_common::kernelGenFullyConnected<acl_common::AclFunction, ::arm_compute::ICLTensor,
                                                ::arm_compute::CLFullyConnectedReshapingLayer>(
    node, _ctx, _tensor_builder, _tensor_reg);
  _return_fn = std::make_unique<exec::FunctionSequence>(
    std::move(fn), ActivationBuilder::generate(activation, output_tensor->handle()));
}
void KernelGenerator::visit(const ir::operation::Reduce &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
  const auto axes_index{node.getInputs().at(ir::operation::Reduce::Input::AXES)};
  const auto keep_dims{node.param().keep_dims};
  const auto reduce_type = node.param().reduce_type;

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);

  // Convert to ACL axes taking into account negative values and duplicates.
  const auto &axes = _ctx.at(axes_index);
  const auto input_rank = _ctx.at(input_index).shape().rank();

  std::unique_ptr<arm_compute::IFunction> fn;
  if (reduce_type == ir::operation::Reduce::ReduceType::MEAN)
  {
    const auto acl_axes = acl_common::asCoordinates(axes, input_rank);
    fn = acl_common::generateLayer<arm_compute::CLReduceMean>(input_tensor->handle(), acl_axes,
                                                              keep_dims, output_tensor->handle());
  }
  else
  {
    const auto acl_axes = acl_common::asSet(axes, input_rank);
    fn = acl_common::generateLayer<arm_compute::CLReduceOperation>(
      _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
      output_tensor->handle(), acl_axes, keep_dims, acl_common::convertReduceType(reduce_type));
  }

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Reshape &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)};

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);

  auto fn = acl_common::generateLayer<arm_compute::CLReshapeLayer>(input_tensor->handle(),
                                                                   output_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Squeeze &node)
{
  // Squeeze is identical to Reshape; the optional dims/ndim parameters are
  // ignored because the output tensor already carries the squeezed shape.
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
  const auto dims{node.param().dims};
  const auto ndim{node.param().ndim};
  (void)dims;
  (void)ndim;

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);
  auto fn = acl_common::generateLayer<arm_compute::CLReshapeLayer>(input_tensor->handle(),
                                                                   output_tensor->handle());
  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Softmax &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)};
  const auto beta = node.param().beta;

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);

  auto fn = acl_common::generateLayer<arm_compute::CLSoftmaxLayer>(
    _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
    output_tensor->handle(), beta);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Slice &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::Slice::Input::INPUT)};
  const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)};
  const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)};

  auto outputData_tensor = _tensor_reg->getAclTensor(output_index);
  auto inputData_tensor = _tensor_reg->getAclTensor(input_index);

  // Set initializers for indices data such as order of inputData
  int input_rank = _ctx.at(input_index).shape().rank();
  std::vector<int32_t> starts;
  std::vector<int32_t> ends;
  starts.resize(input_rank, 0);
  ends.resize(input_rank, 0);

  assert(_ctx.at(begins_index).data());
  assert(_ctx.at(sizes_index).data());
  auto beginData_base = _ctx.at(begins_index).data()->base();
  auto sizeData_base = _ctx.at(sizes_index).data()->base();
  [[maybe_unused]] const int beginData_size = _ctx.at(begins_index).shape().num_elements();
  [[maybe_unused]] const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();

  assert(_ctx.at(begins_index).typeInfo().type() == DataType::INT32);
  assert(_ctx.at(sizes_index).typeInfo().type() == DataType::INT32);
  assert(beginData_size == input_rank);
  assert(sizeData_size == input_rank);

  assert(beginData_base != nullptr);
  for (int n = 0; n < input_rank; ++n)
  {
    auto axis = acl_common::ToARMComputeAxis(input_rank, n).value();

    int32_t begin_value = *(reinterpret_cast<const int32_t *>(beginData_base) + n);
    starts[axis] = begin_value;

    int32_t size_value = *(reinterpret_cast<const int32_t *>(sizeData_base) + n);
    ends[axis] = begin_value + size_value;
  }

  ::arm_compute::Coordinates starts_set;
  ::arm_compute::Coordinates ends_set;

  for (size_t i = 0; i < starts.size(); ++i)
  {
    starts_set.set(i, starts[i]);
    ends_set.set(i, ends[i]);
  }

  auto fn = acl_common::generateLayer<arm_compute::CLSlice>(
    inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set);

  _return_fn = asAclFunction(std::move(fn));
}
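// Slice receives (begin, size) pairs from the IR but CLSlice wants absolute
// start/end coordinates per dimension, hence ends[axis] = begin + size above;
// the per-dimension index is also remapped with ToARMComputeAxis because ACL
// coordinates are in reverse dimension order.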
void KernelGenerator::visit(const ir::operation::StridedSlice &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
  const auto starts_index{node.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
  const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
  const auto strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};

  auto outputData_tensor = _tensor_reg->getAclTensor(output_index);
  auto inputData_tensor = _tensor_reg->getAclTensor(input_index);

  // Set initializers for indices data such as order of inputData
  int input_rank = _ctx.at(input_index).shape().rank();
  std::vector<int32_t> starts;
  std::vector<int32_t> ends;
  std::vector<int32_t> strides;
  starts.resize(input_rank, 0);
  ends.resize(input_rank, 0);
  strides.resize(input_rank, 0);

  assert(_ctx.at(starts_index).data());
  assert(_ctx.at(ends_index).data());
  assert(_ctx.at(strides_index).data());
  auto startData_base = _ctx.at(starts_index).data()->base();
  auto endData_base = _ctx.at(ends_index).data()->base();
  auto stridesData_base = _ctx.at(strides_index).data()->base();
  [[maybe_unused]] const int startData_size = _ctx.at(starts_index).shape().num_elements();
  [[maybe_unused]] const int endData_size = _ctx.at(ends_index).shape().num_elements();
  [[maybe_unused]] const int stridesData_size = _ctx.at(strides_index).shape().num_elements();

  assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
  assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
  assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
  assert(startData_size == input_rank);
  assert(endData_size == input_rank);
  assert(stridesData_size == input_rank);

  assert(startData_base != nullptr);
  for (int n = 0; n < input_rank; ++n)
  {
    auto axis = acl_common::ToARMComputeAxis(input_rank, n).value();

    int32_t start_value = *(reinterpret_cast<const int32_t *>(startData_base) + n);
    starts[axis] = start_value;

    int32_t end_value = *(reinterpret_cast<const int32_t *>(endData_base) + n);
    ends[axis] = end_value;

    int32_t strides_value = *(reinterpret_cast<const int32_t *>(stridesData_base) + n);
    strides[axis] = strides_value;
  }

  // Set mask bits in the reordered (ACL) dimension order
  const auto begin_mask = acl_common::ReorderBits<int32_t>(node.param().begin_mask, input_rank);
  const auto end_mask = acl_common::ReorderBits<int32_t>(node.param().end_mask, input_rank);
  const auto shrink_axis_mask =
    acl_common::ReorderBits<int32_t>(node.param().shrink_axis_mask, input_rank);

  ::arm_compute::Coordinates starts_set;
  ::arm_compute::Coordinates ends_set;
  ::arm_compute::BiStrides strides_set;

  for (size_t i = 0; i < starts.size(); ++i)
  {
    starts_set.set(i, starts[i]);
    ends_set.set(i, ends[i]);
    strides_set.set(i, strides[i]);
  }

  // Disable applied dim_correction
  if (inputData_tensor->num_dimensions() != inputData_tensor->info()->num_dimensions())
  {
    // This means that the highest dimension is 1 and dim_correction was applied to the input
    acl_common::disableDimCorrection(inputData_tensor);
  }

  auto fn = acl_common::generateLayer<arm_compute::CLStridedSlice>(
    inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set, strides_set,
    begin_mask, end_mask, shrink_axis_mask);

  // Revert disabling applied dim_correction
  if (inputData_tensor->dimension(0) == 1)
  {
    acl_common::enableDimCorrection(inputData_tensor);
  }

  _return_fn = asAclFunction(std::move(fn));
}
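// The dim-correction dance above appears in several ops: when a leading
// dimension of size 1 has been folded away by ACL's dim correction, the
// tensor's runtime rank and its ITensorInfo rank disagree, so the correction is
// temporarily disabled while configuring the kernel and re-enabled afterwards.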
void KernelGenerator::visit(const ir::operation::Transpose &node)
{
  const auto ofm_idx{node.getOutputs().at(0)};
  const auto ifm_idx{node.getInputs().at(ir::operation::Transpose::Input::INPUT)};
  const auto perm_idx{node.getInputs().at(ir::operation::Transpose::Input::PERMUTATION)};

  const auto rank = _ctx.at(ifm_idx).shape().rank();

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_idx);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_idx);

  const auto &perms = _ctx.at(perm_idx);
  std::vector<int32_t> pv;
  if (perms.shape() == ir::Shape{0})
  {
    pv.resize(rank);
    std::iota(pv.begin(), pv.end(), 0);
    std::reverse(pv.begin(), pv.end());
  }
  else
  {
    pv = _ctx.at(perm_idx).asVector<int32_t>();
  }

  std::unique_ptr<arm_compute::IFunction> fn;
  if (rank == 1)
  {
    fn = acl_common::generateLayer<arm_compute::CLCopy>(ifm_tensor->handle(), ofm_tensor->handle());
  }
  else if (rank == 2)
  {
    assert(pv.size() == 2 && pv.at(0) == 1 && pv.at(1) == 0);
    fn = acl_common::generateLayer<arm_compute::CLTranspose>(ifm_tensor->handle(),
                                                             ofm_tensor->handle());
  }
  else
  {
    auto backend_pv = acl_common::getARMComputePermutationVector(rank, pv);
    fn = acl_common::generateLayer<arm_compute::CLPermute>(ifm_tensor->handle(),
                                                           ofm_tensor->handle(), backend_pv);
  }

  _return_fn = asAclFunction(std::move(fn));
}
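// Rank-dependent lowering: rank 1 degenerates to a copy, rank 2 maps to
// CLTranspose, and anything higher goes through CLPermute with the permutation
// vector converted to ACL's reversed dimension order via
// getARMComputePermutationVector.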
void KernelGenerator::visit(const ir::operation::ElementwiseActivation &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::ElementwiseActivation::Input::INPUT)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  const ::arm_compute::ActivationLayerInfo act_info =
    acl_common::asActivationLayerInfo(node.param().op_type, node.param().alpha, node.param().beta);

  auto fn = acl_common::generateLayer<arm_compute::CLActivationLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), act_info);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::ElementwiseBinary &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto lhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::LHS)};
  const auto rhs_index{node.getInputs().at(ir::operation::ElementwiseBinary::Input::RHS)};

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
  auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);

  std::unique_ptr<arm_compute::IFunction> fn;
  switch (node.param().op_type)
  {
    case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND:
    {
      fn = acl_common::generateLayer<arm_compute::CLBinaryLogicalOp>(
        lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle(),
        arm_compute::BinaryLogicalOperation::AND);
      break;
    }
    case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR:
    {
      fn = acl_common::generateLayer<arm_compute::CLBitwiseOr>(
        lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
      break;
    }
    case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX:
    {
      fn = acl_common::generateLayer<arm_compute::CLElementwiseMax>(
        lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
      break;
    }
    case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN:
    {
      fn = acl_common::generateLayer<arm_compute::CLElementwiseMin>(
        lhs_tensor->handle(), rhs_tensor->handle(), output_tensor->handle());
      break;
    }
    default:
    {
      std::string err_msg("acl_cl KernelGenerator : " + node.name() +
                          " is not an elementwise-binary operation");
      assert(false && err_msg.c_str());
      break;
    }
  }

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::ElementwiseUnary &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)};

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);

  std::unique_ptr<arm_compute::IFunction> fn;
  switch (node.param().op_type)
  {
    case ir::operation::ElementwiseUnary::Type::ABS:
    {
      const ::arm_compute::ActivationLayerInfo act_info{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::ABS};
      fn = acl_common::generateLayer<arm_compute::CLActivationLayer>(
        input_tensor->handle(), output_tensor->handle(), act_info);
      break;
    }
    case ir::operation::ElementwiseUnary::Type::CAST:
    {
      if (input_tensor->data_type() == output_tensor->data_type())
        fn = acl_common::generateLayer<arm_compute::CLCopy>(input_tensor->handle(),
                                                            output_tensor->handle());
      else if (_ctx.at(input_index).typeInfo().type() == ir::DataType::BOOL8)
        fn = acl_common::generateLayer<arm_compute::CLCastBool>(input_tensor->handle(),
                                                                output_tensor->handle());
      else
        fn = acl_common::generateLayer<arm_compute::CLCast>(
          input_tensor->handle(), output_tensor->handle(), arm_compute::ConvertPolicy::SATURATE);
      break;
    }
    case ir::operation::ElementwiseUnary::Type::DEQUANTIZE:
      fn = acl_common::generateLayer<arm_compute::CLDequantizationLayer>(input_tensor->handle(),
                                                                         output_tensor->handle());
      break;
    case ir::operation::ElementwiseUnary::Type::EXP:
      fn = acl_common::generateLayer<arm_compute::CLExpLayer>(input_tensor->handle(),
                                                              output_tensor->handle());
      break;
    case ir::operation::ElementwiseUnary::Type::FLOOR:
      fn = acl_common::generateLayer<arm_compute::CLFloor>(input_tensor->handle(),
                                                           output_tensor->handle());
      break;
    case ir::operation::ElementwiseUnary::Type::LOGICAL_NOT:
      fn = acl_common::generateLayer<arm_compute::CLBitwiseNot>(input_tensor->handle(),
                                                                output_tensor->handle());
      break;
    case ir::operation::ElementwiseUnary::Type::NEG:
      fn = acl_common::generateLayer<arm_compute::CLNeg>(input_tensor->handle(),
                                                         output_tensor->handle());
      break;
    case ir::operation::ElementwiseUnary::Type::RSQRT:
      fn = acl_common::generateLayer<arm_compute::CLRsqrtLayer>(input_tensor->handle(),
                                                                output_tensor->handle());
      break;
    case ir::operation::ElementwiseUnary::Type::SQRT:
    {
      const ::arm_compute::ActivationLayerInfo act_info{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::SQRT};
      fn = acl_common::generateLayer<arm_compute::CLActivationLayer>(
        input_tensor->handle(), output_tensor->handle(), act_info);
      break;
    }
    default:
      throw std::runtime_error("acl_cl KernelGenerator : " + node.name() + " is not supported yet");
  }

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::ExpandDims &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)};

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);

  auto fn = acl_common::generateLayer<arm_compute::CLReshapeLayer>(input_tensor->handle(),
                                                                   output_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::InstanceNorm &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::InstanceNorm::Input::INPUT)};
  const auto gamma_index{node.getInputs().at(ir::operation::InstanceNorm::Input::GAMMA)};
  const auto beta_index{node.getInputs().at(ir::operation::InstanceNorm::Input::BETA)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto gamma_tensor = _tensor_reg->getAclTensor(gamma_index);
  auto beta_tensor = _tensor_reg->getAclTensor(beta_index);
  auto epsilon = node.param().epsilon;
  auto activation = node.param().activation;

  auto fn = acl_common::generateLayer<arm_compute::CLInstanceNormalizationLayerEx>(
    ifm_tensor->handle(), ofm_tensor->handle(), gamma_tensor->handle(), beta_tensor->handle(),
    epsilon);

  _return_fn = std::make_unique<exec::FunctionSequence>(
    asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle()));
}
void KernelGenerator::visit(const ir::operation::LSTM &node)
{
  _return_fn = acl_common::kernelGenLSTM<acl_common::AclFunction, ::arm_compute::ICLTensor,
                                         ::arm_compute::CLLSTMLayer>(node, _ctx, _tensor_reg);
}
void KernelGenerator::visit(const ir::operation::Comparison &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input0_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT0)};
  const auto input1_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT1)};

  const auto comparison_type = node.param().comparison_type;

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input0_tensor = _tensor_reg->getAclTensor(input0_index);
  auto input1_tensor = _tensor_reg->getAclTensor(input1_index);

  auto fn = acl_common::generateLayer<arm_compute::CLComparison>(
    input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle(),
    (arm_compute::ComparisonOperation)comparison_type);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::OneHot &node)
{
  const auto output_idx{node.getOutputs().at(0)};
  const auto indices_idx{node.getInputs().at(ir::operation::OneHot::Input::INDICES)};
  const auto depth_idx{node.getInputs().at(ir::operation::OneHot::Input::DEPTH)};
  const auto onvalue_idx{node.getInputs().at(ir::operation::OneHot::Input::ON_VALUE)};
  const auto offvalue_idx{node.getInputs().at(ir::operation::OneHot::Input::OFF_VALUE)};
  const auto depth = _ctx.at(depth_idx).asScalar<int32_t>();

  auto output_tensor = _tensor_reg->getAclTensor(output_idx);
  auto indices_tensor = _tensor_reg->getAclTensor(indices_idx);
  auto onvalue_tensor = _tensor_reg->getAclTensor(onvalue_idx);

  const size_t output_rank = _ctx.at(output_idx).shape().rank();
  int32_t axis = node.param().axis == -1 ? output_rank - 1 : node.param().axis;
  axis = acl_common::ToARMComputeAxis(output_rank, axis).value();

  std::unique_ptr<::arm_compute::IFunction> fn;
  const auto &offvalue = _ctx.at(offvalue_idx);
  if (offvalue.isConstant())
  {
    fn = acl_common::generateLayer<arm_compute::CLOneHot>(
      indices_tensor->handle(), onvalue_tensor->handle(), output_tensor->handle(),
      acl_common::asPixelValue(offvalue), static_cast<uint32_t>(depth), axis);
  }
  else
  {
    auto offvalue_tensor = _tensor_reg->getAclTensor(offvalue_idx);
    fn = acl_common::generateLayer<arm_compute::CLOneHot>(
      indices_tensor->handle(), onvalue_tensor->handle(), offvalue_tensor->handle(),
      output_tensor->handle(), static_cast<uint32_t>(depth), axis);
  }

  _return_fn = asAclFunction(std::move(fn));
}
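// The OneHot lowering picks between two CLOneHot configurations: a constant
// off_value is folded into the call directly as a pixel value, while a
// non-constant off_value is passed as an extra input tensor.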
void KernelGenerator::visit(const ir::operation::Pack &node)
{
  const auto output_index{node.getOutputs().at(0)};
  auto axis{node.param().axis};

  const auto output_rank = _ctx.at(output_index).shape().rank();

  std::vector<ir::OperandIndex> input_indexes;
  for (const auto &input_index : node.getInputs())
    input_indexes.emplace_back(input_index);

  auto output = _tensor_reg->getAclTensor(output_index)->handle();
  std::vector<arm_compute::ICLTensor *> inputs;
  for (const auto &input_index : input_indexes)
    inputs.emplace_back(_tensor_reg->getAclTensor(input_index)->handle());

  if (axis < 0)
    axis += output_rank;
  axis = acl_common::ToARMComputeAxis(output_rank, axis).value();

  // Disable applied dim_correction
  for (const auto &input_index : input_indexes)
  {
    const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
    if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
      acl_common::disableDimCorrection(input_tensor);
  }

  auto fn = acl_common::generateLayer<arm_compute::CLStackLayer>(inputs, axis, output);

  // Revert disabling applied dim_correction
  for (const auto &input_index : input_indexes)
  {
    const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
    if (input_tensor->dimension(0) == 1)
      acl_common::enableDimCorrection(input_tensor);
  }

  _return_fn = asAclFunction(std::move(fn));
}
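// Pack, Unpack, Gather, Pad and the slicing ops all wrap their configure step
// in the same pattern: when the highest dimension is 1, ACL's dim_correction
// collapses it and the tensor's ITensorInfo rank no longer matches the IR rank,
// so the correction is switched off for configuration and restored afterwards.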
void KernelGenerator::visit(const ir::operation::Pool2D &node)
{
  auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::CLPoolingLayer>(
    node, _ctx, _tensor_reg, acl_common::convertPoolType(node.param().op_type));

  const auto ofm_index{node.getOutputs().at(0)};
  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  const auto activation = node.param().activation;
  _return_fn = std::make_unique<exec::FunctionSequence>(
    asAclFunction(std::move(raw_fn)),
    ActivationBuilder::generate(activation, ofm_tensor->handle()));
}
void KernelGenerator::visit(const ir::operation::ResizeBilinear &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  auto fn = acl_common::generateLayer<arm_compute::CLScale>(
    ifm_tensor->handle(), ofm_tensor->handle(),
    ::arm_compute::ScaleKernelInfo{
      ::arm_compute::InterpolationPolicy::BILINEAR, ::arm_compute::BorderMode::REPLICATE,
      ::arm_compute::PixelValue(0.f), ::arm_compute::SamplingPolicy::TOP_LEFT});

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::ResizeNearestNeighbor &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::ResizeNearestNeighbor::Input::INPUT)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  auto fn = acl_common::generateLayer<arm_compute::CLScale>(
    ifm_tensor->handle(), ofm_tensor->handle(),
    ::arm_compute::ScaleKernelInfo{
      ::arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR, ::arm_compute::BorderMode::REPLICATE,
      ::arm_compute::PixelValue(0.f), ::arm_compute::SamplingPolicy::TOP_LEFT});

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::RNN &node)
{
  const auto output_index{node.getOutputs().at(ir::operation::RNN::Output::OUTPUT)};
  const auto hidden_state_out_index{
    node.getOutputs().at(ir::operation::RNN::Output::HIDDEN_STATE_OUT)};

  const auto input_index{node.getInputs().at(ir::operation::RNN::Input::INPUT)};
  const auto weights_index{node.getInputs().at(ir::operation::RNN::Input::WEIGHTS)};
  const auto recurrent_weights_index{
    node.getInputs().at(ir::operation::RNN::Input::RECURRENT_WEIGHTS)};
  const auto bias_index{node.getInputs().at(ir::operation::RNN::Input::BIAS)};
  const auto hidden_state_in_index{node.getInputs().at(ir::operation::RNN::Input::HIDDEN_STATE_IN)};

  const auto activation = node.param().activation;

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto hidden_state_out_tensor = _tensor_reg->getAclTensor(hidden_state_out_index);

  auto input_tensor = _tensor_reg->getAclTensor(input_index);
  auto weights_tensor = _tensor_reg->getAclTensor(weights_index);
  auto recurrent_weights_tensor = _tensor_reg->getAclTensor(recurrent_weights_index);
  auto bias_tensor = _tensor_reg->getAclTensor(bias_index);
  auto hidden_state_in_tensor = _tensor_reg->getAclTensor(hidden_state_in_index);
  const auto act_info = acl_common::asActivationLayerInfo(activation);

  auto copy_layer = acl_common::generateLayer<arm_compute::CLCopy>(
    hidden_state_in_tensor->handle(), hidden_state_out_tensor->handle());
  _return_fn = asAclFunction(std::move(copy_layer));

  auto fn = acl_common::generateLayer<arm_compute::CLRNNLayer>(
    _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), input_tensor->handle(),
    weights_tensor->handle(), recurrent_weights_tensor->handle(), bias_tensor->handle(),
    hidden_state_out_tensor->handle(), output_tensor->handle(), act_info);
  _return_fn = asAclFunction(std::move(fn));
}
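// The RNN lowering issues two kernels: a CLCopy that seeds hidden_state_out
// from hidden_state_in, followed by CLRNNLayer, which reads and updates that
// hidden state while producing the output.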
void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
  const auto block_size_index{
    node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
  const auto paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto block_size_tensor = _tensor_reg->getAclTensor(block_size_index);
  auto paddings_tensor = _tensor_reg->getAclTensor(paddings_index);

  assert(_ctx.at(block_size_index).data());
  assert(_ctx.at(paddings_index).data());

  auto fn = acl_common::generateLayer<arm_compute::CLSpaceToBatchLayer>(
    ifm_tensor->handle(), block_size_tensor->handle(), paddings_tensor->handle(),
    ofm_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::SpaceToDepth &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::SpaceToDepth::Input::INPUT)};

  auto block_size = node.param().block_size;

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  auto fn = acl_common::generateLayer<arm_compute::CLSpaceToDepthLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), block_size);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::EmbeddingLookup &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)};
  const auto values_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::VALUES)};

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto lookups_tensor = _tensor_reg->getAclTensor(lookups_index);
  auto values_tensor = _tensor_reg->getAclTensor(values_index);

  auto fn = acl_common::generateLayer<arm_compute::CLEmbeddingLookup>(
    values_tensor->handle(), output_tensor->handle(), lookups_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::L2Normalization &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::L2Normalization::Input::INPUT)};

  // L2 normalization along depth is expressed with a cross-map NormalizationLayer
  // by choosing the normalization parameters below.
  const auto &ifm_shape = _ctx.at(ifm_index).shape();
  const auto normalization_axis = _ctx.at(ifm_index).shape().rank() - 1;
  int32_t radius = 2 * ifm_shape.dim(normalization_axis) + 1; // normSize = depth(last dim) * 2 + 1
  float alpha = 1.0f;                                         // keep alpha_ at 1
  float beta = 0.5f;                                          // pow(reduction, -0.5) = 1 / sqrt(reduction)
  float bias = 0.0f;                                          // don't offset the reduction

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  const auto norm_info = ::arm_compute::NormalizationLayerInfo(::arm_compute::NormType::CROSS_MAP,
                                                               radius, alpha, beta, bias, false);

  auto fn = acl_common::generateLayer<arm_compute::CLNormalizationLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), norm_info);

  _return_fn = asAclFunction(std::move(fn));
}
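// With the radius covering the whole channel dimension (2 * depth + 1),
// alpha = 1, beta = 0.5 and bias = 0, the cross-map normalization reduces to
// x / sqrt(sum(x^2)) over the channel axis, which is exactly an L2
// normalization along depth.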
void KernelGenerator::visit(const ir::operation::HashtableLookup &node)
{
  const auto output_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::OUTPUT)};
  const auto hits_index{node.getOutputs().at(ir::operation::HashtableLookup::Output::HITS)};

  const auto lookups_index{node.getInputs().at(ir::operation::HashtableLookup::Input::LOOKUPS)};
  const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)};
  const auto values_index{node.getInputs().at(ir::operation::HashtableLookup::Input::VALUES)};

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto hits_tensor = _tensor_reg->getAclTensor(hits_index);

  auto lookups_tensor = _tensor_reg->getAclTensor(lookups_index);
  auto keys_tensor = _tensor_reg->getAclTensor(keys_index);
  auto values_tensor = _tensor_reg->getAclTensor(values_index);

  auto fn = acl_common::generateLayer<arm_compute::CLHashtableLookup>(
    lookups_tensor->handle(), keys_tensor->handle(), values_tensor->handle(),
    output_tensor->handle(), hits_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::PReLU &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::PReLU::Input::INPUT)};
  const auto alpha_index{node.getInputs().at(ir::operation::PReLU::Input::ALPHA)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto alpha_tensor = _tensor_reg->getAclTensor(alpha_index);

  auto fn = acl_common::generateLayer<arm_compute::CLPReluLayer>(
    ifm_tensor->handle(), alpha_tensor->handle(), ofm_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::TransposeConv &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ker_index{node.getInputs().at(ir::operation::TransposeConv::Input::KERNEL)};
  const auto ifm_index{node.getInputs().at(ir::operation::TransposeConv::Input::INPUT)};

  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
  const auto ker_shape = _ctx.at(ker_index).shape().asFeature();

  const auto stride = node.param().stride;

  assert((node.param().padding.type == ir::PaddingType::SAME) ||
         (node.param().padding.type == ir::PaddingType::VALID));
  const auto padding = ir::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
                                            ker_shape.W, ker_shape.H);
  uint32_t invalid_horizontal = 0;
  uint32_t invalid_vertical = 0;
  if (node.param().padding.type == ir::PaddingType::VALID)
  {
    invalid_horizontal =
      ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
    invalid_vertical = ofm_shape.H - (1 + (ifm_shape.H - 1) * stride.vertical) - (ker_shape.H - 1);
  }

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto ker_tensor = _tensor_reg->getAclTensor(ker_index);

  const auto tconv_info = acl_common::asPadStrideInfo(padding, stride);

  auto fn = acl_common::generateLayer<arm_compute::CLTransposeConvLayer>(
    _tensor_builder->acl_tensor_manager()->internal_buffer_manager(), ifm_tensor->handle(),
    ker_tensor->handle(), nullptr, ofm_tensor->handle(), tconv_info, invalid_horizontal,
    invalid_vertical);

  _return_fn = asAclFunction(std::move(fn));
}
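// For VALID padding the deconvolution result computed by ACL can be larger than
// the IR output shape; invalid_horizontal and invalid_vertical tell
// CLTransposeConvLayer how many right/bottom columns and rows of that result to
// drop so the shapes line up.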
void KernelGenerator::visit(const ir::operation::SquaredDifference &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)};
  const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto lhs_tensor = _tensor_reg->getAclTensor(lhs_index);
  auto rhs_tensor = _tensor_reg->getAclTensor(rhs_index);

  auto fn = acl_common::generateLayer<arm_compute::CLElementwiseSquaredDiff>(
    lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::TopKV2 &node)
{
  const auto outputValues_index{
    node.getOutputs().at(ir::operation::TopKV2::Output::OUTPUT_VALUES)};
  const auto outputIndices_index{
    node.getOutputs().at(ir::operation::TopKV2::Output::OUTPUT_INDICES)};

  const auto inputData_index{node.getInputs().at(ir::operation::TopKV2::Input::INPUT)};

  // Currently, only 1D or 2D input is supported.
  assert(_ctx.at(inputData_index).shape().rank() == 1 ||
         _ctx.at(inputData_index).shape().rank() == 2);

  const auto k = node.param().k;

  auto values_tensor = _tensor_reg->getAclTensor(outputValues_index);
  auto indices_tensor = _tensor_reg->getAclTensor(outputIndices_index);
  auto input_tensor = _tensor_reg->getAclTensor(inputData_index);

  auto fn = acl_common::generateLayer<arm_compute::CLTopKV2>(
    input_tensor->handle(), k, values_tensor->handle(), indices_tensor->handle());

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Gather &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
  const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};

  const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
  const auto axis_raw = node.param().axis;
  const auto axis_value = (axis_raw < 0 ? (ifm_rank + axis_raw) : axis_raw);
  const int axis = acl_common::ToARMComputeAxis(ifm_rank, axis_value).value();

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto indices_tensor = _tensor_reg->getAclTensor(indices_index);

  // Disable applied dim_correction
  size_t n = ifm_rank;
  assert(n == ifm_tensor->num_dimensions());
  size_t k = _ctx.at(indices_index).shape().rank();
  assert(k == indices_tensor->num_dimensions());

  if (n != ifm_tensor->info()->num_dimensions())
    acl_common::disableDimCorrection(ifm_tensor);
  if (k != indices_tensor->info()->num_dimensions())
    acl_common::disableDimCorrection(indices_tensor);

  auto fn = acl_common::generateLayer<arm_compute::CLGatherEx>(
    ifm_tensor->handle(), indices_tensor->handle(), ofm_tensor->handle(), axis);

  // Revert disabling applied dim_correction
  if (ifm_tensor->dimension(0) == 1)
    acl_common::enableDimCorrection(ifm_tensor);
  if (indices_tensor->dimension(0) == 1)
    acl_common::enableDimCorrection(indices_tensor);

  _return_fn = asAclFunction(std::move(fn));
}
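// Gather converts the (possibly negative) frontend axis into an ACL axis with
// ToARMComputeAxis before configuring CLGatherEx, and applies the same
// dim-correction guard to both the data and the indices tensors.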
void KernelGenerator::visit(const ir::operation::ArgMinMax &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::ArgMinMax::Input::INPUT)};
  const auto axis_index{node.getInputs().at(ir::operation::ArgMinMax::Input::AXIS)};

  auto ifm_shape = _ctx.at(ifm_index).shape();
  auto ofm_shape = _ctx.at(ofm_index).shape();

  assert((ifm_shape.rank() - 1) == ofm_shape.rank());

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  const auto ifm_rank = _ctx.at(ifm_index).shape().rank();

  int axis_value = _ctx.at(axis_index).asScalar<int32_t>();
  if (axis_value < 0)
  {
    axis_value += ifm_rank;
  }
  auto acl_axis = acl_common::ToARMComputeAxis(ifm_rank, axis_value).value();
  auto reduce_type = node.param().is_arg_max ? ::arm_compute::ReductionOperation::ARG_IDX_MAX
                                             : ::arm_compute::ReductionOperation::ARG_IDX_MIN;
  auto fn = acl_common::generateLayer<arm_compute::CLArgMinMaxLayer>(
    ifm_tensor->handle(), acl_axis, ofm_tensor->handle(), reduce_type);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::LocalResponseNormalization &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{
    node.getInputs().at(ir::operation::LocalResponseNormalization::Input::INPUT)};

  auto radius = node.param().radius;
  auto alpha = node.param().alpha;
  auto beta = node.param().beta;
  auto bias = node.param().bias;

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  const auto norm_info = ::arm_compute::NormalizationLayerInfo(
    ::arm_compute::NormType::CROSS_MAP, radius * 2 + 1, alpha, beta, bias, false);

  auto fn = acl_common::generateLayer<arm_compute::CLNormalizationLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), norm_info);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::DepthToSpace &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::DepthToSpace::Input::INPUT)};

  auto block_size = node.param().block_size;
  assert(block_size > 0);

  auto output_tensor = _tensor_reg->getAclTensor(output_index);
  auto input_tensor = _tensor_reg->getAclTensor(input_index);

  auto fn = acl_common::generateLayer<arm_compute::CLDepthToSpaceLayer>(
    input_tensor->handle(), output_tensor->handle(), block_size);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Split &node)
{
  const auto ifm_index{node.getInputs().at(ir::operation::Split::Input::INPUT)};
  const auto axis_index{node.getInputs().at(ir::operation::Split::Input::AXIS)};

  assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));
  if (!_ctx.at(axis_index).isConstant())
  {
    throw std::runtime_error("Non-constant axis_index NYI for acl_cl backend");
  }

  const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
  std::vector<ir::OperandIndex> output_indexes;
  for (const auto &output : node.getOutputs())
    output_indexes.emplace_back(output);

  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  std::vector<arm_compute::ICLTensor *> output_tensors;
  for (const auto &ofm_ind : output_indexes)
    output_tensors.emplace_back(_tensor_reg->getAclTensor(ofm_ind)->handle());

  auto axis = _ctx.at(axis_index).asScalar<int32_t>();
  if (axis < 0)
    axis += ifm_rank;
  axis = acl_common::ToARMComputeAxis(ifm_rank, axis).value();

  auto fn =
    acl_common::generateLayer<arm_compute::CLSplit>(ifm_tensor->handle(), output_tensors, axis);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::SplitV &node)
{
  const auto ifm_index{node.getInputs().at(ir::operation::SplitV::Input::INPUT)};
  const auto size_split_index{node.getInputs().at(ir::operation::SplitV::Input::SIZE_SPLITS)};
  const auto split_dim_index{node.getInputs().at(ir::operation::SplitV::Input::SPLIT_DIM)};

  assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));

  const size_t ifm_rank = _ctx.at(ifm_index).shape().rank();
  std::vector<ir::OperandIndex> output_indexes;
  for (const auto &output : node.getOutputs())
    output_indexes.emplace_back(output);

  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto size_split_tensor = _tensor_reg->getAclTensor(size_split_index);

  std::vector<arm_compute::ICLTensor *> output_tensors;
  for (const auto &ofm_ind : output_indexes)
    output_tensors.emplace_back(_tensor_reg->getAclTensor(ofm_ind)->handle());

  auto fn = std::make_unique<arm_compute::CLSplitVEx>();
  const auto &split_dim_op = _ctx.at(split_dim_index);
  if (split_dim_op.isConstant())
  {
    int32_t split_dim = split_dim_op.asScalar<int32_t>();
    uint32_t split_dim_revised = (split_dim < 0) ? (split_dim + ifm_rank) : split_dim;

    if (ifm_tensor->num_dimensions() != ifm_tensor->info()->num_dimensions())
    {
      // The highest dimension is 1 and dim_correction was applied to the input tensor
      acl_common::disableDimCorrection(ifm_tensor);
    }

    fn->configure(ifm_tensor->handle(), size_split_tensor->handle(), split_dim_revised,
                  output_tensors, node.param().num_splits);

    if (ifm_tensor->dimension(0) == 1)
    {
      acl_common::enableDimCorrection(ifm_tensor);
    }
  }
  else
  {
    throw std::runtime_error("Non-constant split_dim NYI for acl_cl backend");
  }

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Unpack &node)
{
  const auto input_index{node.getInputs().at(ir::operation::Unpack::Input::INPUT)};
  auto axis{node.param().axis};

  const auto input_rank = _ctx.at(input_index).shape().rank();

  std::vector<ir::OperandIndex> output_indexes;
  for (const auto &output_index : node.getOutputs())
    output_indexes.emplace_back(output_index);

  auto input_tensor = _tensor_reg->getAclTensor(input_index);
  std::vector<arm_compute::ICLTensor *> outputs;
  for (const auto &output_index : output_indexes)
    outputs.emplace_back(_tensor_reg->getAclTensor(output_index)->handle());

  if (axis < 0)
    axis += input_rank;
  axis = acl_common::ToARMComputeAxis(input_rank, axis).value();

  // Disable applied dim_correction
  if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
    acl_common::disableDimCorrection(input_tensor);

  auto fn =
    acl_common::generateLayer<arm_compute::CLUnstack>(input_tensor->handle(), outputs, axis);

  // Revert disabling applied dim_correction
  if (input_tensor->dimension(0) == 1)
    acl_common::enableDimCorrection(input_tensor);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Pad &node)
{
  const auto input_index{node.getInputs().at(ir::operation::Pad::Input::INPUT)};
  const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
  const auto output_index{node.getOutputs().at(0)};
  assert(_ctx.at(pad_index).data());

  auto rank = _ctx.at(input_index).shape().rank();
  auto pad_base = _ctx.at(pad_index).data()->base();

  auto input_type = _ctx.at(input_index).typeInfo();
  auto data_type = acl_common::asDataType(input_type.type());
  auto quant_info = ::arm_compute::QuantizationInfo(input_type.scale(), input_type.zero_point());
  const auto pixel_value = ::arm_compute::PixelValue(0, data_type, quant_info);

  auto input = _tensor_reg->getAclTensor(input_index)->handle();
  auto output = _tensor_reg->getAclTensor(output_index)->handle();

  ::arm_compute::PaddingList padding_list;
  padding_list.resize(rank);
  for (int32_t n = 0; n < rank; ++n)
  {
    const int32_t *from = reinterpret_cast<const int32_t *>(pad_base) + (n * 2);

    const auto axis = acl_common::ToARMComputeAxis(rank, n).value();
    padding_list[axis] = ::arm_compute::PaddingInfo{from[0], from[1]};
  }

  // Disable applied dim_correction
  const auto &input_tensor = _tensor_reg->getAclTensor(input_index);
  if (input_tensor->num_dimensions() != input_tensor->info()->num_dimensions())
    acl_common::disableDimCorrection(input_tensor);

  auto fn =
    acl_common::generateLayer<arm_compute::CLPadLayerEx>(input, output, padding_list, pixel_value);

  // Do not revert dim_correction for 4D inputs
  if (input_tensor->num_dimensions() < 4 && input_tensor->dimension(0) == 1)
    acl_common::enableDimCorrection(input_tensor);

  _return_fn = asAclFunction(std::move(fn));
}
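// The Pad lowering builds an arm_compute::PaddingList with one (before, after)
// pair per dimension, read from the constant pad operand two values at a time
// and re-indexed into ACL dimension order; carrying the quantization info on
// the pixel value keeps zero-padding correct for quantized tensors.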
void KernelGenerator::visit(const ir::operation::ConvertFp32ToFp16 &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(0)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  auto fn = acl_common::generateLayer<arm_compute::CLDepthConvertLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::ConvertPolicy::SATURATE, 0);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::ConvertFp16ToFp32 &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(0)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

  auto fn = acl_common::generateLayer<arm_compute::CLDepthConvertLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::ConvertPolicy::SATURATE, 0);

  _return_fn = asAclFunction(std::move(fn));
}
void KernelGenerator::visit(const ir::operation::Reverse &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::Reverse::Input::INPUT)};
  const auto axis_index{node.getInputs().at(ir::operation::Reverse::Input::AXIS)};

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);
  auto axis_tensor = _tensor_reg->getAclTensor(axis_index);

  // WORKAROUND: the acl_cl backend only accepts a U32 axis tensor, so a constant
  //             S32 axis operand is retyped to U32 here.
  if (_ctx.at(axis_index).isConstant() &&
      (axis_tensor->handle()->info()->data_type() == arm_compute::DataType::S32))
  {
    axis_tensor->handle()->info()->set_data_type(arm_compute::DataType::U32);
  }

  auto fn = acl_common::generateLayer<arm_compute::CLReverse>(
    ifm_tensor->handle(), ofm_tensor->handle(), axis_tensor->handle(), false);

  _return_fn = asAclFunction(std::move(fn));
}
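// CLReverse expects an unsigned (U32) axis tensor, so a constant S32 axis
// operand has its ACL tensor info retyped to U32 before the layer is
// configured; for non-negative axis values the 32-bit pattern is identical, so
// no data conversion is needed.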