ONE - On-device Neural Engine
arm_compute::NEReduceSum Class Reference

#include <NEReduceSum.h>

Collaboration diagram for arm_compute::NEReduceSum (diagram omitted).

Public Member Functions

 NEReduceSum (std::shared_ptr< IMemoryManager > memory_manager=nullptr)
 
void configure (ITensor *input, const Coordinates &reduction_axis, bool keep_dims, ITensor *output)
 
void run () override
 

Static Public Member Functions

static Status validate (const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
 

Detailed Description

Basic function to perform a reduce sum operation.

Definition at line 59 of file NEReduceSum.h.

Constructor & Destructor Documentation

◆ NEReduceSum()

NEReduceSum::NEReduceSum(std::shared_ptr<IMemoryManager> memory_manager = nullptr)

Constructor

Definition at line 53 of file NEReduceSum.cpp.

    : _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(),
      _reduction_ops(), _keep_dims()
{
}
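
A minimal usage sketch (not taken from this page): the memory_manager argument defaults to nullptr, so the function can be constructed with or without a shared IMemoryManager.

arm_compute::NEReduceSum reduce_sum;                      // no memory manager; intermediates are unmanaged
// arm_compute::NEReduceSum managed_sum{memory_manager};  // 'memory_manager' is a hypothetical
//                                                        // std::shared_ptr<arm_compute::IMemoryManager>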

Member Function Documentation

◆ configure()

void NEReduceSum::configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, ITensor *output)

Configure the kernel.

Note
    Supported tensor rank: up to 4

Parameters
    [in]  input           Source tensor. Data type supported: QASYMM8/F16/F32
    [in]  reduction_axis  Reduction axis vector.
    [in]  keep_dims       If true, retains reduced dimensions with length 1.
    [out] output          Destination tensor. Data type supported: Same as input

Definition at line 105 of file NEReduceSum.cpp.

{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);

    _reduction_ops = reduction_axis.num_dimensions();
    _reduction_kernels.resize(_reduction_ops);
    _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
    _keep_dims = keep_dims;

    Coordinates axis_local = reduction_axis;
    const int input_dims = input->info()->num_dimensions();
    const unsigned int reduction_ops = reduction_axis.num_dimensions();

    // Convert negative axis
    for (unsigned int i = 0; i < reduction_ops; ++i)
    {
        axis_local[i] = wrap_around(axis_local[i], input_dims);
    }

    // Perform reduction for every axis
    for (unsigned int i = 0; i < _reduction_ops; ++i)
    {
        TensorShape out_shape =
            i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
        out_shape.set(axis_local[i], 1);
        auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);

        if (i == _reduction_ops - 1 && keep_dims)
        {
            _reduction_kernels[i].configure(in, output, axis_local[i], ReductionOperation::SUM);
        }
        else
        {
            _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(),
                                                          input->info()->data_type(),
                                                          input->info()->quantization_info())
                                                   .set_data_layout(input->info()->data_layout()));
            _memory_group.manage(&_reduced_outs[i]);
            _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::SUM);
        }
    }

    // Allocate intermediate tensors
    for (unsigned int i = 0; i < _reduction_ops - (keep_dims ? 1 : 0); ++i)
    {
        _reduced_outs[i].allocator()->allocate();
    }

    // Configure reshape layer if we want to drop the dimensions
    if (!keep_dims)
    {
        TensorShape out_shape = input->info()->tensor_shape();

        // We have to sort the reduction axis vectors in order for remove_dimension
        // to work properly
        std::sort(axis_local.begin(), axis_local.begin() + _reduction_ops);
        for (unsigned int i = 0; i < _reduction_ops; ++i)
        {
            out_shape.remove_dimension(axis_local[i] - i);
        }
        auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
        _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
    }
}
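
The following is a hedged usage sketch of configure(); the include paths, tensor shapes and the helper name example_reduce_sum() are illustrative assumptions, not taken from this page.

#include <NEReduceSum.h>                  // header shown at the top of this page
#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"

void example_reduce_sum()
{
    using namespace arm_compute;

    Tensor input;
    Tensor output;
    input.allocator()->init(TensorInfo(TensorShape(8U, 4U, 2U), 1, DataType::F32));

    NEReduceSum reduce_sum;
    // Sum over axis 1. With keep_dims == false the reduced dimension is dropped by the
    // internal reshape layer, so the output becomes an 8 x 2 tensor.
    reduce_sum.configure(&input, Coordinates(1), /*keep_dims=*/false, &output);

    // Allocate after configure(); the output info is auto-initialised by configure() if left empty.
    input.allocator()->allocate();
    output.allocator()->allocate();
}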

◆ run()

void NEReduceSum::run() override

Definition at line 172 of file NEReduceSum.cpp.

{
    MemoryGroupResourceScope scope_mg(_memory_group);

    for (unsigned int i = 0; i < _reduction_ops; ++i)
    {
        _reduction_kernels[i].run();
    }

    if (!_keep_dims)
    {
        _reshape.run();
    }
}
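
A short sketch of how run() is typically used: once configure() has been called and the tensors are allocated and filled, run() can be invoked repeatedly; intermediate buffers are acquired and released around each call through the MemoryGroupResourceScope shown above. The helper name execute() is illustrative.

#include <NEReduceSum.h>  // header shown at the top of this page

// 'reduce_sum' is assumed to be configured and its tensors allocated, as in the configure() sketch.
void execute(arm_compute::NEReduceSum &reduce_sum)
{
    reduce_sum.run(); // runs the chained reduction stages, then the reshape when keep_dims == false
}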

◆ validate()

Status NEReduceSum::validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)   [static]

Static function to check whether the given info will lead to a valid configuration of NEReduceSum.

Parameters
    [in]  input           Source tensor. Data type supported: QASYMM8/F16/F32
    [in]  reduction_axis  Reduction axis vector.
    [in]  keep_dims       If true, retains reduced dimensions with length 1.
    [in]  output          Destination tensor. Data type supported: Same as input

Returns
    A status

Definition at line 59 of file NEReduceSum.cpp.

{
    ARM_COMPUTE_UNUSED(keep_dims);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16,
                                                         DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());

    TensorShape out_shape = input->tensor_shape();
    const unsigned int reduction_ops = reduction_axis.num_dimensions();
    const int input_dims = input->num_dimensions();
    Coordinates axis_local = reduction_axis;

    // Convert negative axis
    for (unsigned int i = 0; i < reduction_ops; ++i)
    {
        axis_local[i] = wrap_around(axis_local[i], input_dims);
    }

    std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
    for (unsigned int i = 0; i < reduction_ops; ++i)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] > 3);
        ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(axis_local[i]) > input->num_dimensions() - 1);
        if (output->total_size() > 0 && keep_dims)
        {
            ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(axis_local[i]) != 1);
        }
        if (keep_dims)
        {
            out_shape.set(axis_local[i], 1);
        }
        else
        {
            out_shape.remove_dimension(axis_local[i] - i);
        }
    }
    const TensorInfo out_info = input->clone()->set_tensor_shape(out_shape);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info);

    return Status{};
}
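
A hedged sketch of calling validate() before any tensor memory is allocated; the shapes are illustrative (reducing axis 1 of an 8 x 4 x 2 F32 tensor with keep_dims == true) and the helper name reduce_sum_is_supported() is an assumption.

#include <NEReduceSum.h>                  // header shown at the top of this page
#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

bool reduce_sum_is_supported()
{
    using namespace arm_compute;

    const TensorInfo in_info(TensorShape(8U, 4U, 2U), 1, DataType::F32);
    const TensorInfo out_info(TensorShape(8U, 1U, 2U), 1, DataType::F32); // axis 1 kept with length 1

    const Status status = NEReduceSum::validate(&in_info, Coordinates(1), /*keep_dims=*/true, &out_info);
    return status.error_code() == ErrorCode::OK;
}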

The documentation for this class was generated from the following files:
NEReduceSum.h
NEReduceSum.cpp