ONE - On-device Neural Engine
arm_compute::CLFullyConnectedReshapingLayer Class Reference

Class to run a FullyConnected layer after reshaping the input tensor. More...

#include <CLFullyConnectedReshapingLayer.h>

Public Types

enum class KernelType { GENERAL, PREPROCESSED_WEIGHTS }
 

Public Member Functions

 CLFullyConnectedReshapingLayer (std::shared_ptr< IMemoryManager > memory_manager=nullptr)
 
void configure (const arm_compute::ICLTensor *input, const arm_compute::ICLTensor *weights, const arm_compute::ICLTensor *biases, arm_compute::ICLTensor *output, bool needs_reshape, const arm_compute::TensorShape &reshape, KernelType kernel_type)
 Configure the layer.
 
void run (void) override
 Run the operation. Must be called after configure().
 
void prepare (void) override
 Prepare the operation.
 

Detailed Description

Class to run a FullyConnected layer after reshaping the input tensor.

Definition at line 35 of file CLFullyConnectedReshapingLayer.h.
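
A minimal usage sketch (not taken from the source; the ICLTensor objects are assumed to be created and allocated by the caller, and shape is the shape the input is flattened to before the fully connected kernel runs):

#include "CLFullyConnectedReshapingLayer.h"

void run_fully_connected(arm_compute::ICLTensor *input, arm_compute::ICLTensor *weights,
                         arm_compute::ICLTensor *biases, arm_compute::ICLTensor *output,
                         const arm_compute::TensorShape &shape)
{
  using FC = arm_compute::CLFullyConnectedReshapingLayer;

  FC fc; // default construction: no explicit memory manager

  // Flatten the input to `shape` before the fully connected kernel runs.
  fc.configure(input, weights, biases, output, /*needs_reshape=*/true, shape,
               FC::KernelType::GENERAL);

  fc.prepare(); // one-time preparation
  fc.run();     // per-inference execution
}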

Member Enumeration Documentation

◆ KernelType

Enumerator
GENERAL               General FC.
PREPROCESSED_WEIGHTS  Weights are constants, so they can be preprocessed.

Definition at line 38 of file CLFullyConnectedReshapingLayer.h.

{
  GENERAL,             //< General FC
  PREPROCESSED_WEIGHTS //< Weights are constants so it can be preprocessed
};
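
A small sketch of how a caller might choose a KernelType; the weights_are_constant flag is a hypothetical caller-side input, not part of this API:

#include "CLFullyConnectedReshapingLayer.h"

using KernelType = arm_compute::CLFullyConnectedReshapingLayer::KernelType;

// Constant weights can be preprocessed once and reused across runs;
// otherwise fall back to the general fully connected path.
KernelType select_kernel_type(bool weights_are_constant)
{
  return weights_are_constant ? KernelType::PREPROCESSED_WEIGHTS : KernelType::GENERAL;
}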

Constructor & Destructor Documentation

◆ CLFullyConnectedReshapingLayer()

arm_compute::CLFullyConnectedReshapingLayer::CLFullyConnectedReshapingLayer ( std::shared_ptr< IMemoryManager >  memory_manager = nullptr)
inline

Definition at line 45 of file CLFullyConnectedReshapingLayer.h.

: _input(nullptr), _weights(nullptr), _biases(nullptr), _output(nullptr), _cl_buffer{},
  _memory_manager{memory_manager}, _cl_fc{nullptr}, _cl_reshape{}, _needs_reshape(false)
{
  // DO NOTHING
}

Member Function Documentation

◆ configure()

void CLFullyConnectedReshapingLayer::configure ( const arm_compute::ICLTensor *  input,
const arm_compute::ICLTensor *  weights,
const arm_compute::ICLTensor *  biases,
arm_compute::ICLTensor *  output,
bool  needs_reshape,
const arm_compute::TensorShape &  reshape,
KernelType  kernel_type 
)

Configure the layer.

Parameters
[in] input          The source tensor
[in] weights        The tensor that is filled with weight values
[in] biases         The tensor that is filled with bias values
[in] output         The destination tensor
[in] needs_reshape  Whether the input needs to be reshaped or not
[in] reshape        The target tensor shape for the reshape. Only valid when needs_reshape is true.
[in] kernel_type    The kernel type to use (GENERAL or PREPROCESSED_WEIGHTS)
Returns
N/A

Definition at line 26 of file CLFullyConnectedReshapingLayer.cpp.

{
  _input = input;
  _weights = weights;
  _biases = biases;
  _output = output;
  _needs_reshape = needs_reshape;

  const ICLTensor *input_to_use = input;
  if (_needs_reshape)
  {
    // reshape
    auto_init_if_empty(*_cl_buffer.info(),
                       _input->info()->clone()->set_tensor_shape(reshape).set_data_layout(
                         _input->info()->data_layout()));
    _cl_reshape.configure(_input, &_cl_buffer);
    input_to_use = &_cl_buffer;
  }

  _cl_fc = [&]() {
    if (kernel_type == KernelType::GENERAL)
    {
      auto fc = new arm_compute::CLFullyConnectedLayerEx{_memory_manager};
      fc->configure(input_to_use, _weights, _biases, _output);
      return std::unique_ptr<arm_compute::IFunction>(fc);
    }
    else if (kernel_type == KernelType::PREPROCESSED_WEIGHTS)
    {
      bool is_hybrid = (input->info()->data_type() == DataType::F32 ||
                        input->info()->data_type() == DataType::F16) &&
                       (weights->info()->data_type() == DataType::QSYMM8 ||
                        weights->info()->data_type() == DataType::QASYMM8_SIGNED);

      if (is_hybrid)
      {
        auto fc = new arm_compute::CLFullyConnectedHybridLayer{_memory_manager};
        ITensorInfo *weights_info = const_cast<ITensorInfo *>(_weights->info());
        const auto orgin_weights_data_type = weights_info->data_type();
        weights_info->set_data_type(DataType::QASYMM8_SIGNED);
        fc->configure(input_to_use, _weights, _biases, _output);
        weights_info->set_data_type(orgin_weights_data_type);
        return std::unique_ptr<arm_compute::IFunction>(fc);
      }
      else
      {
        auto fc = new arm_compute::CLFullyConnectedLayer{_memory_manager};
        fc->configure(input_to_use, _weights, _biases, _output);
        return std::unique_ptr<arm_compute::IFunction>(fc);
      }
    }
    else
    {
      throw std::runtime_error("CLFullyConnectedReshapingLayer: Unsupported kernel type");
    }
  }();

  if (_needs_reshape)
  {
    // NOTE _cl_buffer is inaccessible from outside, and thus it is safe to invoke allocate here.
    _cl_buffer.allocator()->allocate();
  }
}

References GENERAL, and PREPROCESSED_WEIGHTS.
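
A minimal sketch of the no-reshape path, assuming the input is already two-dimensional and that the tensor pointers from the parameter list above exist. Since the reshape argument is only read when needs_reshape is true, a default-constructed TensorShape is passed as a placeholder (an assumption):

arm_compute::CLFullyConnectedReshapingLayer fc;

// The reshape argument is ignored because needs_reshape is false.
fc.configure(input, weights, biases, output,
             /*needs_reshape=*/false, arm_compute::TensorShape{},
             arm_compute::CLFullyConnectedReshapingLayer::KernelType::PREPROCESSED_WEIGHTS);

As the definition above shows, PREPROCESSED_WEIGHTS selects CLFullyConnectedHybridLayer when the input is F32/F16 and the weights are QSYMM8/QASYMM8_SIGNED, and CLFullyConnectedLayer otherwise; GENERAL always selects CLFullyConnectedLayerEx.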

◆ prepare()

void CLFullyConnectedReshapingLayer::prepare ( void  )
override

Prepare the operation.

Returns
N/A

Definition at line 102 of file CLFullyConnectedReshapingLayer.cpp.

{ _cl_fc->prepare(); }

◆ run()

void CLFullyConnectedReshapingLayer::run ( void  )
override

Run the operation. Must be called after configure().

Returns
N/A

Definition at line 94 of file CLFullyConnectedReshapingLayer.cpp.

{
  if (_needs_reshape)
    _cl_reshape.run();

  _cl_fc->run();
}

Referenced by package.infer.session::inference().
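
A short sketch of the call order across repeated inferences (an assumption based on the descriptions above; infer_batches and its arguments are illustrative): prepare() is called once after configure(), and run() once per inference, with the internal reshape executed inside run().

#include "CLFullyConnectedReshapingLayer.h"

void infer_batches(arm_compute::CLFullyConnectedReshapingLayer &fc, int num_batches)
{
  fc.prepare(); // one-time preparation of the underlying FC function

  for (int i = 0; i < num_batches; ++i)
  {
    // ... fill the configured input tensor with the next batch ...
    fc.run(); // reshapes the input (if configured), then runs the FC kernel
  }
}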


The documentation for this class was generated from the following files:
CLFullyConnectedReshapingLayer.h
CLFullyConnectedReshapingLayer.cpp