ONE - On-device Neural Engine
Loading...
Searching...
No Matches
PALAddN.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#ifndef ONERT_MICRO_EXECUTE_PAL_ADD_N_H
19#define ONERT_MICRO_EXECUTE_PAL_ADD_N_H
20
#include "PALAddNCommon.h"

#include <cassert>
#include <cstddef>
22
23namespace onert_micro
24{
25namespace execute
26{
27namespace pal
28{
29template <>
30OMStatus AddN<int8_t>(const size_t flat_size, const size_t num_inputs,
31 const int8_t *const *input_data, int8_t *output_data)
32{
33 assert(false && "Not IMPL yet");
34 return UnsupportedOp;
35}
36
37template <>
38OMStatus AddN<int16_t>(const size_t flat_size, const size_t num_inputs,
39 const int16_t *const *input_data, int16_t *output_data)
40{
41 assert(false && "Not IMPL yet");
42 return UnsupportedOp;
43}
44
45} // namespace pal
46} // namespace execute
47} // namespace onert_micro
48
49#endif // ONERT_MICRO_EXECUTE_PAL_ADD_N_H
OMStatus AddN< int16_t >(const size_t flat_size, const size_t num_inputs, const int16_t *const *input_data, int16_t *output_data)
Definition PALAddN.h:38
OMStatus AddN< int8_t >(const size_t flat_size, const size_t num_inputs, const int8_t *const *input_data, int8_t *output_data)
Definition PALAddN.h:30
@ UnsupportedOp
Definition OMStatus.h:29