ONE - On-device Neural Engine
Loading...
Searching...
No Matches
PALMul.h
Go to the documentation of this file.
1
/*
2
* Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
3
* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
4
*
5
* Licensed under the Apache License, Version 2.0 (the "License");
6
* you may not use this file except in compliance with the License.
7
* You may obtain a copy of the License at
8
*
9
* http://www.apache.org/licenses/LICENSE-2.0
10
*
11
* Unless required by applicable law or agreed to in writing, software
12
* distributed under the License is distributed on an "AS IS" BASIS,
13
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
* See the License for the specific language governing permissions and
15
* limitations under the License.
16
*/
17
18
#ifndef LUCI_INTERPRETER_PAL_MUL_H
19
#define LUCI_INTERPRETER_PAL_MUL_H
20
21
#include "PALMulCommon.h"
22
23
namespace
luci_interpreter_pal
24
{
25
26
template
<>
27
inline
void
Mul<int8_t>
(
const
ArithmeticParams
&,
const
int
,
const
int8_t *,
const
int8_t *,
28
int8_t *)
29
{
30
assert(
false
&&
"Not IMPL yet"
);
31
}
32
33
template
<>
34
inline
void
Mul<int16_t>
(
const
ArithmeticParams
&,
const
int
,
const
int16_t *,
const
int16_t *,
35
int16_t *)
36
{
37
assert(
false
&&
"Not IMPL yet"
);
38
}
39
40
}
// namespace luci_interpreter_pal
41
42
#endif
// LUCI_INTERPRETER_PAL_MUL_H
luci_interpreter_pal
Definition
PALArgMax.h:23
luci_interpreter_pal::Mul< int16_t >
void Mul< int16_t >(const ArithmeticParams &params, const int flat_size, const int16_t *input1_data, const int16_t *input2_data, int16_t *output_data)
Definition
PALMul.h:38
luci_interpreter_pal::Mul< int8_t >
void Mul< int8_t >(const ArithmeticParams &params, const int flat_size, const int8_t *input1_data, const int8_t *input2_data, int8_t *output_data)
Definition
PALMul.h:27
luci_interpreter_pal::ArithmeticParams
Definition
Params.h:178
onert-micro
luci-interpreter
pal
mcu
PALMul.h
Generated by
1.9.8