ONE - On-device Neural Engine
PALArgMax.h
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ONERT_MICRO_EXECUTE_PAL_ARG_MAX_H
#define ONERT_MICRO_EXECUTE_PAL_ARG_MAX_H

#include "PALArgMinMaxCommon.h"

namespace onert_micro
{
namespace execute
{
namespace pal
{

template <typename T1, typename T2, typename T3>
OMStatus ArgMax(const core::OMRuntimeShape &input1_shape, const T1 *input1_data,
                const T3 *input2_data, const core::OMRuntimeShape &output_shape, T2 *output_data)
{
  return ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data,
                   std::greater<T1>());
}

} // namespace pal
} // namespace execute
} // namespace onert_micro

#endif // ONERT_MICRO_EXECUTE_PAL_ARG_MAX_H
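
ArgMax forwards to the shared ArgMinMax helper declared in PALArgMinMaxCommon.h, passing std::greater&lt;T1&gt; as the comparator, so for every output position the kernel writes the index of the largest input element along the reduced axis. The standalone sketch below is not part of ONE; it is a plain C++ illustration (with hypothetical helper names) of the same reduction over the innermost axis of a flattened [outer x inner] buffer. Swapping the comparator to std::less would turn it into ArgMin.

// Standalone sketch only -- not the onert_micro PAL API.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// Reduce over the innermost axis of a flattened [outer x inner] buffer,
// writing the index that wins under `cmp` for each of the `outer` rows.
// With std::greater this behaves like ArgMax; with std::less, like ArgMin.
template <typename T, typename Cmp>
std::vector<int32_t> arg_min_max_last_axis(const std::vector<T> &data, std::size_t outer,
                                           std::size_t inner, Cmp cmp)
{
  std::vector<int32_t> out(outer, 0);
  for (std::size_t o = 0; o < outer; ++o)
  {
    const T *row = data.data() + o * inner;
    std::size_t best = 0;
    for (std::size_t i = 1; i < inner; ++i)
    {
      if (cmp(row[i], row[best]))
        best = i;
    }
    out[o] = static_cast<int32_t>(best);
  }
  return out;
}

int main()
{
  // 2 x 4 input; argmax along the last axis should yield {3, 0}.
  const std::vector<float> input{0.1f, 0.5f, 0.3f, 0.9f, 2.0f, 1.0f, 0.0f, -1.0f};
  for (int32_t idx : arg_min_max_last_axis(input, 2, 4, std::greater<float>()))
    std::cout << idx << '\n'; // prints 3, then 0
  return 0;
}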