mesa: Import TensorFlow Lite headers

These define a stable API for implementing delegates that can be used in a
Gallium frontend.

Acked-by: Christian Gmeiner <cgmeiner@igalia.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25714>
This commit is contained in:
parent d8fb1dc7ae
commit 9290410870
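For readers unfamiliar with the TensorFlow Lite C API, here is a minimal sketch of the call sequence a client would use to run a model through a delegate built on these headers. It is not part of the commit: it only uses functions declared in the imported core/c/c_api.h, while create_gallium_delegate()/destroy_gallium_delegate() are hypothetical placeholders for whatever entry points a Gallium-backed delegate would actually export, and error handling is omitted.

#include <stddef.h>
#include "tensorflow/lite/core/c/c_api.h"

/* Hypothetical constructors: illustrative names only, not part of this commit. */
extern TfLiteOpaqueDelegate* create_gallium_delegate(void);
extern void destroy_gallium_delegate(TfLiteOpaqueDelegate* delegate);

int run_once(const float* input, size_t input_bytes,
             float* output, size_t output_bytes)
{
   TfLiteModel* model = TfLiteModelCreateFromFile("model.tflite");
   TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
   TfLiteOpaqueDelegate* delegate = create_gallium_delegate();

   /* Delegates are attached to the options before the interpreter is built. */
   TfLiteInterpreterOptionsAddDelegate(options, delegate);

   TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
   TfLiteInterpreterAllocateTensors(interpreter);

   TfLiteTensor* in = TfLiteInterpreterGetInputTensor(interpreter, 0);
   TfLiteTensorCopyFromBuffer(in, input, input_bytes);

   TfLiteStatus status = TfLiteInterpreterInvoke(interpreter);

   const TfLiteTensor* out = TfLiteInterpreterGetOutputTensor(interpreter, 0);
   TfLiteTensorCopyToBuffer(out, output, output_bytes);

   TfLiteInterpreterDelete(interpreter);
   TfLiteInterpreterOptionsDelete(options);
   TfLiteModelDelete(model);
   destroy_gallium_delegate(delegate);
   return status == kTfLiteOk ? 0 : -1;
}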
15  include/tensorflow/README.md  Normal file
@@ -0,0 +1,15 @@
These headers have been copied from TensorFlow 2.13.0.

To update the files to those in newer versions of TensorFlow:

cd $TENSORFLOW_CHECKOUT
cp --parents tensorflow/lite/builtin_ops.h $MESA_DIR/include/.
cp --parents tensorflow/lite/c/common.h $MESA_DIR/include/.
cp --parents tensorflow/lite/c/c_api.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/async/c/types.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/c/builtin_op_data.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/c/c_api.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/c/c_api_experimental.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/c/c_api_opaque.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/c/c_api_types.h $MESA_DIR/include/.
cp --parents tensorflow/lite/core/c/common.h $MESA_DIR/include/.
197  include/tensorflow/lite/builtin_ops.h  Normal file
@@ -0,0 +1,197 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_
#define TENSORFLOW_LITE_BUILTIN_OPS_H_

// DO NOT EDIT MANUALLY: This file is automatically generated by
// `schema/builtin_ops_header/generator.cc`.

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

// The enum for builtin operators.
// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special
// ops which are not real built-in ops.
typedef enum {
  kTfLiteBuiltinAdd = 0,
  kTfLiteBuiltinAveragePool2d = 1,
  kTfLiteBuiltinConcatenation = 2,
  kTfLiteBuiltinConv2d = 3,
  kTfLiteBuiltinDepthwiseConv2d = 4,
  kTfLiteBuiltinDepthToSpace = 5,
  kTfLiteBuiltinDequantize = 6,
  kTfLiteBuiltinEmbeddingLookup = 7,
  kTfLiteBuiltinFloor = 8,
  kTfLiteBuiltinFullyConnected = 9,
  kTfLiteBuiltinHashtableLookup = 10,
  kTfLiteBuiltinL2Normalization = 11,
  kTfLiteBuiltinL2Pool2d = 12,
  kTfLiteBuiltinLocalResponseNormalization = 13,
  kTfLiteBuiltinLogistic = 14,
  kTfLiteBuiltinLshProjection = 15,
  kTfLiteBuiltinLstm = 16,
  kTfLiteBuiltinMaxPool2d = 17,
  kTfLiteBuiltinMul = 18,
  kTfLiteBuiltinRelu = 19,
  kTfLiteBuiltinReluN1To1 = 20,
  kTfLiteBuiltinRelu6 = 21,
  kTfLiteBuiltinReshape = 22,
  kTfLiteBuiltinResizeBilinear = 23,
  kTfLiteBuiltinRnn = 24,
  kTfLiteBuiltinSoftmax = 25,
  kTfLiteBuiltinSpaceToDepth = 26,
  kTfLiteBuiltinSvdf = 27,
  kTfLiteBuiltinTanh = 28,
  kTfLiteBuiltinConcatEmbeddings = 29,
  kTfLiteBuiltinSkipGram = 30,
  kTfLiteBuiltinCall = 31,
  kTfLiteBuiltinCustom = 32,
  kTfLiteBuiltinEmbeddingLookupSparse = 33,
  kTfLiteBuiltinPad = 34,
  kTfLiteBuiltinUnidirectionalSequenceRnn = 35,
  kTfLiteBuiltinGather = 36,
  kTfLiteBuiltinBatchToSpaceNd = 37,
  kTfLiteBuiltinSpaceToBatchNd = 38,
  kTfLiteBuiltinTranspose = 39,
  kTfLiteBuiltinMean = 40,
  kTfLiteBuiltinSub = 41,
  kTfLiteBuiltinDiv = 42,
  kTfLiteBuiltinSqueeze = 43,
  kTfLiteBuiltinUnidirectionalSequenceLstm = 44,
  kTfLiteBuiltinStridedSlice = 45,
  kTfLiteBuiltinBidirectionalSequenceRnn = 46,
  kTfLiteBuiltinExp = 47,
  kTfLiteBuiltinTopkV2 = 48,
  kTfLiteBuiltinSplit = 49,
  kTfLiteBuiltinLogSoftmax = 50,
  kTfLiteBuiltinDelegate = 51,
  kTfLiteBuiltinBidirectionalSequenceLstm = 52,
  kTfLiteBuiltinCast = 53,
  kTfLiteBuiltinPrelu = 54,
  kTfLiteBuiltinMaximum = 55,
  kTfLiteBuiltinArgMax = 56,
  kTfLiteBuiltinMinimum = 57,
  kTfLiteBuiltinLess = 58,
  kTfLiteBuiltinNeg = 59,
  kTfLiteBuiltinPadv2 = 60,
  kTfLiteBuiltinGreater = 61,
  kTfLiteBuiltinGreaterEqual = 62,
  kTfLiteBuiltinLessEqual = 63,
  kTfLiteBuiltinSelect = 64,
  kTfLiteBuiltinSlice = 65,
  kTfLiteBuiltinSin = 66,
  kTfLiteBuiltinTransposeConv = 67,
  kTfLiteBuiltinSparseToDense = 68,
  kTfLiteBuiltinTile = 69,
  kTfLiteBuiltinExpandDims = 70,
  kTfLiteBuiltinEqual = 71,
  kTfLiteBuiltinNotEqual = 72,
  kTfLiteBuiltinLog = 73,
  kTfLiteBuiltinSum = 74,
  kTfLiteBuiltinSqrt = 75,
  kTfLiteBuiltinRsqrt = 76,
  kTfLiteBuiltinShape = 77,
  kTfLiteBuiltinPow = 78,
  kTfLiteBuiltinArgMin = 79,
  kTfLiteBuiltinFakeQuant = 80,
  kTfLiteBuiltinReduceProd = 81,
  kTfLiteBuiltinReduceMax = 82,
  kTfLiteBuiltinPack = 83,
  kTfLiteBuiltinLogicalOr = 84,
  kTfLiteBuiltinOneHot = 85,
  kTfLiteBuiltinLogicalAnd = 86,
  kTfLiteBuiltinLogicalNot = 87,
  kTfLiteBuiltinUnpack = 88,
  kTfLiteBuiltinReduceMin = 89,
  kTfLiteBuiltinFloorDiv = 90,
  kTfLiteBuiltinReduceAny = 91,
  kTfLiteBuiltinSquare = 92,
  kTfLiteBuiltinZerosLike = 93,
  kTfLiteBuiltinFill = 94,
  kTfLiteBuiltinFloorMod = 95,
  kTfLiteBuiltinRange = 96,
  kTfLiteBuiltinResizeNearestNeighbor = 97,
  kTfLiteBuiltinLeakyRelu = 98,
  kTfLiteBuiltinSquaredDifference = 99,
  kTfLiteBuiltinMirrorPad = 100,
  kTfLiteBuiltinAbs = 101,
  kTfLiteBuiltinSplitV = 102,
  kTfLiteBuiltinUnique = 103,
  kTfLiteBuiltinCeil = 104,
  kTfLiteBuiltinReverseV2 = 105,
  kTfLiteBuiltinAddN = 106,
  kTfLiteBuiltinGatherNd = 107,
  kTfLiteBuiltinCos = 108,
  kTfLiteBuiltinWhere = 109,
  kTfLiteBuiltinRank = 110,
  kTfLiteBuiltinElu = 111,
  kTfLiteBuiltinReverseSequence = 112,
  kTfLiteBuiltinMatrixDiag = 113,
  kTfLiteBuiltinQuantize = 114,
  kTfLiteBuiltinMatrixSetDiag = 115,
  kTfLiteBuiltinRound = 116,
  kTfLiteBuiltinHardSwish = 117,
  kTfLiteBuiltinIf = 118,
  kTfLiteBuiltinWhile = 119,
  kTfLiteBuiltinNonMaxSuppressionV4 = 120,
  kTfLiteBuiltinNonMaxSuppressionV5 = 121,
  kTfLiteBuiltinScatterNd = 122,
  kTfLiteBuiltinSelectV2 = 123,
  kTfLiteBuiltinDensify = 124,
  kTfLiteBuiltinSegmentSum = 125,
  kTfLiteBuiltinBatchMatmul = 126,
  kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127,
  kTfLiteBuiltinCumsum = 128,
  kTfLiteBuiltinCallOnce = 129,
  kTfLiteBuiltinBroadcastTo = 130,
  kTfLiteBuiltinRfft2d = 131,
  kTfLiteBuiltinConv3d = 132,
  kTfLiteBuiltinImag = 133,
  kTfLiteBuiltinReal = 134,
  kTfLiteBuiltinComplexAbs = 135,
  kTfLiteBuiltinHashtable = 136,
  kTfLiteBuiltinHashtableFind = 137,
  kTfLiteBuiltinHashtableImport = 138,
  kTfLiteBuiltinHashtableSize = 139,
  kTfLiteBuiltinReduceAll = 140,
  kTfLiteBuiltinConv3dTranspose = 141,
  kTfLiteBuiltinVarHandle = 142,
  kTfLiteBuiltinReadVariable = 143,
  kTfLiteBuiltinAssignVariable = 144,
  kTfLiteBuiltinBroadcastArgs = 145,
  kTfLiteBuiltinRandomStandardNormal = 146,
  kTfLiteBuiltinBucketize = 147,
  kTfLiteBuiltinRandomUniform = 148,
  kTfLiteBuiltinMultinomial = 149,
  kTfLiteBuiltinGelu = 150,
  kTfLiteBuiltinDynamicUpdateSlice = 151,
  kTfLiteBuiltinRelu0To1 = 152,
  kTfLiteBuiltinUnsortedSegmentProd = 153,
  kTfLiteBuiltinUnsortedSegmentMax = 154,
  kTfLiteBuiltinUnsortedSegmentSum = 155,
  kTfLiteBuiltinAtan2 = 156,
  kTfLiteBuiltinUnsortedSegmentMin = 157,
  kTfLiteBuiltinSign = 158,
  kTfLiteBuiltinBitcast = 159,
  kTfLiteBuiltinBitwiseXor = 160,
  kTfLiteBuiltinRightShift = 161,
} TfLiteBuiltinOperator;

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // TENSORFLOW_LITE_BUILTIN_OPS_H_
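The values above are the stable numeric identifiers a delegate or custom-op registration matches against. As a hedged illustration of how they are consumed, here is a sketch of registering a custom operator through the experimental registration API declared further down in core/c/c_api.h; the stub names and kernel bodies are hypothetical, not part of this commit.

#include "tensorflow/lite/core/c/c_api.h"

/* Hypothetical stubs; a real kernel would inspect the node and its tensors. */
static TfLiteStatus my_op_prepare(TfLiteOpaqueContext* context,
                                  TfLiteOpaqueNode* node)
{
   (void)context; (void)node;
   return kTfLiteOk;
}

static TfLiteStatus my_op_invoke(TfLiteOpaqueContext* context,
                                 TfLiteOpaqueNode* node)
{
   (void)context; (void)node;
   return kTfLiteOk;
}

static void register_my_custom_op(TfLiteInterpreterOptions* options)
{
   /* kTfLiteBuiltinCustom marks this registration as a custom op; a builtin
    * op would instead pass its TfLiteBuiltinOperator value and no custom
    * name. */
   TfLiteRegistrationExternal* reg =
      TfLiteRegistrationExternalCreate(kTfLiteBuiltinCustom, "MY_CUSTOM_OP", 1);
   TfLiteRegistrationExternalSetPrepare(reg, my_op_prepare);
   TfLiteRegistrationExternalSetInvoke(reg, my_op_invoke);
   TfLiteInterpreterOptionsAddRegistrationExternal(options, reg);
}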
26  include/tensorflow/lite/c/c_api.h  Normal file
@@ -0,0 +1,26 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_C_C_API_H_
#define TENSORFLOW_LITE_C_C_API_H_

/// \file
///
/// C API for TensorFlow Lite.
///
/// For documentation, see tensorflow/lite/core/c/c_api.h

#include "tensorflow/lite/core/c/c_api.h"

#endif // TENSORFLOW_LITE_C_C_API_H_
41  include/tensorflow/lite/c/common.h  Normal file
@@ -0,0 +1,41 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file defines common C types and APIs for implementing operations,
// delegates and other constructs in TensorFlow Lite. The actual operations and
// delegates can be defined using C++, but the interface between the interpreter
// and the operations are C.
//
// Summary of abstractions
// TF_LITE_ENSURE - Self-sufficient error checking
// TfLiteStatus - Status reporting
// TfLiteIntArray - stores tensor shapes (dims),
// TfLiteContext - allows an op to access the tensors
// TfLiteTensor - tensor (a multidimensional array)
// TfLiteNode - a single node or operation
// TfLiteRegistration - the implementation of a conceptual operation.
// TfLiteDelegate - allows delegation of nodes to alternative backends.
//
// Some abstractions in this file are created and managed by Interpreter.
//
// NOTE: The order of values in these structs are "semi-ABI stable". New values
// should be added only to the end of structs and never reordered.

#ifndef TENSORFLOW_LITE_C_COMMON_H_
#define TENSORFLOW_LITE_C_COMMON_H_

#include "tensorflow/lite/core/c/common.h"

#endif // TENSORFLOW_LITE_C_COMMON_H_
43  include/tensorflow/lite/core/async/c/types.h  Normal file
@@ -0,0 +1,43 @@
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_ASYNC_C_TYPES_H_
#define TENSORFLOW_LITE_CORE_ASYNC_C_TYPES_H_

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

/// Opaque type for TfLiteAsyncKernel.
typedef struct TfLiteAsyncKernel TfLiteAsyncKernel;

/// Opaque type for TfLiteExecutionTask.
///
/// See tensorflow/lite/core/async/c/task.h
/// NOTE: TfLiteExecutionTask is NOT thread-safe.
typedef struct TfLiteExecutionTask TfLiteExecutionTask;

/// Enum tag for specifying whether a tensor is the input or output to the
/// model.
typedef enum TfLiteIoType {
  kTfLiteIoTypeUnknown = 0,
  kTfLiteIoTypeInput = 1,
  kTfLiteIoTypeOutput = 2,
} TfLiteIoType;

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif // TENSORFLOW_LITE_CORE_ASYNC_C_TYPES_H_
537  include/tensorflow/lite/core/c/builtin_op_data.h  Normal file
@@ -0,0 +1,537 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// WARNING: Users of TensorFlow Lite should not include this file directly,
/// but should instead include
/// "third_party/tensorflow/lite/c/builtin_op_data.h".
/// Only the TensorFlow Lite implementation itself should include this
/// file directly.
#ifndef TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_
#define TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_

#include <stdint.h>

#include "tensorflow/lite/core/c/common.h"

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible
// number of dimensions.
#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8

// TODO(aselle): Consider using "if this then that" for testing.

// Useful placeholder to put in otherwise empty structs to avoid size warnings.
typedef struct {
  char dummy;
} EmptyStructPlaceholder;

// IMPORTANT: All new members of structs must be added at the end to ensure
// backwards compatibility.

// Possible padding types (for convolutions)
typedef enum {
  kTfLitePaddingUnknown = 0,
  kTfLitePaddingSame,
  kTfLitePaddingValid,
} TfLitePadding;

typedef enum {
  kTfLiteMirrorPaddingUnknown = 0,
  kTfLiteMirrorPaddingReflect,
  kTfLiteMirrorPaddingSymmetric,
} TfLiteMirrorPaddingMode;

// TODO(b/130259536): We should move this out of builtin_op_data.
typedef struct {
  int width;
  int height;
  int width_offset;
  int height_offset;
} TfLitePaddingValues;

typedef struct {
  TfLiteMirrorPaddingMode mode;
} TfLiteMirrorPaddingParams;

// Possible fused activation functions.
typedef enum {
  kTfLiteActNone = 0,
  kTfLiteActRelu,
  kTfLiteActReluN1To1, // min(max(-1, x), 1)
  kTfLiteActRelu6,     // min(max(0, x), 6)
  kTfLiteActTanh,
  kTfLiteActSignBit,
  kTfLiteActSigmoid,
} TfLiteFusedActivation;

typedef struct {
  // Parameters for CONV_2D version 1.
  TfLitePadding padding;
  int stride_width;
  int stride_height;
  TfLiteFusedActivation activation;

  // Parameters for CONV_2D version 2.
  // Note: Version 2 supports dilation values not equal to 1.
  int dilation_width_factor;
  int dilation_height_factor;
} TfLiteConvParams;

typedef struct {
  TfLitePadding padding;
  int stride_width;
  int stride_height;
  int stride_depth;
  int dilation_width_factor;
  int dilation_height_factor;
  int dilation_depth_factor;
  TfLiteFusedActivation activation;
} TfLiteConv3DParams;

typedef TfLiteConv3DParams TfLiteConv3DTransposeParams;

typedef struct {
  TfLitePadding padding;
  int stride_width;
  int stride_height;
  int filter_width;
  int filter_height;
  TfLiteFusedActivation activation;
  struct {
    TfLitePaddingValues padding;
  } computed;
} TfLitePoolParams;

typedef struct {
  // Parameters for DepthwiseConv version 1 or above.
  TfLitePadding padding;
  int stride_width;
  int stride_height;
  // `depth_multiplier` is redundant. It's used by CPU kernels in
  // TensorFlow 2.0 or below, but ignored in versions above.
  //
  // The information can be deduced from the shape of input and the shape of
  // weights. Since the TFLiteConverter toolchain doesn't support partially
  // specified shapes, relying on `depth_multiplier` stops us from supporting
  // graphs with dynamic shape tensors.
  //
  // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this
  // field.
  int depth_multiplier;
  TfLiteFusedActivation activation;
  // Parameters for DepthwiseConv version 2 or above.
  int dilation_width_factor;
  int dilation_height_factor;
} TfLiteDepthwiseConvParams;

typedef struct {
  int rank;
  TfLiteFusedActivation activation;

  // Parameter for SVDF version 4.
  bool asymmetric_quantize_inputs;
} TfLiteSVDFParams;

typedef struct {
  TfLiteFusedActivation activation;

  // Parameter for RNN version 3.
  bool asymmetric_quantize_inputs;
} TfLiteRNNParams;

typedef struct {
  bool time_major;
  TfLiteFusedActivation activation;

  // Parameter for Sequence RNN version 3.
  bool asymmetric_quantize_inputs;
} TfLiteSequenceRNNParams;

typedef struct {
  bool time_major;
  TfLiteFusedActivation activation;
  bool merge_outputs;

  // Parameter for Bidirectional RNN verison 3.
  bool asymmetric_quantize_inputs;
} TfLiteBidirectionalSequenceRNNParams;

typedef enum {
  kTfLiteFullyConnectedWeightsFormatDefault = 0,
  kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1,
} TfLiteFullyConnectedWeightsFormat;

typedef struct {
  // Parameters for FullyConnected version 1 or above.
  TfLiteFusedActivation activation;

  // Parameters for FullyConnected version 2 or above.
  TfLiteFullyConnectedWeightsFormat weights_format;

  // Parameters for FullyConnected version 5 or above.
  // If set to true, then the number of dimensions in the input and the output
  // tensors are the same. Furthermore, all but the last dimension of the input
  // and output shapes will be equal.
  bool keep_num_dims;

  // Parameters for FullyConnected version 7 or above.
  // If set to true and the weights are quantized, then non constant inputs
  // are quantized at evaluation time with asymmetric quantization.
  bool asymmetric_quantize_inputs;
} TfLiteFullyConnectedParams;

typedef enum {
  kTfLiteLshProjectionUnknown = 0,
  kTfLiteLshProjectionSparse = 1,
  kTfLiteLshProjectionDense = 2,
} TfLiteLSHProjectionType;

typedef struct {
  TfLiteLSHProjectionType type;
} TfLiteLSHProjectionParams;

typedef struct {
  float beta;
} TfLiteSoftmaxParams;

typedef struct {
  int axis;
  TfLiteFusedActivation activation;
} TfLiteConcatenationParams;

typedef struct {
  TfLiteFusedActivation activation;
  // Parameter added for the version 4.
  bool pot_scale_int16;
} TfLiteAddParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLiteSpaceToBatchNDParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLiteBatchToSpaceNDParams;

typedef struct {
  bool adj_x;
  bool adj_y;
  // Parameters for BatchMatMul version 4 or above.
  // If set to true and the weights are quantized, then non constant inputs
  // are quantized at evaluation time with asymmetric quantization.
  bool asymmetric_quantize_inputs;
} TfLiteBatchMatMulParams;

typedef struct {
  TfLiteFusedActivation activation;
} TfLiteMulParams;

typedef struct {
  TfLiteFusedActivation activation;
  // Parameter added for the version 5.
  bool pot_scale_int16;
} TfLiteSubParams;

typedef struct {
  TfLiteFusedActivation activation;
} TfLiteDivParams;

typedef struct {
  TfLiteFusedActivation activation;
} TfLiteL2NormParams;

typedef struct {
  int radius;
  float bias;
  float alpha;
  float beta;
} TfLiteLocalResponseNormParams;

typedef enum {
  kTfLiteLSTMFullKernel = 0,
  kTfLiteLSTMBasicKernel
} TfLiteLSTMKernelType;

typedef struct {
  // Parameters for LSTM version 1.
  TfLiteFusedActivation activation;
  float cell_clip;
  float proj_clip;

  // Parameters for LSTM version 2.
  // kTfLiteLSTMBasicKernel is only supported in version 2 or above.
  TfLiteLSTMKernelType kernel_type;

  // Parameters for LSTM version 4.
  bool asymmetric_quantize_inputs;
} TfLiteLSTMParams;

typedef struct {
  // Parameters needed for the underlying LSTM.
  TfLiteFusedActivation activation;
  float cell_clip;
  float proj_clip;

  // If set to true then the first dimension is time, otherwise batch.
  bool time_major;

  // Parameter for unidirectional sequence RNN version 3.
  bool asymmetric_quantize_inputs;

  // Parameter for unidirectional sequence RNN version 4.
  bool diagonal_recurrent_tensors;
} TfLiteUnidirectionalSequenceLSTMParams;

typedef struct {
  // Parameters supported by version 1:
  // Parameters inherited for the LSTM kernel.
  TfLiteFusedActivation activation;
  float cell_clip;
  float proj_clip;

  // If true, store the outputs of both directions in the first output.
  bool merge_outputs;

  // Parameters supported by version 2:
  // If set to true then the first dimension is time, otherwise batch.
  bool time_major;

  // Parameters supported by version 3:
  // If set to true, then hybrid ops use asymmetric quantization for inputs.
  bool asymmetric_quantize_inputs;
} TfLiteBidirectionalSequenceLSTMParams;

typedef struct {
  bool align_corners;
  // half_pixel_centers assumes pixels are of half the actual dimensions, and
  // yields more accurate resizes. Corresponds to the same argument for the
  // original TensorFlow op in TF2.0.
  bool half_pixel_centers;
} TfLiteResizeBilinearParams;

typedef struct {
  bool align_corners;
  bool half_pixel_centers;
} TfLiteResizeNearestNeighborParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLitePadParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLitePadV2Params;

typedef struct {
  // These fields are only used in old models for backward compatibility.
  // In the current implementation, we use the 2nd input of the op as the shape,
  // and these fields are unused.
  int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT];
  int num_dimensions;
} TfLiteReshapeParams;

typedef struct {
  int ngram_size;
  int max_skip_size;
  bool include_all_ngrams;
} TfLiteSkipGramParams;

typedef struct {
  int block_size;
} TfLiteSpaceToDepthParams;

typedef struct {
  int block_size;
} TfLiteDepthToSpaceParams;

typedef struct {
  TfLiteType in_data_type;
  TfLiteType out_data_type;
} TfLiteCastParams;

typedef enum {
  kTfLiteCombinerTypeSum = 0,
  kTfLiteCombinerTypeMean = 1,
  kTfLiteCombinerTypeSqrtn = 2,
} TfLiteCombinerType;

typedef struct {
  TfLiteCombinerType combiner;
} TfLiteEmbeddingLookupSparseParams;

typedef struct {
  int axis;
  int batch_dims;
} TfLiteGatherParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLiteTransposeParams;

typedef struct {
  bool keep_dims;
} TfLiteReducerParams;

typedef struct {
  int num_splits;
} TfLiteSplitParams;

typedef struct {
  int num_splits;
} TfLiteSplitVParams;

typedef struct {
  // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
  // For now we will fix the maximum possible number of dimensions.
  int squeeze_dims[8];
  int num_squeeze_dims;
} TfLiteSqueezeParams;

typedef struct {
  int begin_mask;
  int end_mask;
  int ellipsis_mask;
  int new_axis_mask;
  int shrink_axis_mask;
} TfLiteStridedSliceParams;

typedef struct {
  TfLiteType output_type;
} TfLiteArgMaxParams;

typedef struct {
  TfLiteType output_type;
} TfLiteArgMinParams;

typedef struct {
  // Parameters supported by version 1:
  TfLitePadding padding;
  int stride_width;
  int stride_height;

  // Parameters supported by version 4:
  TfLiteFusedActivation activation;
} TfLiteTransposeConvParams;

typedef struct {
  bool validate_indices;
} TfLiteSparseToDenseParams;

typedef struct {
  TfLiteType out_type;
} TfLiteShapeParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLiteRankParams;

typedef struct {
  // Parameters supported by version 1:
  float min;
  float max;
  int num_bits;

  // Parameters supported by version 2:
  bool narrow_range;
} TfLiteFakeQuantParams;

typedef struct {
  int values_count;
  int axis;
} TfLitePackParams;

typedef struct {
  int axis;
} TfLiteOneHotParams;

typedef struct {
  int num;
  int axis;
} TfLiteUnpackParams;

typedef struct {
  float alpha;
} TfLiteLeakyReluParams;

typedef struct {
  TfLiteType index_out_type;
} TfLiteUniqueParams;

typedef struct {
  int seq_dim;
  int batch_dim;
} TfLiteReverseSequenceParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLiteMatrixDiagParams;

typedef struct {
  EmptyStructPlaceholder placeholder;
} TfLiteMatrixSetDiagParams;

typedef struct {
  int then_subgraph_index;
  int else_subgraph_index;
} TfLiteIfParams;

typedef struct {
  int cond_subgraph_index;
  int body_subgraph_index;
} TfLiteWhileParams;

typedef struct {
  bool exclusive;
  bool reverse;
} TfLiteCumsumParams;

typedef struct {
  int init_subgraph_index;
} TfLiteCallOnceParams;

typedef struct {
  int table_id;
  TfLiteType key_dtype;
  TfLiteType value_dtype;
} TfLiteHashtableParams;

typedef struct {
  const char* container;
  const char* shared_name;
} TfLiteVarHandleParams;

typedef struct {
  int seed;
  int seed2;
} TfLiteRandomParams;

typedef struct {
  int num_boundaries;
  // This points to the memory stored in the model (flatbuffer),
  // and is not owned.
  const float* boundaries;
} TfLiteBucketizeParams;

typedef struct {
  bool approximate;
} TfLiteGeluParams;

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif // TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_
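As a small aside (not part of the commit), here is what the "new members only at the end" convention documented above means in practice for a backend that consumes these parameter blocks. The snippet only uses types and enum values declared in builtin_op_data.h; the function name is illustrative.

#include <assert.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"

/* Illustrative only: fill a CONV_2D parameter block the way a test harness
 * might, then read it back the way a backend would. */
static void example_conv_params(void)
{
   TfLiteConvParams params = {
      /* CONV_2D version 1 fields. */
      .padding = kTfLitePaddingSame,
      .stride_width = 1,
      .stride_height = 1,
      .activation = kTfLiteActRelu6,
      /* CONV_2D version 2 fields, appended at the end per the ABI rule. */
      .dilation_width_factor = 1,
      .dilation_height_factor = 1,
   };

   /* A backend that only understands version 1 can ignore the trailing
    * members; because nothing is reordered, the earlier offsets never move. */
   assert(params.padding == kTfLitePaddingSame);
   assert(params.activation == kTfLiteActRelu6);
}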
566
include/tensorflow/lite/core/c/c_api.h
Normal file
566
include/tensorflow/lite/core/c/c_api.h
Normal file
@ -0,0 +1,566 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
/// \warning Users of TensorFlow Lite should not include this file directly,
|
||||
/// but should instead include "third_party/tensorflow/lite/c/c_api.h".
|
||||
/// Only the TensorFlow Lite implementation itself should include this
|
||||
/// file directly.
|
||||
#ifndef TENSORFLOW_LITE_CORE_C_C_API_H_
|
||||
#define TENSORFLOW_LITE_CORE_C_C_API_H_
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "tensorflow/lite/builtin_ops.h"
|
||||
#include "tensorflow/lite/core/async/c/types.h"
|
||||
#include "tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
/// \file
|
||||
/// C API for TensorFlow Lite.
|
||||
///
|
||||
/// The API leans towards simplicity and uniformity instead of convenience, as
|
||||
/// most usage will be by language-specific wrappers. It provides largely the
|
||||
/// same set of functionality as that of the C++ TensorFlow Lite `Interpreter`
|
||||
/// API, but is useful for shared libraries where having a stable ABI boundary
|
||||
/// is important.
|
||||
///
|
||||
/// Conventions:
|
||||
/// * We use the prefix TfLite for everything in the API.
|
||||
/// * size_t is used to represent byte sizes of objects that are
|
||||
/// materialized in the address space of the calling process.
|
||||
/// * int is used as an index into arrays.
|
||||
///
|
||||
/// Usage:
|
||||
/// <pre><code>
|
||||
/// // Create the model and interpreter options.
|
||||
/// TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
|
||||
/// TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
|
||||
/// TfLiteInterpreterOptionsSetNumThreads(options, 2);
|
||||
///
|
||||
/// // Create the interpreter.
|
||||
/// TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
|
||||
///
|
||||
/// // Allocate tensors and populate the input tensor data.
|
||||
/// TfLiteInterpreterAllocateTensors(interpreter);
|
||||
/// TfLiteTensor* input_tensor =
|
||||
/// TfLiteInterpreterGetInputTensor(interpreter, 0);
|
||||
/// TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
|
||||
/// input.size() * sizeof(float));
|
||||
///
|
||||
/// // Execute inference.
|
||||
/// TfLiteInterpreterInvoke(interpreter);
|
||||
///
|
||||
/// // Extract the output tensor data.
|
||||
/// const TfLiteTensor* output_tensor =
|
||||
/// TfLiteInterpreterGetOutputTensor(interpreter, 0);
|
||||
/// TfLiteTensorCopyToBuffer(output_tensor, output.data(),
|
||||
/// output.size() * sizeof(float));
|
||||
///
|
||||
/// // Dispose of the model and interpreter objects.
|
||||
/// TfLiteInterpreterDelete(interpreter);
|
||||
/// TfLiteInterpreterOptionsDelete(options);
|
||||
/// TfLiteModelDelete(model);
|
||||
///
|
||||
/// </code></pre>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif // __cplusplus
|
||||
|
||||
// This header should be valid in both C (e.g. C99) and C++,
|
||||
// so 'void' in parameters is not redundant.
|
||||
// NOLINTBEGIN(modernize-redundant-void-arg)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Opaque types used by the C API. (See also c_api_types.h.)
|
||||
|
||||
/// TfLiteModel wraps a loaded TensorFlow Lite model.
|
||||
typedef struct TfLiteModel TfLiteModel;
|
||||
|
||||
/// TfLiteInterpreterOptions allows customized interpreter configuration.
|
||||
typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions;
|
||||
|
||||
/// TfLiteInterpreter provides inference from a provided model.
|
||||
typedef struct TfLiteInterpreter TfLiteInterpreter;
|
||||
|
||||
/// A tensor in the interpreter system which is a wrapper around a buffer of
|
||||
/// data including a dimensionality (or NULL if not currently defined).
|
||||
typedef struct TfLiteTensor TfLiteTensor;
|
||||
|
||||
/// TfLiteRegistrationExternal is an external version of TfLiteRegistration to
|
||||
/// use custom op registration API.
|
||||
/// \warning This is an experimental type and subject to change.
|
||||
typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
/// The TensorFlow Lite Runtime version.
|
||||
///
|
||||
/// Returns a pointer to a statically allocated string that is the version
|
||||
/// number of the (potentially dynamically loaded) TF Lite Runtime library.
|
||||
/// TensorFlow Lite uses semantic versioning, and the return value should be
|
||||
/// in semver 2 format <http://semver.org>, starting with MAJOR.MINOR.PATCH,
|
||||
/// e.g. "2.12.0" or "2.13.0-rc2".
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteVersion(void);
|
||||
|
||||
/// The supported TensorFlow Lite model file Schema version.
|
||||
///
|
||||
/// Returns the (major) version number of the Schema used for model
|
||||
/// files that is supported by the (potentially dynamically loaded)
|
||||
/// TensorFlow Lite Runtime.
|
||||
///
|
||||
/// Model files using schema versions different to this may not be supported by
|
||||
/// the current version of the TF Lite Runtime.
|
||||
TFL_CAPI_EXPORT int TfLiteSchemaVersion(void);
|
||||
|
||||
/// Returns a model from the provided buffer, or null on failure.
|
||||
///
|
||||
/// \note The caller retains ownership of the `model_data` buffer and should
|
||||
/// ensure that the lifetime of the `model_data` buffer must be at least as long
|
||||
/// as the lifetime of the `TfLiteModel` and of any `TfLiteInterpreter` objects
|
||||
/// created from that `TfLiteModel`, and furthermore the contents of the
|
||||
/// `model_data` buffer must not be modified during that time."
|
||||
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data,
|
||||
size_t model_size);
|
||||
|
||||
/// Same as `TfLiteModelCreate` with customizble error reporter.
|
||||
/// * `reporter` takes the provided `user_data` object, as well as a C-style
|
||||
/// format string and arg list (see also vprintf).
|
||||
/// * `user_data` is optional. If non-null, it is owned by the client and must
|
||||
/// remain valid for the duration of the interpreter lifetime.
|
||||
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateWithErrorReporter(
|
||||
const void* model_data, size_t model_size,
|
||||
void (*reporter)(void* user_data, const char* format, va_list args),
|
||||
void* user_data);
|
||||
|
||||
/// Returns a model from the provided file, or null on failure.
|
||||
///
|
||||
/// \note The file's contents must not be modified during the lifetime of the
|
||||
/// `TfLiteModel` or of any `TfLiteInterpreter` objects created from that
|
||||
/// `TfLiteModel`.
|
||||
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile(
|
||||
const char* model_path);
|
||||
|
||||
/// Same as `TfLiteModelCreateFromFile` with customizble error reporter.
|
||||
/// * `reporter` takes the provided `user_data` object, as well as a C-style
|
||||
/// format string and arg list (see also vprintf).
|
||||
/// * `user_data` is optional. If non-null, it is owned by the client and must
|
||||
/// remain valid for the duration of the interpreter lifetime.
|
||||
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFileWithErrorReporter(
|
||||
const char* model_path,
|
||||
void (*reporter)(void* user_data, const char* format, va_list args),
|
||||
void* user_data);
|
||||
|
||||
/// Destroys the model instance.
|
||||
TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model);
|
||||
|
||||
/// Returns a new interpreter options instances.
|
||||
TFL_CAPI_EXPORT extern TfLiteInterpreterOptions*
|
||||
TfLiteInterpreterOptionsCreate();
|
||||
|
||||
/// Creates and returns a shallow copy of an options object.
|
||||
///
|
||||
/// The caller is responsible for calling `TfLiteInterpreterOptionsDelete` to
|
||||
/// deallocate the object pointed to by the returned pointer.
|
||||
TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* TfLiteInterpreterOptionsCopy(
|
||||
const TfLiteInterpreterOptions* from);
|
||||
|
||||
/// Destroys the interpreter options instance.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsDelete(
|
||||
TfLiteInterpreterOptions* options);
|
||||
|
||||
/// Sets the number of CPU threads to use for the interpreter.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetNumThreads(
|
||||
TfLiteInterpreterOptions* options, int32_t num_threads);
|
||||
|
||||
/// Adds a delegate to be applied during `TfLiteInterpreter` creation.
|
||||
///
|
||||
/// If delegate application fails, interpreter creation will also fail with an
|
||||
/// associated error logged.
|
||||
///
|
||||
/// \note The caller retains ownership of the delegate and should ensure that it
|
||||
/// remains valid for the duration of any created interpreter's lifetime.
|
||||
///
|
||||
/// If you are NOT using "TensorFlow Lite in Play Services", and NOT building
|
||||
/// with `TFLITE_WITH_STABLE_ABI` or `TFLITE_USE_OPAQUE_DELEGATE` macros
|
||||
/// enabled, it is possible to pass a `TfLiteDelegate*` rather than a
|
||||
/// `TfLiteOpaqueDelegate*` to this function, since in those cases,
|
||||
/// `TfLiteOpaqueDelegate` is just a typedef alias for `TfLiteDelegate`.
|
||||
/// This is for compatibility with existing source code
|
||||
/// and existing delegates. For new delegates, it is recommended to
|
||||
/// use `TfLiteOpaqueDelegate` rather than `TfLiteDelegate`. (See
|
||||
/// `TfLiteOpaqueDelegate` in tensorflow/lite/core/c/c_api_types.h.)
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate(
|
||||
TfLiteInterpreterOptions* options, TfLiteOpaqueDelegate* delegate);
|
||||
|
||||
/// Sets a custom error reporter for interpreter execution.
|
||||
///
|
||||
/// * `reporter` takes the provided `user_data` object, as well as a C-style
|
||||
/// format string and arg list (see also vprintf).
|
||||
/// * `user_data` is optional. If non-null, it is owned by the client and must
|
||||
/// remain valid for the duration of the interpreter lifetime.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter(
|
||||
TfLiteInterpreterOptions* options,
|
||||
void (*reporter)(void* user_data, const char* format, va_list args),
|
||||
void* user_data);
|
||||
|
||||
/// Adds an op registration to be applied during `TfLiteInterpreter` creation.
|
||||
///
|
||||
/// The `TfLiteRegistrationExternal` object is needed to implement custom op of
|
||||
/// TFLite Interpreter via C API. Calling this function ensures that any
|
||||
/// `TfLiteInterpreter` created with the specified `options` can execute models
|
||||
/// that use the custom operator specified in `registration`.
|
||||
/// Please refer https://www.tensorflow.org/lite/guide/ops_custom for custom op
|
||||
/// support.
|
||||
/// \note The caller retains ownership of the TfLiteRegistrationExternal object
|
||||
/// and should ensure that it remains valid for the duration of any created
|
||||
/// interpreter's lifetime.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddRegistrationExternal(
|
||||
TfLiteInterpreterOptions* options,
|
||||
TfLiteRegistrationExternal* registration);
|
||||
|
||||
/// Enables users to cancel in-flight invocations with
|
||||
/// `TfLiteInterpreterCancel`.
|
||||
///
|
||||
/// By default it is disabled and calling to `TfLiteInterpreterCancel` will
|
||||
/// return kTfLiteError. See `TfLiteInterpreterCancel`.
|
||||
///
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterOptionsEnableCancellation(
|
||||
TfLiteInterpreterOptions* options, bool enable);
|
||||
|
||||
/// Returns a new interpreter using the provided model and options, or null on
|
||||
/// failure.
|
||||
///
|
||||
/// * `model` must be a valid model instance. The caller retains ownership of
|
||||
/// the object, and may destroy it (via TfLiteModelDelete) immediately after
|
||||
/// creating the interpreter. However, if the TfLiteModel was allocated with
|
||||
/// TfLiteModelCreate, then the `model_data` buffer that was passed to
|
||||
/// TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter
|
||||
/// object that this function returns, and must not be modified during that
|
||||
/// time; and if the TfLiteModel was allocated with TfLiteModelCreateFromFile,
|
||||
/// then the contents of the model file must not be modified during the
|
||||
/// lifetime of the TfLiteInterpreter object that this function returns.
|
||||
/// * `optional_options` may be null. The caller retains ownership of the
|
||||
/// object, and can safely destroy it (via TfLiteInterpreterOptionsDelete)
|
||||
/// immediately after creating the interpreter.
|
||||
///
|
||||
/// \note The client *must* explicitly allocate tensors before attempting to
|
||||
/// access input tensor data or invoke the interpreter.
|
||||
TFL_CAPI_EXPORT extern TfLiteInterpreter* TfLiteInterpreterCreate(
|
||||
const TfLiteModel* model, const TfLiteInterpreterOptions* optional_options);
|
||||
|
||||
/// Destroys the interpreter.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterDelete(
|
||||
TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns the number of input tensors associated with the model.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorCount(
|
||||
const TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns a pointer to an array of input tensor indices. The length of the
|
||||
/// array can be obtained via a call to `TfLiteInterpreterGetInputTensorCount`.
|
||||
///
|
||||
/// Typically the input tensors associated with an `interpreter` would be set
|
||||
/// during the initialization of the `interpreter`, through a mechanism like the
|
||||
/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the
|
||||
/// interpreter. However, there are some circumstances in which the pointer may
|
||||
/// not remain valid throughout the lifetime of the interpreter, because calls
|
||||
/// to `SetInputs` on the interpreter invalidate the returned pointer.
|
||||
///
|
||||
/// The ownership of the array remains with the TFLite runtime.
|
||||
TFL_CAPI_EXPORT const int* TfLiteInterpreterInputTensorIndices(
|
||||
const TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns the tensor associated with the input index.
|
||||
/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor)
|
||||
TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor(
|
||||
const TfLiteInterpreter* interpreter, int32_t input_index);
|
||||
|
||||
/// Resizes the specified input tensor.
|
||||
///
|
||||
/// \note After a resize, the client *must* explicitly allocate tensors before
|
||||
/// attempting to access the resized tensor data or invoke the interpreter.
|
||||
///
|
||||
/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor)
|
||||
///
|
||||
/// This function makes a copy of the input dimensions, so the client can safely
|
||||
/// deallocate `input_dims` immediately after this function returns.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor(
|
||||
TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims,
|
||||
int32_t input_dims_size);
|
||||
|
||||
/// Updates allocations for all tensors, resizing dependent tensors using the
|
||||
/// specified input tensor dimensionality.
|
||||
///
|
||||
/// This is a relatively expensive operation, and need only be called after
|
||||
/// creating the graph and/or resizing any inputs.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors(
|
||||
TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Runs inference for the loaded graph.
|
||||
///
|
||||
/// Before calling this function, the caller should first invoke
|
||||
/// TfLiteInterpreterAllocateTensors() and should also set the values for the
|
||||
/// input tensors. After successfully calling this function, the values for the
|
||||
/// output tensors will be set.
|
||||
///
|
||||
/// \note It is possible that the interpreter is not in a ready state to
|
||||
/// evaluate (e.g., if AllocateTensors() hasn't been called, or if a
|
||||
/// ResizeInputTensor() has been performed without a subsequent call to
|
||||
/// AllocateTensors()).
|
||||
///
|
||||
/// If the (experimental!) delegate fallback option was enabled in the
|
||||
/// interpreter options, then the interpreter will automatically fall back to
|
||||
/// not using any delegates if execution with delegates fails. For details,
|
||||
/// see TfLiteInterpreterOptionsSetEnableDelegateFallback in
|
||||
/// c_api_experimental.h.
|
||||
///
|
||||
/// Returns one of the following status codes:
|
||||
/// - kTfLiteOk: Success. Output is valid.
|
||||
/// - kTfLiteDelegateError: Execution with delegates failed, due to a problem
|
||||
/// with the delegate(s). If fallback was not enabled, output is invalid.
|
||||
/// If fallback was enabled, this return value indicates that fallback
|
||||
/// succeeded, the output is valid, and all delegates previously applied to
|
||||
/// the interpreter have been undone.
|
||||
/// - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that
|
||||
/// the problem was not with the delegate itself, but rather was
|
||||
/// due to an incompatibility between the delegate(s) and the
|
||||
/// interpreter or model.
|
||||
/// - kTfLiteError: Unexpected/runtime failure. Output is invalid.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke(
|
||||
TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns the number of output tensors associated with the model.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount(
|
||||
const TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns a pointer to an array of output tensor indices. The length of the
|
||||
/// array can be obtained via a call to `TfLiteInterpreterGetOutputTensorCount`.
|
||||
///
|
||||
/// Typically the output tensors associated with an `interpreter` would be set
|
||||
/// during the initialization of the `interpreter`, through a mechanism like the
|
||||
/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the
|
||||
/// interpreter. However, there are some circumstances in which the pointer may
|
||||
/// not remain valid throughout the lifetime of the interpreter, because calls
|
||||
/// to `SetOutputs` on the interpreter invalidate the returned pointer.
|
||||
///
|
||||
/// The ownership of the array remains with the TFLite runtime.
|
||||
TFL_CAPI_EXPORT const int* TfLiteInterpreterOutputTensorIndices(
|
||||
const TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns the tensor associated with the output index.
|
||||
/// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(tensor)
|
||||
///
|
||||
/// \note The shape and underlying data buffer for output tensors may be not
|
||||
/// be available until after the output tensor has been both sized and
|
||||
/// allocated.
|
||||
/// In general, best practice is to interact with the output tensor *after*
|
||||
/// calling TfLiteInterpreterInvoke().
|
||||
TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteInterpreterGetOutputTensor(
|
||||
const TfLiteInterpreter* interpreter, int32_t output_index);
|
||||
|
||||
/// Returns modifiable access to the tensor that corresponds to the
|
||||
/// specified `index` and is associated with the provided `interpreter`.
|
||||
///
|
||||
/// This requires the `index` to be between 0 and N - 1, where N is the
|
||||
/// number of tensors in the model.
|
||||
///
|
||||
/// Typically the tensors associated with the `interpreter` would be set during
|
||||
/// the `interpreter` initialization, through a mechanism like the
|
||||
/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the
|
||||
/// interpreter. However, there are some circumstances in which the pointer may
|
||||
/// not remain valid throughout the lifetime of the interpreter, because calls
|
||||
/// to `AddTensors` on the interpreter invalidate the returned pointer.
|
||||
///
|
||||
/// Note the difference between this function and
|
||||
/// `TfLiteInterpreterGetInputTensor` (or `TfLiteInterpreterGetOutputTensor` for
|
||||
/// that matter): `TfLiteInterpreterGetTensor` takes an index into the array of
|
||||
/// all tensors associated with the `interpreter`'s model, whereas
|
||||
/// `TfLiteInterpreterGetInputTensor` takes an index into the array of input
|
||||
/// tensors.
|
||||
///
|
||||
/// The ownership of the tensor remains with the TFLite runtime, meaning the
|
||||
/// caller should not deallocate the pointer.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteTensor* TfLiteInterpreterGetTensor(const TfLiteInterpreter* interpreter,
|
||||
int index);
|
||||
|
||||
/// Tries to cancel any in-flight invocation.
|
||||
///
|
||||
/// \note This only cancels `TfLiteInterpreterInvoke` calls that happen before
|
||||
/// calling this and it does not cancel subsequent invocations.
|
||||
/// \note Calling this function will also cancel any in-flight invocations of
|
||||
/// SignatureRunners constructed from this interpreter.
|
||||
/// Non-blocking and thread safe.
|
||||
///
|
||||
/// Returns kTfLiteError if cancellation is not enabled via
|
||||
/// `TfLiteInterpreterOptionsEnableCancellation`.
|
||||
///
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterCancel(
|
||||
const TfLiteInterpreter* interpreter);
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TfLiteTensor wraps data associated with a graph tensor.
|
||||
//
|
||||
// Note that, while the TfLiteTensor struct is not currently opaque, and its
|
||||
// fields can be accessed directly, these methods are still convenient for
|
||||
// language bindings. In the future the tensor struct will likely be made opaque
|
||||
// in the public API.
|
||||
|
||||
/// Returns the type of a tensor element.
|
||||
TFL_CAPI_EXPORT extern TfLiteType TfLiteTensorType(const TfLiteTensor* tensor);
|
||||
|
||||
/// Returns the number of dimensions that the tensor has. Returns -1 in case
|
||||
/// the 'opaque_tensor' does not have its dimensions property set.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteTensorNumDims(const TfLiteTensor* tensor);
|
||||
|
||||
/// Returns the length of the tensor in the "dim_index" dimension.
|
||||
/// REQUIRES: 0 <= dim_index < TFLiteTensorNumDims(tensor)
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteTensorDim(const TfLiteTensor* tensor,
|
||||
int32_t dim_index);
|
||||
|
||||
/// Returns the size of the underlying data in bytes.
|
||||
TFL_CAPI_EXPORT extern size_t TfLiteTensorByteSize(const TfLiteTensor* tensor);
|
||||
|
||||
/// Returns a pointer to the underlying data buffer.
|
||||
///
|
||||
/// \note The result may be null if tensors have not yet been allocated, e.g.,
|
||||
/// if the Tensor has just been created or resized and `TfLiteAllocateTensors()`
|
||||
/// has yet to be called, or if the output tensor is dynamically sized and the
|
||||
/// interpreter hasn't been invoked.
|
||||
TFL_CAPI_EXPORT extern void* TfLiteTensorData(const TfLiteTensor* tensor);
|
||||
|
||||
/// Returns the (null-terminated) name of the tensor.
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteTensorName(const TfLiteTensor* tensor);
|
||||
|
||||
/// Returns the parameters for asymmetric quantization. The quantization
|
||||
/// parameters are only valid when the tensor type is `kTfLiteUInt8` and the
|
||||
/// `scale != 0`. Quantized values can be converted back to float using:
|
||||
/// real_value = scale * (quantized_value - zero_point);
|
||||
TFL_CAPI_EXPORT extern TfLiteQuantizationParams TfLiteTensorQuantizationParams(
|
||||
const TfLiteTensor* tensor);
|
||||
|
||||
/// Copies from the provided input buffer into the tensor's buffer.
|
||||
/// REQUIRES: input_data_size == TfLiteTensorByteSize(tensor)
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyFromBuffer(
|
||||
TfLiteTensor* tensor, const void* input_data, size_t input_data_size);
|
||||
|
||||
/// Copies to the provided output buffer from the tensor's buffer.
|
||||
/// REQUIRES: output_data_size == TfLiteTensorByteSize(tensor)
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyToBuffer(
|
||||
const TfLiteTensor* output_tensor, void* output_data,
|
||||
size_t output_data_size);
|
||||
|
||||
/// Returns a new TfLiteRegistrationExternal instance.
|
||||
///
|
||||
/// \note The caller retains ownership and should ensure that
|
||||
/// the lifetime of the `TfLiteRegistrationExternal` must be at least as long as
|
||||
/// the lifetime of the `TfLiteInterpreter`.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteRegistrationExternal*
|
||||
TfLiteRegistrationExternalCreate(TfLiteBuiltinOperator builtin_code,
|
||||
const char* custom_name, int version);
|
||||
|
||||
/// Return the builtin op code of the provided external 'registration'.
|
||||
///
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteBuiltinOperator
|
||||
TfLiteRegistrationExternalGetBuiltInCode(
|
||||
const TfLiteRegistrationExternal* registration);
|
||||
|
||||
/// Return the OP version of the provided external 'registration'. Return -1
|
||||
/// in case of error, or if the provided address is null.
|
||||
///
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern int TfLiteRegistrationExternalGetVersion(
|
||||
const TfLiteRegistrationExternal* registration);
|
||||
|
||||
/// Returns the custom name of the provided 'registration'. The returned pointer
|
||||
/// will be non-null iff the op is a custom op.
|
||||
///
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteRegistrationExternalGetCustomName(
|
||||
const TfLiteRegistrationExternal* registration);
|
||||
|
||||
/// Destroys the TfLiteRegistrationExternal instance.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalDelete(
|
||||
TfLiteRegistrationExternal* registration);
|
||||
|
||||
/// Sets the initialization callback for the registration.
|
||||
///
|
||||
/// The callback is called to initialize the op from serialized data.
|
||||
/// Please refer to `init` of `TfLiteRegistration` for details.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInit(
|
||||
TfLiteRegistrationExternal* registration,
|
||||
void* (*init)(TfLiteOpaqueContext* context, const char* buffer,
|
||||
size_t length));
|
||||
|
||||
/// Sets the deallocation callback for the registration.
|
||||
///
|
||||
/// This callback is called to deallocate the data returned by the init
|
||||
/// callback. The value passed in the `data` parameter is the value that was
|
||||
/// returned by the `init` callback.
|
||||
/// Please refer to `free` of `TfLiteRegistration` for details.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetFree(
|
||||
TfLiteRegistrationExternal* registration,
|
||||
void (*free)(TfLiteOpaqueContext* context, void* data));
|
||||
|
||||
/// Sets the preparation callback for the registration.
|
||||
///
|
||||
/// The callback is called when the inputs of operator have been resized.
|
||||
/// Please refer to `prepare` of `TfLiteRegistration` for details.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetPrepare(
|
||||
TfLiteRegistrationExternal* registration,
|
||||
TfLiteStatus (*prepare)(TfLiteOpaqueContext* context,
|
||||
TfLiteOpaqueNode* node));
|
||||
|
||||
/// Sets the invocation callback for the registration.
|
||||
///
|
||||
/// The callback is called when the operator is executed.
|
||||
/// Please refer to `invoke` of `TfLiteRegistration` for details.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInvoke(
|
||||
TfLiteRegistrationExternal* registration,
|
||||
TfLiteStatus (*invoke)(TfLiteOpaqueContext* context,
|
||||
TfLiteOpaqueNode* node));
|
||||
|
||||
/// Sets the async kernel accessor callback for the registration.
|
||||
///
|
||||
/// The callback is called to retrieve the async kernel if the delegate supports
|
||||
/// it. If the delegate does not support async execution, either this function
|
||||
/// should not be called, or `async_kernel` needs to be nullptr.
|
||||
/// `node` is the delegate TfLiteNode created by `ModifyGraphWithDelegate`.
|
||||
/// Please refer to `async_kernel` of `TfLiteRegistration` for details.
|
||||
/// \warning This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetAsyncKernel(
|
||||
TfLiteRegistrationExternal* registration,
|
||||
TfLiteAsyncKernel* (*async_kernel)(TfLiteOpaqueContext* context,
|
||||
TfLiteOpaqueNode* node));
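///
/// Example (illustrative sketch, not part of the upstream API surface; the
/// `My*` callbacks and the "MY_OP" name are hypothetical):
///
///     static void* MyInit(TfLiteOpaqueContext* context, const char* buffer,
///                         size_t length) { return NULL; }
///     static void MyFree(TfLiteOpaqueContext* context, void* data) {}
///     static TfLiteStatus MyPrepare(TfLiteOpaqueContext* context,
///                                   TfLiteOpaqueNode* node) { return kTfLiteOk; }
///     static TfLiteStatus MyInvoke(TfLiteOpaqueContext* context,
///                                  TfLiteOpaqueNode* node) { return kTfLiteOk; }
///
///     TfLiteRegistrationExternal* reg =
///         TfLiteRegistrationExternalCreate(kTfLiteBuiltinCustom, "MY_OP",
///                                          /*version=*/1);
///     TfLiteRegistrationExternalSetInit(reg, MyInit);
///     TfLiteRegistrationExternalSetFree(reg, MyFree);
///     TfLiteRegistrationExternalSetPrepare(reg, MyPrepare);
///     TfLiteRegistrationExternalSetInvoke(reg, MyInvoke);
///     // ... hand `reg` to an op resolver or delegate kernel; delete it only
///     // after the interpreter that uses it has been destroyed:
///     TfLiteRegistrationExternalDelete(reg);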
|
||||
// NOLINTEND(modernize-redundant-void-arg)
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // TENSORFLOW_LITE_CORE_C_C_API_H_
|
468
include/tensorflow/lite/core/c/c_api_experimental.h
Normal file
468
include/tensorflow/lite/core/c/c_api_experimental.h
Normal file
@ -0,0 +1,468 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
/// WARNING: Users of TensorFlow Lite should not include this file directly,
|
||||
/// but should instead include
|
||||
/// "third_party/tensorflow/lite/c/c_api_experimental.h".
|
||||
/// Only the TensorFlow Lite implementation itself should include this
|
||||
/// file directly.
|
||||
#ifndef TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_
|
||||
#define TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_
|
||||
|
||||
#include "tensorflow/lite/builtin_ops.h"
|
||||
#include "tensorflow/lite/core/c/c_api.h"
|
||||
#include "tensorflow/lite/core/c/common.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif // __cplusplus
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Opaque types used by the C API.
|
||||
|
||||
/// TfLiteSignatureRunner is used to run inference on a signature.
|
||||
///
|
||||
/// Note: A signature is used to define a computation in a TF model. A model can
|
||||
/// have multiple signatures. Each signature contains three components:
|
||||
/// * Signature Key: A unique string to identify a signature
|
||||
/// * Inputs: A list of names, each mapped to an input tensor of a signature
|
||||
/// * Outputs: A list of names, each mapped to an output tensor of a signature
|
||||
///
|
||||
/// To learn more about signatures in TFLite, refer to:
|
||||
/// https://www.tensorflow.org/lite/guide/signatures
|
||||
///
|
||||
/// Using the TfLiteSignatureRunner, for a particular signature, you can set its
|
||||
/// inputs, invoke (i.e. execute) the computation, and retrieve its outputs.
|
||||
typedef struct TfLiteSignatureRunner TfLiteSignatureRunner;
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
/// Resets all variable tensors to zero.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResetVariableTensors(
|
||||
TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Adds an op registration for a builtin operator.
|
||||
///
|
||||
/// Op registrations are used to map ops referenced in the flatbuffer model
|
||||
/// to executable function pointers (`TfLiteRegistration`s).
|
||||
///
|
||||
/// NOTE: The interpreter will make a shallow copy of `registration` internally,
|
||||
/// so the caller should ensure that its contents (function pointers, etc...)
|
||||
/// remain valid for the duration of the interpreter's lifetime. A common
|
||||
/// practice is making the provided `TfLiteRegistration` instance static.
|
||||
///
|
||||
/// Code that uses this function should NOT call
|
||||
/// `TfLiteInterpreterOptionsSetOpResolver` (or related functions) on the same
|
||||
/// options object.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddBuiltinOp(
|
||||
TfLiteInterpreterOptions* options, TfLiteBuiltinOperator op,
|
||||
const TfLiteRegistration* registration, int32_t min_version,
|
||||
int32_t max_version);
|
||||
|
||||
/// Adds an op registration for a custom operator.
|
||||
///
|
||||
/// Op registrations are used to map ops referenced in the flatbuffer model
|
||||
/// to executable function pointers (`TfLiteRegistration`s).
|
||||
///
|
||||
/// NOTE: The interpreter will make a shallow copy of `registration` internally,
|
||||
/// so the caller should ensure that its contents (function pointers, etc...)
|
||||
/// remain valid for the duration of any created interpreter's lifetime. A
|
||||
/// common practice is making the provided `TfLiteRegistration` instance static.
|
||||
///
|
||||
/// The lifetime of the string pointed to by `name` must be at least as long
|
||||
/// as the lifetime of the `TfLiteInterpreterOptions`.
|
||||
///
|
||||
/// Code that uses this function should NOT call
|
||||
/// `TfLiteInterpreterOptionsSetOpResolver` (or related functions) on the same
|
||||
/// options object.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddCustomOp(
|
||||
TfLiteInterpreterOptions* options, const char* name,
|
||||
const TfLiteRegistration* registration, int32_t min_version,
|
||||
int32_t max_version);
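///
/// Example (illustrative sketch; `MyAddPrepare` and `MyAddEval` are
/// hypothetical kernel callbacks with the `TfLiteRegistration` signatures):
///
///     static TfLiteStatus MyAddPrepare(TfLiteContext* context, TfLiteNode* node);
///     static TfLiteStatus MyAddEval(TfLiteContext* context, TfLiteNode* node);
///
///     static TfLiteRegistration my_add_registration = {
///         .prepare = MyAddPrepare,
///         .invoke = MyAddEval,
///     };
///
///     TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
///     TfLiteInterpreterOptionsAddBuiltinOp(options, kTfLiteBuiltinAdd,
///                                          &my_add_registration,
///                                          /*min_version=*/1,
///                                          /*max_version=*/1);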
|
||||
/// Registers callbacks for resolving builtin or custom operators.
|
||||
///
|
||||
/// The `TfLiteInterpreterOptionsSetOpResolverExternal` function provides an
|
||||
/// alternative method for registering builtin ops and/or custom ops, by
|
||||
/// providing operator resolver callbacks. Unlike using
|
||||
/// `TfLiteInterpreterOptionsAddBuiltinOp` and/or
|
||||
/// `TfLiteInterpreterOptionsAddCustomOp`, these let you register all the
|
||||
/// operators in a single call.
|
||||
///
|
||||
/// Code that uses this function should NOT call
|
||||
/// `TfLiteInterpreterOptionsAddBuiltinOp` or
|
||||
/// `TfLiteInterpreterOptionsAddCustomOp` on the same options object.
|
||||
///
|
||||
/// If `op_resolver_user_data` is non-null, its lifetime must be at least as
|
||||
/// long as the lifetime of the `TfLiteInterpreterOptions`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
void TfLiteInterpreterOptionsSetOpResolverExternal(
|
||||
TfLiteInterpreterOptions* options,
|
||||
const TfLiteRegistrationExternal* (*find_builtin_op)(void* user_data,
|
||||
int op, int version),
|
||||
const TfLiteRegistrationExternal* (*find_custom_op)(void* user_data,
|
||||
const char* custom_op,
|
||||
int version),
|
||||
void* op_resolver_user_data);
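///
/// Example (illustrative sketch; `FindBuiltinOp` and `FindCustomOp` are
/// hypothetical resolver callbacks):
///
///     static const TfLiteRegistrationExternal* FindBuiltinOp(
///         void* user_data, int op, int version) {
///       return NULL;  // No builtin ops resolved in this sketch.
///     }
///     static const TfLiteRegistrationExternal* FindCustomOp(
///         void* user_data, const char* custom_op, int version) {
///       return NULL;  // Look up and return a matching registration here.
///     }
///
///     TfLiteInterpreterOptionsSetOpResolverExternal(
///         options, FindBuiltinOp, FindCustomOp,
///         /*op_resolver_user_data=*/NULL);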
|
||||
/// Registers callbacks for resolving builtin or custom operators.
|
||||
///
|
||||
/// The `TfLiteInterpreterOptionsSetOpResolver` function provides an alternative
|
||||
/// method for registering builtin ops and/or custom ops, by providing operator
|
||||
/// resolver callbacks. Unlike using `TfLiteInterpreterOptionsAddBuiltinOp`
|
||||
/// and/or `TfLiteInterpreterOptionsAddCustomOp`, these let you register all
|
||||
/// the operators in a single call.
|
||||
///
|
||||
/// Code that uses this function should NOT call
|
||||
/// `TfLiteInterpreterOptionsAddBuiltinOp` or
|
||||
/// `TfLiteInterpreterOptionsAddCustomOp` on the same options object.
|
||||
///
|
||||
/// If `op_resolver_user_data` is non-null, its lifetime must be at least as
|
||||
/// long as the lifetime of the `TfLiteInterpreterOptions`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
///
|
||||
/// DEPRECATED: use TfLiteInterpreterOptionsSetOpResolverExternal instead.
|
||||
void TfLiteInterpreterOptionsSetOpResolver(
|
||||
TfLiteInterpreterOptions* options,
|
||||
const TfLiteRegistration* (*find_builtin_op)(void* user_data,
|
||||
TfLiteBuiltinOperator op,
|
||||
int version),
|
||||
const TfLiteRegistration* (*find_custom_op)(void* user_data,
|
||||
const char* custom_op,
|
||||
int version),
|
||||
void* op_resolver_user_data);
|
||||
|
||||
/// \private
|
||||
/// Backward-compat version of TfLiteInterpreterOptionsSetOpResolver.
|
||||
///
|
||||
/// WARNING: This function is deprecated / not an official part of the API, is
|
||||
/// only for binary backwards compatibility, and should not be called.
|
||||
void TfLiteInterpreterOptionsSetOpResolverV2(
|
||||
TfLiteInterpreterOptions* options,
|
||||
const TfLiteRegistration_V2* (*find_builtin_op_v2)(void* user_data,
|
||||
TfLiteBuiltinOperator op,
|
||||
int version),
|
||||
const TfLiteRegistration_V2* (*find_custom_op_v2)(void* user_data,
|
||||
const char* op,
|
||||
int version),
|
||||
void* op_resolver_user_data);
|
||||
|
||||
/// \private
|
||||
/// Backward-compat version of TfLiteInterpreterOptionsSetOpResolver.
|
||||
///
|
||||
/// WARNING: This function is deprecated / not an official part of the API, is
|
||||
/// only for binary backwards compatibility, and should not be called.
|
||||
void TfLiteInterpreterOptionsSetOpResolverV1(
|
||||
TfLiteInterpreterOptions* options,
|
||||
const TfLiteRegistration_V1* (*find_builtin_op_v1)(void* user_data,
|
||||
TfLiteBuiltinOperator op,
|
||||
int version),
|
||||
const TfLiteRegistration_V1* (*find_custom_op_v1)(void* user_data,
|
||||
const char* op,
|
||||
int version),
|
||||
void* op_resolver_user_data);
|
||||
|
||||
/// Returns a new interpreter using the provided model and options, or null on
|
||||
/// failure, where the model uses only the operators explicitly added to the
|
||||
/// options. This is the same as `TfLiteInterpreterCreate` from `c_api.h`,
|
||||
/// except that the only operators that are supported are the ones registered
|
||||
/// in `options` via calls to `TfLiteInterpreterOptionsSetOpResolver`,
|
||||
/// `TfLiteInterpreterOptionsAddBuiltinOp`, and/or
|
||||
/// `TfLiteInterpreterOptionsAddCustomOp`.
|
||||
///
|
||||
/// * `model` must be a valid model instance. The caller retains ownership of
|
||||
/// the object, and can destroy it immediately after creating the interpreter;
|
||||
/// the interpreter will maintain its own reference to the underlying model
|
||||
/// data.
|
||||
/// * `options` should not be null. The caller retains ownership of the object,
|
||||
/// and can safely destroy it immediately after creating the interpreter.
|
||||
///
|
||||
/// NOTE: The client *must* explicitly allocate tensors before attempting to
|
||||
/// access input tensor data or invoke the interpreter.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteInterpreter*
|
||||
TfLiteInterpreterCreateWithSelectedOps(const TfLiteModel* model,
|
||||
const TfLiteInterpreterOptions* options);
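///
/// Example (illustrative sketch; `model_data`/`model_size` are assumed to
/// describe a valid flatbuffer, and `options` is assumed to already have the
/// required ops registered):
///
///     TfLiteModel* model = TfLiteModelCreate(model_data, model_size);
///     TfLiteInterpreter* interpreter =
///         TfLiteInterpreterCreateWithSelectedOps(model, options);
///     TfLiteModelDelete(model);  // The interpreter keeps its own reference.
///     TfLiteInterpreterAllocateTensors(interpreter);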
|
||||
/// Enable or disable the NN API delegate for the interpreter (true to enable).
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetUseNNAPI(
|
||||
TfLiteInterpreterOptions* options, bool enable);
|
||||
|
||||
/// Enable or disable CPU fallback for the interpreter (true to enable).
|
||||
/// If enabled, TfLiteInterpreterInvoke will do automatic fallback from
|
||||
/// executing with delegate(s) to regular execution without delegates
|
||||
/// (i.e. on CPU).
|
||||
///
|
||||
/// Allowing the fallback is suitable only if both of the following hold:
|
||||
/// - The caller is known not to cache pointers to tensor data across
|
||||
/// TfLiteInterpreterInvoke calls.
|
||||
/// - The model is not stateful (no variables, no LSTMs) or the state isn't
|
||||
/// needed between batches.
|
||||
///
|
||||
/// When delegate fallback is enabled, TfLiteInterpreterInvoke will
|
||||
/// behave as follows:
|
||||
/// If one or more delegates were set in the interpreter options
|
||||
/// (see TfLiteInterpreterOptionsAddDelegate),
|
||||
/// AND inference fails,
|
||||
/// then the interpreter will fall back to not using any delegates.
|
||||
/// In that case, the previously applied delegate(s) will be automatically
|
||||
/// undone, and an attempt will be made to return the interpreter to an
|
||||
/// invokable state, which may invalidate previous tensor addresses,
|
||||
/// and the inference will be attempted again, using input tensors with
|
||||
/// the same value as previously set.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetEnableDelegateFallback(
|
||||
TfLiteInterpreterOptions* options, bool enable);
|
||||
|
||||
/// Set if buffer handle output is allowed.
///
|
||||
/// When using hardware delegation, Interpreter will make the data of output
|
||||
/// tensors available in `tensor->data` by default. If the application can
|
||||
/// consume the buffer handle directly (e.g. reading output from OpenGL
|
||||
/// texture), it can set this flag to false, so Interpreter won't copy the
|
||||
/// data from buffer handle to CPU memory.
///
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteSetAllowBufferHandleOutput(
|
||||
const TfLiteInterpreter* interpreter, bool allow_buffer_handle_output);
|
||||
|
||||
/// Allow a delegate to look at the graph and modify the graph to handle
|
||||
/// parts of the graph themselves. After this is called, the graph may
|
||||
/// contain new nodes that replace 1 or more nodes.
|
||||
/// 'delegate' must outlive the interpreter.
|
||||
/// Use `TfLiteInterpreterOptionsAddDelegate` instead of this unless
|
||||
/// absolutely required.
|
||||
/// Returns one of the following three status codes:
|
||||
/// 1. kTfLiteOk: Success.
|
||||
/// 2. kTfLiteDelegateError: Delegation failed due to an error in the
|
||||
/// delegate. The Interpreter has been restored to its pre-delegation state.
|
||||
/// NOTE: This undoes all delegates previously applied to the Interpreter.
|
||||
/// 3. kTfLiteError: Unexpected/runtime failure.
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterModifyGraphWithDelegate(
|
||||
const TfLiteInterpreter* interpreter, TfLiteDelegate* delegate);
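///
/// Example (illustrative sketch; `my_delegate` is a hypothetical
/// `TfLiteDelegate` owned by the caller and outliving the interpreter):
///
///     if (TfLiteInterpreterModifyGraphWithDelegate(interpreter, my_delegate) !=
///         kTfLiteOk) {
///       // Delegation failed; on kTfLiteDelegateError the interpreter has been
///       // restored to its pre-delegation state.
///     }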
|
||||
/// Returns the tensor index corresponding to the input tensor at `input_index`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorIndex(
|
||||
const TfLiteInterpreter* interpreter, int32_t input_index);
|
||||
|
||||
/// Returns the tensor index corresponding to the output tensor at `output_index`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorIndex(
|
||||
const TfLiteInterpreter* interpreter, int32_t output_index);
|
||||
|
||||
/// --------------------------------------------------------------------------
|
||||
/// SignatureRunner APIs
|
||||
///
|
||||
/// You can run inference by either:
|
||||
///
|
||||
/// (i) (recommended) using the Interpreter to initialize SignatureRunner(s) and
|
||||
/// then only using SignatureRunner APIs.
|
||||
///
|
||||
/// (ii) only using Interpreter APIs.
|
||||
///
|
||||
/// NOTE:
|
||||
/// * Only use one of the above options to run inference, i.e. avoid mixing both
|
||||
/// SignatureRunner APIs and Interpreter APIs to run inference as they share
|
||||
/// the same underlying data (e.g. updating an input tensor “A” retrieved
|
||||
/// using the Interpreter APIs will update the state of the input tensor “B”
|
||||
/// retrieved using SignatureRunner APIs, if they point to the same underlying
|
||||
/// tensor in the model; as it is not possible for a user to debug this by
|
||||
/// analyzing the code, it can lead to undesirable behavior).
|
||||
/// * The TfLiteSignatureRunner type is conditionally thread-safe, provided that
|
||||
/// no two threads attempt to simultaneously access two TfLiteSignatureRunner
|
||||
/// instances that point to the same underlying signature, or access a
|
||||
/// TfLiteSignatureRunner and its underlying TfLiteInterpreter, unless all
|
||||
/// such simultaneous accesses are reads (rather than writes).
|
||||
/// * The lifetime of a TfLiteSignatureRunner object ends when
|
||||
/// TfLiteSignatureRunnerDelete() is called on it (or when the lifetime of the
|
||||
/// underlying TfLiteInterpreter ends -- but you should call
|
||||
/// TfLiteSignatureRunnerDelete() before that happens in order to avoid
|
||||
/// resource leaks).
|
||||
/// * You can only apply delegates to the interpreter (via
|
||||
/// TfLiteInterpreterOptions) and not to a signature.
|
||||
|
||||
/// Returns the number of signatures defined in the model.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetSignatureCount(
|
||||
const TfLiteInterpreter* interpreter);
|
||||
|
||||
/// Returns the key of the Nth signature in the model, where N is specified as
|
||||
/// `signature_index`.
|
||||
///
|
||||
/// NOTE: The lifetime of the returned key is the same as (and depends on) the
|
||||
/// lifetime of `interpreter`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteInterpreterGetSignatureKey(
|
||||
const TfLiteInterpreter* interpreter, int32_t signature_index);
|
||||
|
||||
/// Returns a new signature runner using the provided interpreter and signature
|
||||
/// key, or nullptr on failure.
|
||||
///
|
||||
/// NOTE: `signature_key` is a null-terminated C string that must match the
|
||||
/// key of a signature in the interpreter's model.
|
||||
///
|
||||
/// NOTE: The returned signature runner should be destroyed, by calling
|
||||
/// TfLiteSignatureRunnerDelete(), before the interpreter is destroyed.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteSignatureRunner*
|
||||
TfLiteInterpreterGetSignatureRunner(const TfLiteInterpreter* interpreter,
|
||||
const char* signature_key);
|
||||
|
||||
/// Returns the number of inputs associated with a signature.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern size_t TfLiteSignatureRunnerGetInputCount(
|
||||
const TfLiteSignatureRunner* signature_runner);
|
||||
|
||||
/// Returns the (null-terminated) name of the Nth input in a signature, where N
|
||||
/// is specified as `input_index`.
|
||||
///
|
||||
/// NOTE: The lifetime of the returned name is the same as (and depends on) the
|
||||
/// lifetime of `signature_runner`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteSignatureRunnerGetInputName(
|
||||
const TfLiteSignatureRunner* signature_runner, const int32_t input_index);
|
||||
|
||||
/// Resizes the input tensor identified as `input_name` to be the dimensions
|
||||
/// specified by `input_dims` and `input_dims_size`. Only unknown dimensions can
|
||||
/// be resized with this function. Unknown dimensions are indicated as `-1` in
|
||||
/// the `dims_signature` attribute of a TfLiteTensor.
|
||||
///
|
||||
/// Returns status of failure or success. Note that this doesn't actually resize
|
||||
/// any existing buffers. A call to TfLiteSignatureRunnerAllocateTensors() is
|
||||
/// required to change the tensor input buffer.
|
||||
///
|
||||
/// NOTE: This function is similar to TfLiteInterpreterResizeInputTensorStrict()
|
||||
/// and not TfLiteInterpreterResizeInputTensor().
|
||||
///
|
||||
/// NOTE: `input_name` must match the name of an input in the signature.
|
||||
///
|
||||
/// NOTE: This function makes a copy of the input dimensions, so the caller can
|
||||
/// safely deallocate `input_dims` immediately after this function returns.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerResizeInputTensor(
|
||||
TfLiteSignatureRunner* signature_runner, const char* input_name,
|
||||
const int* input_dims, int32_t input_dims_size);
|
||||
|
||||
/// Updates allocations for tensors associated with a signature and resizes
|
||||
/// dependent tensors using the specified input tensor dimensionality.
|
||||
/// This is a relatively expensive operation and hence should only be called
|
||||
/// after initializing the signature runner object and/or resizing any inputs.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerAllocateTensors(
|
||||
TfLiteSignatureRunner* signature_runner);
|
||||
|
||||
/// Returns the input tensor identified by `input_name` in the given signature.
|
||||
/// Returns nullptr if the given name is not valid.
|
||||
///
|
||||
/// NOTE: The lifetime of the returned tensor is the same as (and depends on)
|
||||
/// the lifetime of `signature_runner`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteSignatureRunnerGetInputTensor(
|
||||
TfLiteSignatureRunner* signature_runner, const char* input_name);
|
||||
|
||||
/// Runs inference on a given signature.
|
||||
///
|
||||
/// Before calling this function, the caller should first invoke
|
||||
/// TfLiteSignatureRunnerAllocateTensors() and should also set the values for
|
||||
/// the input tensors. After successfully calling this function, the values for
|
||||
/// the output tensors will be set.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerInvoke(
|
||||
TfLiteSignatureRunner* signature_runner);
|
||||
|
||||
/// Returns the number of output tensors associated with the signature.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern size_t TfLiteSignatureRunnerGetOutputCount(
|
||||
const TfLiteSignatureRunner* signature_runner);
|
||||
|
||||
/// Returns the (null-terminated) name of the Nth output in a signature, where
|
||||
/// N is specified as `output_index`.
|
||||
///
|
||||
/// NOTE: The lifetime of the returned name is the same as (and depends on) the
|
||||
/// lifetime of `signature_runner`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteSignatureRunnerGetOutputName(
|
||||
const TfLiteSignatureRunner* signature_runner, int32_t output_index);
|
||||
|
||||
/// Returns the output tensor identified by `output_name` in the given
|
||||
/// signature. Returns nullptr if the given name is not valid.
|
||||
///
|
||||
/// NOTE: The lifetime of the returned tensor is the same as (and depends on)
|
||||
/// the lifetime of `signature_runner`.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteSignatureRunnerGetOutputTensor(
|
||||
const TfLiteSignatureRunner* signature_runner, const char* output_name);
|
||||
|
||||
/// Attempts to cancel an in-flight invocation, if any.
/// This will not affect calls to `Invoke` that happen after this call.
/// Non-blocking and thread-safe.
|
||||
/// Returns kTfLiteError if cancellation is not enabled, otherwise returns
|
||||
/// kTfLiteOk.
|
||||
/// NOTE: Calling this function will cancel in-flight invocations
|
||||
/// in all SignatureRunners built from the same interpreter.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerCancel(
|
||||
TfLiteSignatureRunner* signature_runner);
|
||||
|
||||
/// Destroys the signature runner.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteSignatureRunnerDelete(
|
||||
TfLiteSignatureRunner* signature_runner);
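///
/// Example of the typical SignatureRunner flow (illustrative sketch; the
/// signature key "serving_default", the tensor names "input"/"output" and the
/// `input_buf`/`output_buf` buffers are hypothetical):
///
///     TfLiteSignatureRunner* runner =
///         TfLiteInterpreterGetSignatureRunner(interpreter, "serving_default");
///     const int dims[] = {1, 224, 224, 3};
///     TfLiteSignatureRunnerResizeInputTensor(runner, "input", dims, 4);
///     TfLiteSignatureRunnerAllocateTensors(runner);
///     TfLiteTensor* input = TfLiteSignatureRunnerGetInputTensor(runner, "input");
///     TfLiteTensorCopyFromBuffer(input, input_buf, TfLiteTensorByteSize(input));
///     TfLiteSignatureRunnerInvoke(runner);
///     const TfLiteTensor* output =
///         TfLiteSignatureRunnerGetOutputTensor(runner, "output");
///     TfLiteTensorCopyToBuffer(output, output_buf, TfLiteTensorByteSize(output));
///     TfLiteSignatureRunnerDelete(runner);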
|
||||
// Forward declaration, to avoid need for dependency on
|
||||
// tensorflow/lite/profiling/telemetry/profiler.h.
|
||||
struct TfLiteTelemetryProfilerStruct;
|
||||
|
||||
/// Registers the telemetry profiler to the interpreter.
|
||||
/// Note: The interpreter does not take the ownership of profiler, but callers
|
||||
/// must ensure profiler->data outlives the lifespan of the interpreter.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetTelemetryProfiler(
|
||||
TfLiteInterpreterOptions* options,
|
||||
struct TfLiteTelemetryProfilerStruct* profiler);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_
|
548
include/tensorflow/lite/core/c/c_api_opaque.h
Normal file
548
include/tensorflow/lite/core/c/c_api_opaque.h
Normal file
@ -0,0 +1,548 @@
|
||||
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_
|
||||
#define TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_
|
||||
|
||||
#include "tensorflow/lite/core/c/c_api.h"
|
||||
#include "tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export
|
||||
#include "tensorflow/lite/core/c/common.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif // __cplusplus
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
/// C API for TensorFlow Lite Opaque Types.
|
||||
///
|
||||
/// These APIs are accessors for TFLite Opaque Types. These APIs are primarily
|
||||
/// intended to be used by delegates and custom OP implementations.
|
||||
///
|
||||
/// WARNING: This is an experimental API and subject to change.
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Accessors for TfLiteOpaqueTensor.
|
||||
|
||||
// Returns the type of a tensor element.
|
||||
TFL_CAPI_EXPORT extern TfLiteType TfLiteOpaqueTensorType(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the number of dimensions that the tensor has. Returns -1 in case
|
||||
// the 'opaque_tensor' does not have its dimensions property set.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteOpaqueTensorNumDims(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the length of the tensor in the "dim_index" dimension.
|
||||
TFL_CAPI_EXPORT extern int32_t TfLiteOpaqueTensorDim(
|
||||
const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index);
|
||||
|
||||
// Loads into the provided 'num_dims' the number of dimensions that the tensor's
|
||||
// signature has. Returns 'kTfLiteOk' if 'num_dims' was successfully loaded. Any
|
||||
// other return code indicates an error and 'num_dims' won't be loaded.
|
||||
//
|
||||
// A tensor's dimension signature encodes shapes with unknown dimensions with
|
||||
// -1. E.g. for a tensor with three dimensions, whose first dimension has an
|
||||
// unknown size, and the second and third dimension have a size of 2, the
|
||||
// dimension signature is [-1,2,2], and 'TfLiteOpaqueTensorGetNumDimsSignature'
|
||||
// loads 3 into 'num_dims'. If the tensor does not have its dimension signature
|
||||
// field set then 'num_dims' is set to -1.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorGetNumDimsSignature(
|
||||
const TfLiteOpaqueTensor* opaque_tensor, int32_t* num_dims);
|
||||
|
||||
// Loads into the provided 'dim_length' the length of the tensor in the
|
||||
// 'dim_index' signature dimension or -1 if that dimension has unknown length.
|
||||
// Returns 'kTfLiteOk' if 'dim_length' was successfully loaded. Any
|
||||
// other return code indicates an error and 'dim_length' won't be loaded.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorGetDimSignature(
|
||||
const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index,
|
||||
int32_t* dim_length);
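//
// Example (illustrative sketch): inspecting the dimension signature of a
// tensor whose leading dimension may be dynamic.
//
//   int32_t num_dims = 0;
//   if (TfLiteOpaqueTensorGetNumDimsSignature(opaque_tensor, &num_dims) ==
//           kTfLiteOk &&
//       num_dims > 0) {
//     for (int32_t i = 0; i < num_dims; ++i) {
//       int32_t dim_length = 0;
//       TfLiteOpaqueTensorGetDimSignature(opaque_tensor, i, &dim_length);
//       // dim_length is -1 for dimensions of unknown size.
//     }
//   }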
|
||||
// Returns 'non-zero' if the provided 'opaque_tensor' is a variable, and returns
|
||||
// zero otherwise.
|
||||
TFL_CAPI_EXPORT extern int TfLiteOpaqueTensorIsVariable(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the size of the underlying data in bytes.
|
||||
TFL_CAPI_EXPORT extern size_t TfLiteOpaqueTensorByteSize(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns a pointer to the underlying data buffer.
|
||||
TFL_CAPI_EXPORT extern void* TfLiteOpaqueTensorData(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the 'opaque_tensor's allocation type.
|
||||
TFL_CAPI_EXPORT extern TfLiteAllocationType TfLiteOpaqueTensorGetAllocationType(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the (null-terminated) name of the tensor.
|
||||
TFL_CAPI_EXPORT extern const char* TfLiteOpaqueTensorName(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the 'opaque_tensor's quantization information.
|
||||
TFL_CAPI_EXPORT extern TfLiteQuantization TfLiteOpaqueTensorGetQuantization(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Returns the 'opaque_tensor's quantization parameters.
|
||||
TFL_CAPI_EXPORT extern TfLiteQuantizationParams
|
||||
TfLiteOpaqueTensorGetQuantizationParams(
|
||||
const TfLiteOpaqueTensor* opaque_tensor);
|
||||
|
||||
// Copies from the provided input buffer into the tensor's buffer.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorCopyFromBuffer(
|
||||
TfLiteOpaqueTensor* opaque_tensor, const void* input_data,
|
||||
size_t input_data_size);
|
||||
|
||||
// Copies to the provided output buffer from the tensor's buffer.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorCopyToBuffer(
|
||||
const TfLiteOpaqueTensor* opaque_tensor, void* output_data,
|
||||
size_t output_data_size);
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Accessors for TfLiteOpaqueNode.
|
||||
|
||||
// Returns the input tensor of the given node.
|
||||
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor* TfLiteOpaqueNodeGetInput(
|
||||
const TfLiteOpaqueContext* opaque_context,
|
||||
const TfLiteOpaqueNode* opaque_node, int index);
|
||||
|
||||
// Returns the output tensor of the given node.
|
||||
TFL_CAPI_EXPORT extern TfLiteOpaqueTensor* TfLiteOpaqueNodeGetOutput(
|
||||
TfLiteOpaqueContext* opaque_context, const TfLiteOpaqueNode* opaque_node,
|
||||
int index);
|
||||
|
||||
// Gets the number of input tensors of the provided 'opaque_node'.
|
||||
TFL_CAPI_EXPORT int TfLiteOpaqueNodeNumberOfInputs(
|
||||
const TfLiteOpaqueNode* opaque_node);
|
||||
|
||||
// Gets the number of output tensors of the provided 'opaque_node'.
|
||||
TFL_CAPI_EXPORT int TfLiteOpaqueNodeNumberOfOutputs(
|
||||
const TfLiteOpaqueNode* opaque_node);
|
||||
|
||||
// Returns opaque data provided by the node implementer. The value returned
|
||||
// from this function is the value that was returned from the `init` callback
|
||||
// that was passed to `TfLiteRegistrationExternalSetInit`.
|
||||
TFL_CAPI_EXPORT extern void* TfLiteOpaqueNodeGetUserData(
|
||||
const TfLiteOpaqueNode* opaque_node);
|
||||
|
||||
// Returns the builtin data associated with the provided 'opaque_node'.
|
||||
//
|
||||
// The builtin init data associated with a node would typically be set during
|
||||
// the creation of the associated interpreter, through a mechanism like the
|
||||
// interpreter builder that loads a TFLite model and initialises the
|
||||
// interpreter's nodes accordingly. Under these conditions the returned address
|
||||
// remains valid throughout the lifetime of the 'opaque_node'.
|
||||
TFL_CAPI_EXPORT extern void* TfLiteOpaqueNodeGetBuiltinData(
|
||||
const TfLiteOpaqueNode* opaque_node);
|
||||
|
||||
// Loads into the provided '*init_data' pointer the address of the custom init
|
||||
// data associated with the provided 'opaque_node'. The length of data is
|
||||
// loaded into the provided 'size' pointer. Returns 'kTfLiteOk' in case
|
||||
// of success. Any other return value indicates a failure and will leave
|
||||
// 'init_data' and 'size' in an unspecified state.
|
||||
//
|
||||
// The custom init data associated with a node would typically be set during the
|
||||
// creation of the associated interpreter, through a mechanism like the
|
||||
// interpreter builder that loads a TFLite model and initialises the
|
||||
// interpreter's nodes accordingly. Under these conditions the returned address
|
||||
// remains valid throughout the lifetime of the 'opaque_node'.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueNodeGetCustomInitialData(
|
||||
const TfLiteOpaqueNode* opaque_node, const void** init_data, int* size);
|
||||
|
||||
// Loads into the provided '*inputs' pointer the starting address of an array
|
||||
// of indices representing the tensors that are inputs of the provided
|
||||
// 'opaque_node'. The length of the array is loaded into the provided
|
||||
// 'num_inputs' pointer. Returns 'kTfLiteOk' in case of success. Any other
|
||||
// return value indicates a failure and will leave 'inputs' and
|
||||
// 'num_inputs' in an unspecified state.
|
||||
//
|
||||
// The input tensors associated with a node would typically be set during the
|
||||
// creation of the associated interpreter, through a mechanism like the
|
||||
// interpreter builder that loads a TFLite model and initialises the
|
||||
// interpreter's nodes accordingly. Under these conditions the loaded address
|
||||
// remains valid throughout the lifetime of the 'opaque_node'.
|
||||
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeInputs(
|
||||
const TfLiteOpaqueNode* opaque_node, const int** inputs, int* num_inputs);
|
||||
|
||||
// Loads into the provided '*outputs' pointer the starting address of an array
|
||||
// of indices representing the tensors that are outputs of the provided
|
||||
// 'opaque_node'. The length of the array is loaded into the provided
|
||||
// 'num_outputs' pointer. Returns 'kTfLiteOk' in case of success. Any other
|
||||
// return value indicates a failure and will leave 'outputs' and
|
||||
// 'num_outputs' in an unspecified state.
|
||||
//
|
||||
// The output tensors associated with a node would typically be set during the
|
||||
// creation of the associated interpreter, through a mechanism like the
|
||||
// interpreter builder that loads a TFLite model and initialises the
|
||||
// interpreter's nodes accordingly. Under these conditions the loaded address
|
||||
// remains valid throughout the lifetime of the 'opaque_node'.
|
||||
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeOutputs(
|
||||
const TfLiteOpaqueNode* opaque_node, const int** outputs, int* num_outputs);
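//
// Example (illustrative sketch): iterating over a node's input tensors from a
// delegate kernel callback, assuming `opaque_context` and `opaque_node` are the
// callback parameters.
//
//   const int* input_indices = NULL;
//   int num_inputs = 0;
//   if (TfLiteOpaqueNodeInputs(opaque_node, &input_indices, &num_inputs) ==
//       kTfLiteOk) {
//     for (int i = 0; i < num_inputs; ++i) {
//       TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(
//           opaque_context, input_indices[i]);
//       // Inspect `tensor`, e.g. via TfLiteOpaqueTensorType(tensor).
//     }
//   }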
|
||||
// Loads into the provided '*temporaries' pointer the starting address of an
|
||||
// array of indices representing the temporary tensors associated with the
|
||||
// provided 'opaque_node'. The length of the array is loaded into the provided
|
||||
// 'num_temporaries' pointer. Returns 'kTfLiteOk' in case of success. Any other
|
||||
// return value indicates a failure and will leave 'temporaries' and
|
||||
// 'num_temporaries' in an unspecified state.
|
||||
//
|
||||
// The temporary tensors associated with a node would typically be set during
|
||||
// the creation of the associated interpreter, through a mechanism like the
|
||||
// interpreter builder that loads a TFLite model and initialises the
|
||||
// interpreter's nodes accordingly. Under these conditions the loaded address
|
||||
// remains valid throughout the lifetime of the 'opaque_node'.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteStatus TfLiteOpaqueNodeTemporaries(const TfLiteOpaqueNode* opaque_node,
|
||||
const int** temporaries,
|
||||
int* num_temporaries);
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Accessors for TfLiteOpaqueContext.
|
||||
|
||||
typedef struct TfLiteIntArray TfLiteIntArray;
|
||||
|
||||
// Loads the provided `execution_plan` associated with the provided
|
||||
// `opaque_context`. Returns `kTfLiteOk` if the `execution_plan` was
|
||||
// successfully loaded. A return value different from `kTfLiteOk` indicates a
|
||||
// failure and the `execution_plan` will be left in an unspecified state.
|
||||
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueContextGetExecutionPlan(
|
||||
TfLiteOpaqueContext* opaque_context, TfLiteIntArray** execution_plan);
|
||||
|
||||
// Given the specified 'opaque_context' and 'node_index', load the caller's
|
||||
// opaque '*node' and '*registration_external' pointer. Return 'kTfLiteOk' if
|
||||
// both the '*node' as well as the '*registration_external' have been loaded
|
||||
// correctly. Any other return code indicates a failure and both '*node' as
|
||||
// well as '*registration_external' will be in an unspecified state.
|
||||
//
|
||||
// A caller can obtain a node's index by calling
|
||||
// 'TfLiteOpaqueContextGetExecutionPlan', which provides an array of node
|
||||
// indices, sorted in execution order. A node index might also come from the
|
||||
// data structures passed to the delegate kernel's callback parameters, like the
|
||||
// delegate parameters data structure passed to the 'init' callback that
|
||||
// contains an array of node indices that are meant to be handled by the
|
||||
// delegate kernel.
|
||||
//
|
||||
// This function is expected to be called from within a delegate callback, like
|
||||
// 'Prepare', or a delegate kernel callback (i.e., a callback registered with
|
||||
// a 'TfLiteRegistrationExternal' object).
|
||||
//
|
||||
// The loaded '*node' and '*registration_external' pointers will generally
|
||||
// remain valid for the lifetime of the associated 'opaque_context', but can be
|
||||
// invalidated through API calls where delegates get un-applied, like API calls
|
||||
// that modify the model graph via a delegate, or if input tensors get re-sized.
|
||||
//
|
||||
// TODO(b/237983452): Further clarify the lifetime guarantees of pointers that
|
||||
// are returned to the users and which actions invalidate them.
|
||||
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueContextGetNodeAndRegistration(
|
||||
struct TfLiteOpaqueContext* opaque_context, int node_index,
|
||||
TfLiteOpaqueNode** node,
|
||||
TfLiteRegistrationExternal** registration_external);
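//
// Example (illustrative sketch): walking the execution plan from a delegate's
// 'Prepare' callback to decide which nodes the delegate can handle.
//
//   TfLiteIntArray* plan = NULL;
//   if (TfLiteOpaqueContextGetExecutionPlan(opaque_context, &plan) == kTfLiteOk) {
//     for (int i = 0; i < plan->size; ++i) {
//       TfLiteOpaqueNode* node = NULL;
//       TfLiteRegistrationExternal* registration = NULL;
//       if (TfLiteOpaqueContextGetNodeAndRegistration(
//               opaque_context, plan->data[i], &node, &registration) !=
//           kTfLiteOk) {
//         continue;
//       }
//       // Check `registration` (e.g. its builtin code) to decide whether the
//       // delegate supports this node.
//     }
//   }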
|
||||
// WARNING: This is an experimental API and subject to change.
|
||||
// Entry point for C API ReplaceNodeSubsetsWithDelegateKernels
|
||||
//
|
||||
// Replaces the specified `nodes_to_replace` that are associated with the
|
||||
// provided `opaque_context` with delegate kernels. The provided
|
||||
// `registration_external` represents the delegate kernel and will be used for
|
||||
// each node subset that will be delegated to the provided `opaque_delegate`.
|
||||
//
|
||||
// The TF Lite runtime will take ownership of the `registration_external` and
|
||||
// will delete it when the associated `opaque_context` gets destroyed.
|
||||
//
|
||||
// The ownership of the `nodes_to_replace` and the `opaque_delegate` remains
|
||||
// with the caller.
|
||||
TFL_CAPI_EXPORT TfLiteStatus
|
||||
TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
|
||||
struct TfLiteOpaqueContext* opaque_context,
|
||||
TfLiteRegistrationExternal* registration_external,
|
||||
const TfLiteIntArray* nodes_to_replace,
|
||||
TfLiteOpaqueDelegate* opaque_delegate);
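//
// Example (illustrative sketch; `nodes_to_replace` is assumed to be a
// TfLiteIntArray of supported node indices collected as in the sketch above,
// and `delegate_kernel_registration` a TfLiteRegistrationExternal describing
// the delegate kernel):
//
//   TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
//       opaque_context, delegate_kernel_registration, nodes_to_replace,
//       opaque_delegate);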
|
||||
// Returns modifiable access to the opaque tensor that corresponds to the
|
||||
// specified `index` and is associated with the provided `opaque_context`.
|
||||
//
|
||||
// This requires the `index` to be between 0 and N - 1, where N is the
|
||||
// number of tensors in the model.
|
||||
//
|
||||
// Typically the tensors associated with the `context` would be set
|
||||
// during the initialization of the `interpreter` that the `context` belongs to,
|
||||
// through a mechanism like the `InterpreterBuilder`, and remain unchanged
|
||||
// throughout the lifetime of the interpreter. However, there are some
|
||||
// circumstances in which the pointer may not remain valid throughout the
|
||||
// lifetime of the interpreter, because calls to `AddTensors` on the interpreter
|
||||
// invalidate the returned pointer.
|
||||
//
|
||||
// The ownership of the tensor remains with the TFLite runtime, meaning the
|
||||
// caller should not deallocate the pointer.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteOpaqueTensor* TfLiteOpaqueContextGetOpaqueTensor(
|
||||
const TfLiteOpaqueContext* opaque_context, int index);
|
||||
|
||||
// Loads into the provided '*inputs' pointer the starting address of an array
|
||||
// of indices representing the tensors that are inputs to the subgraph that is
|
||||
// associated with the provided 'opaque_context'. The length of the array is
|
||||
// loaded into the provided 'num_inputs' pointer. Returns 'kTfLiteOk' in case
|
||||
// of success. Any other return value indicates a failure and will leave
|
||||
// 'inputs' and 'num_inputs' in an unspecified state. Calls to 'SetInputs' on
|
||||
// the associated subgraph invalidate the loaded pointers.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteStatus TfLiteOpaqueContextGetInputs(
|
||||
const struct TfLiteOpaqueContext* opaque_context, const int** inputs,
|
||||
int* num_inputs);
|
||||
|
||||
// Loads into the provided '*outputs' pointer the starting address of an array
|
||||
// of indices representing the tensors that are outputs to the subgraph that is
|
||||
// associated with the provided 'opaque_context'. The length of the array is
|
||||
// loaded into the provided 'num_outputs' pointer. Returns 'kTfLiteOk' in case
|
||||
// of success. Any other return value indicates a failure and will leave
|
||||
// 'outputs' and 'num_outputs' in an unspecified state. Calls to 'SetOutputs'
|
||||
// on the associated subgraph invalidate the loaded pointers.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteStatus TfLiteOpaqueContextGetOutputs(
|
||||
const struct TfLiteOpaqueContext* opaque_context, const int** outputs,
|
||||
int* num_outputs);
|
||||
|
||||
// Loads into the provided '*variables' pointer the starting address of an array
|
||||
// of indices representing the tensors that are variables to the subgraph that
|
||||
// is associated with the provided 'opaque_context'. The length of the array is
|
||||
// loaded into the provided 'num_variables' pointer. Returns 'kTfLiteOk' in
|
||||
// case of success. Any other return value indicates a failure and will leave
|
||||
// 'variables' and 'num_variables' in an unspecified state. Calls to
|
||||
// 'SetVariables' on the associated subgraph invalidate the loaded pointers.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteStatus TfLiteOpaqueContextGetVariables(
|
||||
const struct TfLiteOpaqueContext* opaque_context, const int** variables,
|
||||
int* num_variables);
|
||||
|
||||
// Returns the number of nodes associated with the provided 'opaque_context'.
|
||||
TFL_CAPI_EXPORT
|
||||
size_t TfLiteOpaqueContextGetNumNodes(
|
||||
const struct TfLiteOpaqueContext* opaque_context);
|
||||
|
||||
// Returns the number of tensors associated with the provided 'opaque_context'.
|
||||
TFL_CAPI_EXPORT
|
||||
size_t TfLiteOpaqueContextGetNumTensors(
|
||||
const struct TfLiteOpaqueContext* opaque_context);
|
||||
|
||||
// Returns the name of the subgraph that is associated with the provided
|
||||
// 'opaque_context'. Typically the returned pointer will remain valid
|
||||
// throughout the lifetime of the subgraph, but may be invalidated by a call to
|
||||
// 'Subgraph::SetName'.
|
||||
TFL_CAPI_EXPORT
|
||||
const char* TfLiteOpaqueContextGetName(
|
||||
const struct TfLiteOpaqueContext* opaque_context);
|
||||
|
||||
// Resizes the provided 'tensor' that is associated with the provided
|
||||
// 'context' so that the 'tensor's shape matches the dimensionality specified
|
||||
// via the provided 'new_size' array. Returns 'kTfLiteOk' in
|
||||
// case of success. Any other return value indicates a failure and will leave
|
||||
// the 'tensor' in an unspecified state. The TF Lite runtime takes ownership
|
||||
// of the 'new_size' array, even in case of failure.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteStatus TfLiteOpaqueContextResizeTensor(TfLiteOpaqueContext* context,
|
||||
TfLiteOpaqueTensor* tensor,
|
||||
TfLiteIntArray* new_size);
|
||||
|
||||
// Entry point for C API GetSubgraphContext.
|
||||
//
|
||||
// Retrieves the corresponding TfLiteOpaqueContext of a subgraph given a
|
||||
// subgraph index. If an invalid subgraph index is given, then returns nullptr.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteOpaqueContext* TfLiteOpaqueContextGetSubgraphContext(
|
||||
struct TfLiteOpaqueContext* opaque_context, int subgraph_index);
|
||||
|
||||
// Entry point for C API MarkSubgraphAsDelegationSkippable
|
||||
//
|
||||
// Marks the subgraph with the given index as "delegation-skippable". Returns
|
||||
// kTfLiteOk if the given subgraph index is valid and is successfully marked
|
||||
// as delegation-skippable, and an error status if the subgraph index is
|
||||
// invalid.
|
||||
// If a subgraph is delegation-skippable, then the subgraph will be handled by a
|
||||
// TfLiteOpaqueDelegate (and the delegate is supposed to already be aware
|
||||
// of this state), and therefore, TfLiteInterpreter can skip invoking
|
||||
// `ModifyGraphWithDelegate` on this subgraph.
|
||||
// NOTE: This function is expected to be called only when the subgraph that
|
||||
// `subgraph_index` is pointing to should be skipped by
|
||||
// interpreter::ModifyGraphWithDelegate (e.g. the subgraph is part of the list
|
||||
// of callee subgraphs of the same control flow node, and all of those callees
|
||||
// are supported by the same delegate at once).
|
||||
//
|
||||
// For example, this function can be used when the delegate is handling control
|
||||
// flow ops like while op.
|
||||
// E.g. A while op has condition subgraph indexed at `i` and body subgraph
|
||||
// indexed at `j`. The op can be delegated when the following conditions are
// satisfied:
|
||||
// 1. The delegate supports while op
|
||||
// 2. Both condition subgraph `i` and body subgraph `j` can be fully delegated
|
||||
// by the delegate.
|
||||
// Then if the delegate decides to support the while node along with both body
|
||||
// and condition subgraphs, it should mark subgraphs `i` and `j` skippable so
|
||||
// those two subgraphs won't be delegated separately again after being
|
||||
// absorbed by the parent subgraph.
|
||||
// WARNING: It is the delegate's responsibility to define when to skip
|
||||
// subgraph->ModifyGraphWithDelegate, to check any edge cases (i.e. multiple
|
||||
// references to the subgraph that `subgraph_index` is pointing to), and to mark
|
||||
// that subgraph as skippable using this function.
|
||||
TFL_CAPI_EXPORT
|
||||
TfLiteStatus TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
|
||||
TfLiteOpaqueContext* opaque_context, int subgraph_index);
|
||||
|
||||
// Reports an error message formed by using the provided 'format' string in
|
||||
// combination with the data provided via the unnamed arguments following the
|
||||
// 'format' parameter ('...'). The intended usage and behavior is the same
|
||||
// as with 'printf' with regards to how the data and the formatting string
|
||||
// interact. E.g.
|
||||
// 'TfLiteOpaqueContextReportError(opaque_context, "a=%d b=%d", a, b);'
|
||||
//
|
||||
// The provided 'opaque_context' will be used for reporting the resulting error
|
||||
// message.
|
||||
//
|
||||
// Note that TF Lite clients can use macros like 'TF_LITE_OPAQUE_ENSURE' to
|
||||
// check for certain conditions to be true, and print an error message if the
|
||||
// condition does not hold. Direct usage of this function from application code
|
||||
// should therefore be rare.
|
||||
TFL_CAPI_EXPORT
|
||||
void TfLiteOpaqueContextReportError(struct TfLiteOpaqueContext* opaque_context,
|
||||
const char* format, ...);
|
||||
|
||||
// Same as 'TfLiteOpaqueContextReportError', but with the variable arguments
|
||||
// passed via a 'va_list' instead of directly.
|
||||
//
|
||||
// Callers that receive an ellipsis and want to forward it to
|
||||
// the opaque context error reporting API can add the ellipsis content to a
|
||||
// 'va_list' and then call 'TfLiteOpaqueContextReportErrorVa'. E.g.:
|
||||
//
|
||||
// void MyErrorReporter(struct TfLiteOpaqueContext* opaque_context,
|
||||
// const char* format, ...) {
|
||||
// va_list vlist;
|
||||
// va_start(vlist, format);
|
||||
// TfLiteOpaqueContextReportErrorVa(opaque_context, format, vlist);
|
||||
// va_end(vlist);
|
||||
// }
|
||||
TFL_CAPI_EXPORT
|
||||
void TfLiteOpaqueContextReportErrorVa(
|
||||
struct TfLiteOpaqueContext* opaque_context, const char* format,
|
||||
va_list vlist);
|
||||
|
||||
// Since we must not depend on any libraries, define a minimal subset of
|
||||
// error macros while avoiding names that have pre-conceived meanings like
|
||||
// assert and check.
|
||||
|
||||
// Try to make all reporting calls through TF_LITE_OPAQUE_KERNEL_LOG rather than
|
||||
// calling the TfLiteOpaqueContextReportError function directly, so that message
|
||||
// strings can be stripped out if the binary size needs to be severely
|
||||
// optimized.
|
||||
#ifndef TF_LITE_STRIP_ERROR_STRINGS
|
||||
|
||||
#if !defined(TF_LITE_OPAQUE_KERNEL_LOG)
|
||||
#define TF_LITE_OPAQUE_KERNEL_LOG(opaque_context, ...) \
|
||||
do { \
|
||||
TfLiteOpaqueContextReportError((opaque_context), __VA_ARGS__); \
|
||||
} while (false)
|
||||
#endif
|
||||
|
||||
#if !defined(TF_LITE_OPAQUE_MAYBE_KERNEL_LOG)
|
||||
#define TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(opaque_context, ...) \
|
||||
do { \
|
||||
if ((opaque_context) != nullptr) { \
|
||||
TfLiteOpaqueContextReportError((opaque_context), __VA_ARGS__); \
|
||||
} \
|
||||
} while (false)
|
||||
#endif
|
||||
|
||||
#else // TF_LITE_STRIP_ERROR_STRINGS
|
||||
#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__)
|
||||
|
||||
#if !defined(TF_LITE_OPAQUE_KERNEL_LOG)
|
||||
#define TF_LITE_OPAQUE_KERNEL_LOG(opaque_context, ...) ARGS_UNUSED(__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
#if !defined(TF_LITE_OPAQUE_MAYBE_KERNEL_LOG)
|
||||
#define TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(opaque_context, ...) \
|
||||
ARGS_UNUSED(__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
#endif // TF_LITE_STRIP_ERROR_STRINGS
|
||||
|
||||
// Check whether value is true, and if not return kTfLiteError from
|
||||
// the current function (and report the error string msg).
|
||||
#if !defined(TF_LITE_OPAQUE_ENSURE_MSG)
|
||||
#define TF_LITE_OPAQUE_ENSURE_MSG(opaque_context, value, msg) \
|
||||
do { \
|
||||
if (!(value)) { \
|
||||
TF_LITE_OPAQUE_KERNEL_LOG((opaque_context), __FILE__ " " msg); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
// Check whether the value `a` is true, and if not return kTfLiteError from
|
||||
// the current function, while also reporting the location of the error.
|
||||
#if !defined(TF_LITE_OPAQUE_ENSURE)
|
||||
#define TF_LITE_OPAQUE_ENSURE(opaque_context, a) \
|
||||
do { \
|
||||
if (!(a)) { \
|
||||
TF_LITE_OPAQUE_KERNEL_LOG(opaque_context, "%s:%d: %s was not true.", \
|
||||
__FILE__, __LINE__, #a); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
#endif

// Check whether the value `a == b` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
// NOTE: Use TF_LITE_OPAQUE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
#if !defined(TF_LITE_OPAQUE_ENSURE_EQ)
#define TF_LITE_OPAQUE_ENSURE_EQ(opaque_context, a, b)                      \
  do {                                                                      \
    if ((a) != (b)) {                                                       \
      TF_LITE_OPAQUE_KERNEL_LOG((opaque_context),                           \
                                "%s:%d: %s != %s (%d != %d)", __FILE__,     \
                                __LINE__, #a, #b, (a), (b));                \
      return kTfLiteError;                                                  \
    }                                                                       \
  } while (0)
#endif

#if !defined(TF_LITE_OPAQUE_ENSURE_TYPES_EQ)
#define TF_LITE_OPAQUE_ENSURE_TYPES_EQ(opaque_context, a, b)                  \
  do {                                                                        \
    if ((a) != (b)) {                                                         \
      TF_LITE_OPAQUE_KERNEL_LOG(                                              \
          (opaque_context), "%s:%d: %s != %s (%s != %s)", __FILE__, __LINE__, \
          #a, #b, TfLiteTypeGetName(a), TfLiteTypeGetName(b));                \
      return kTfLiteError;                                                    \
    }                                                                         \
  } while (0)
#endif

#if !defined(TF_LITE_OPAQUE_ENSURE_NEAR)
#define TF_LITE_OPAQUE_ENSURE_NEAR(opaque_context, a, b, epsilon)             \
  do {                                                                        \
    double delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a));                   \
    if (delta > epsilon) {                                                    \
      TF_LITE_OPAQUE_KERNEL_LOG((opaque_context),                             \
                                "%s:%d: %s not near %s (%f != %f)", __FILE__, \
                                __LINE__, #a, #b, (double)(a), (double)(b));  \
      return kTfLiteError;                                                    \
    }                                                                         \
  } while (0)
#endif
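
// Illustrative sketch (not part of the upstream header): typical use of the
// ENSURE macros in a kernel's Prepare step. The function and its parameters
// are hypothetical; the macros, TfLiteType and TfLiteTypeGetName come from
// the TF Lite C headers.
//
//   static TfLiteStatus CheckBinaryOpInputs(TfLiteOpaqueContext* context,
//                                           TfLiteType lhs_type,
//                                           TfLiteType rhs_type,
//                                           int lhs_size, int rhs_size,
//                                           float scale) {
//     // Returns kTfLiteError (and logs the file name) if the check fails.
//     TF_LITE_OPAQUE_ENSURE_MSG(context, lhs_size > 0,
//                               "lhs must not be empty");
//     TF_LITE_OPAQUE_ENSURE(context, rhs_size > 0);
//     // Both operands must have the same element count and element type.
//     TF_LITE_OPAQUE_ENSURE_EQ(context, lhs_size, rhs_size);
//     TF_LITE_OPAQUE_ENSURE_TYPES_EQ(context, lhs_type, rhs_type);
//     // Floating-point comparison with an explicit tolerance.
//     TF_LITE_OPAQUE_ENSURE_NEAR(context, scale, 1.0f, 1e-6);
//     return kTfLiteOk;
//   }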

#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus

#endif  // TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_
169
include/tensorflow/lite/core/c/c_api_types.h
Normal file
@ -0,0 +1,169 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file declares types used by the pure C inference API defined in c_api.h,
// some of which are also used in the C++ and C kernel and interpreter APIs.

/// WARNING: Users of TensorFlow Lite should not include this file directly,
/// but should instead include
/// "third_party/tensorflow/lite/c/c_api_types.h".
/// Only the TensorFlow Lite implementation itself should include this
/// file directly.
// IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h"

#ifndef TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_
#define TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_

#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
// library.
#ifdef SWIG
#define TFL_CAPI_EXPORT
#elif defined(TFL_STATIC_LIBRARY_BUILD)
#define TFL_CAPI_EXPORT
#else  // not defined TFL_STATIC_LIBRARY_BUILD
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif  // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif  // _WIN32
#endif  // SWIG

// Note that new error status values may be added in the future in order to
// indicate more fine-grained internal states; therefore, applications should
// not rely on status values being members of the enum.
typedef enum TfLiteStatus {
  kTfLiteOk = 0,

  // Generally referring to an error in the runtime (i.e. interpreter)
  kTfLiteError = 1,

  // Generally referring to an error from a TfLiteDelegate itself.
  kTfLiteDelegateError = 2,

  // Generally referring to an error in applying a delegate due to
  // incompatibility between runtime and delegate, e.g., this error is returned
  // when trying to apply a TF Lite delegate onto a model graph that's already
  // immutable.
  kTfLiteApplicationError = 3,

  // Generally referring to serialized delegate data not being found.
  // See tflite::delegates::Serialization.
  kTfLiteDelegateDataNotFound = 4,

  // Generally referring to data-writing issues in delegate serialization.
  // See tflite::delegates::Serialization.
  kTfLiteDelegateDataWriteError = 5,

  // Generally referring to data-reading issues in delegate serialization.
  // See tflite::delegates::Serialization.
  kTfLiteDelegateDataReadError = 6,

  // Generally referring to issues when the TF Lite model has ops that cannot
  // be resolved at runtime. This could happen when the specific op is not
  // registered or built with the TF Lite framework.
  kTfLiteUnresolvedOps = 7,

  // Generally referring to invocation cancelled by the user.
  // See `interpreter::Cancel`.
  // TODO(b/194915839): Implement `interpreter::Cancel`.
  // TODO(b/250636993): Cancellation triggered by `SetCancellationFunction`
  // should also return this status code.
  kTfLiteCancelled = 8,
} TfLiteStatus;
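
// Illustrative sketch (not part of the upstream header): mapping a
// TfLiteStatus to a short description for logging. 'StatusToString' is a
// hypothetical helper; the default branch matters because, as noted above,
// new status values may be added over time.
//
//   static const char* StatusToString(TfLiteStatus status) {
//     switch (status) {
//       case kTfLiteOk:                     return "ok";
//       case kTfLiteError:                  return "runtime error";
//       case kTfLiteDelegateError:          return "delegate error";
//       case kTfLiteApplicationError:       return "delegate incompatible with graph";
//       case kTfLiteDelegateDataNotFound:   return "serialized delegate data not found";
//       case kTfLiteDelegateDataWriteError: return "delegate data write error";
//       case kTfLiteDelegateDataReadError:  return "delegate data read error";
//       case kTfLiteUnresolvedOps:          return "unresolved ops";
//       case kTfLiteCancelled:              return "cancelled";
//       default:                            return "unknown status";
//     }
//   }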

// Types supported by tensor
typedef enum {
  kTfLiteNoType = 0,
  kTfLiteFloat32 = 1,
  kTfLiteInt32 = 2,
  kTfLiteUInt8 = 3,
  kTfLiteInt64 = 4,
  kTfLiteString = 5,
  kTfLiteBool = 6,
  kTfLiteInt16 = 7,
  kTfLiteComplex64 = 8,
  kTfLiteInt8 = 9,
  kTfLiteFloat16 = 10,
  kTfLiteFloat64 = 11,
  kTfLiteComplex128 = 12,
  kTfLiteUInt64 = 13,
  kTfLiteResource = 14,
  kTfLiteVariant = 15,
  kTfLiteUInt32 = 16,
  kTfLiteUInt16 = 17,
  kTfLiteInt4 = 18,
} TfLiteType;
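
// Illustrative sketch (not part of the upstream header): byte width of a
// single element for the fixed-size tensor types, assuming the standard
// widths those names imply. 'ElementByteWidth' is a hypothetical helper;
// variable-length types (string, resource, variant) and the sub-byte int4
// type have no whole-byte per-element width and return 0 here.
//
//   static int ElementByteWidth(TfLiteType type) {
//     switch (type) {
//       case kTfLiteUInt8:
//       case kTfLiteInt8:
//       case kTfLiteBool:       return 1;
//       case kTfLiteInt16:
//       case kTfLiteUInt16:
//       case kTfLiteFloat16:    return 2;
//       case kTfLiteInt32:
//       case kTfLiteUInt32:
//       case kTfLiteFloat32:    return 4;
//       case kTfLiteInt64:
//       case kTfLiteUInt64:
//       case kTfLiteFloat64:
//       case kTfLiteComplex64:  return 8;
//       case kTfLiteComplex128: return 16;
//       default:                return 0;  // No fixed per-element width.
//     }
//   }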

// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
// If per-layer quantization is specified this field will still be populated in
// addition to TfLiteAffineQuantization.
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
//   real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteQuantizationParams {
  float scale;
  int32_t zero_point;
} TfLiteQuantizationParams;
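
// Illustrative sketch (not part of the upstream header): applying the
// asymmetric quantization formula from the comment above,
// real_value = scale * (quantized_value - zero_point), and its inverse.
// The round-to-nearest and clamping policy in 'QuantizeToInt8' is an
// assumption, not something this header specifies, and scale is assumed
// to be non-zero.
//
//   static float Dequantize(TfLiteQuantizationParams params, int8_t q) {
//     return params.scale * (float)(q - params.zero_point);
//   }
//
//   static int8_t QuantizeToInt8(TfLiteQuantizationParams params, float real) {
//     int32_t q = (int32_t)(real / params.scale + (real >= 0 ? 0.5f : -0.5f)) +
//                 params.zero_point;
//     if (q < -128) q = -128;
//     if (q > 127) q = 127;
//     return (int8_t)q;
//   }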

// --------------------------------------------------------------------------
// Opaque types used by c_api.h, c_api_opaque.h and common.h.

// TfLiteOpaqueContext is an opaque version of TfLiteContext.
typedef struct TfLiteOpaqueContext TfLiteOpaqueContext;

// TfLiteOpaqueNode is an opaque version of TfLiteNode.
typedef struct TfLiteOpaqueNode TfLiteOpaqueNode;

// TfLiteOpaqueTensor is an opaque version of TfLiteTensor.
typedef struct TfLiteOpaqueTensor TfLiteOpaqueTensor;

// TfLiteDelegate: allows delegation of nodes to alternative backends.
// Forward declaration of concrete type declared in common.h.
typedef struct TfLiteDelegate TfLiteDelegate;

// TfLiteOpaqueDelegateStruct: unconditionally opaque version of
// TfLiteDelegate; allows delegation of nodes to alternative backends.
//
// This is an abstract type that is intended to have the same
// role as TfLiteDelegate, but without exposing the implementation
// details of how delegates are implemented.
// WARNING: This is an experimental type and subject to change.
typedef struct TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateStruct;

// TfLiteOpaqueDelegate: conditionally opaque version of
// TfLiteDelegate; allows delegation of nodes to alternative backends.
// For TF Lite in Play Services, this is an opaque type,
// but for regular TF Lite, this is just a typedef for TfLiteDelegate.
// WARNING: This is an experimental type and subject to change.
#if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE
typedef TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegate;
#else
typedef TfLiteDelegate TfLiteOpaqueDelegate;
#endif

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_
1205
include/tensorflow/lite/core/c/common.h
Normal file
File diff suppressed because it is too large