diff options
author | Lev Proleev <levp@google.com> | 2019-11-21 17:35:30 +0000 |
---|---|---|
committer | Lev Proleev <levp@google.com> | 2019-11-27 14:55:49 +0000 |
commit | cdb67453de1f54e38b6d321a4fc2e658d4734dd7 (patch) | |
tree | 4fc356b8e180c4ffcde10d8c260b983392edfa5f /neuralnetworks | |
parent | d1b3450b3d317cb687f87b6dd954f288adfdaf82 (diff) | |
download | platform_hardware_interfaces-cdb67453de1f54e38b6d321a4fc2e658d4734dd7.tar.gz platform_hardware_interfaces-cdb67453de1f54e38b6d321a4fc2e658d4734dd7.tar.bz2 platform_hardware_interfaces-cdb67453de1f54e38b6d321a4fc2e658d4734dd7.zip |
Add TENSOR_QUANT8_ASYMM_SIGNED support for activations
Ops updated: RELU, RELU1, RELU6, TANH, LOGISTIC
Fix: 143933951
Fix: 143934720
Fix: 143933831
Fix: 143934770
Fix: 143934743
Test: quantization coupling tests in CTS and VTS
Change-Id: Id5e7a8c6b30463708bd93dbf6a3f30d05c2bcf40
Merged-In: Id5e7a8c6b30463708bd93dbf6a3f30d05c2bcf40
(cherry picked from commit 3466c78b04ee9cbbd8622157ef1357f1dfb5bb6a)
Diffstat (limited to 'neuralnetworks')
-rw-r--r-- | neuralnetworks/1.3/types.hal | 18 |
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal index 3641355221..3551d5762a 100644 --- a/neuralnetworks/1.3/types.hal +++ b/neuralnetworks/1.3/types.hal @@ -956,6 +956,7 @@ enum OperationType : int32_t { * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) * * {@link OperandType::TENSOR_FLOAT32} * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) * * Supported tensor rank: up to 4. * @@ -967,6 +968,8 @@ enum OperationType : int32_t { * * 0: The output tensor of same shape as input0. * For {@link OperandType::TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 256 and the zeroPoint must be 0. + * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, + * the scale must be 1.f / 256 and the zeroPoint must be -128. */ LOGISTIC = @1.2::OperationType:LOGISTIC, @@ -1384,6 +1387,7 @@ enum OperationType : int32_t { * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) * * {@link OperandType::TENSOR_FLOAT32} * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) * * Supported tensor rank: up to 4. * @@ -1393,7 +1397,8 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output tensor of same shape as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, + * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and + * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. */ RELU = @1.2::OperationType:RELU, @@ -1409,6 +1414,7 @@ enum OperationType : int32_t { * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) * * {@link OperandType::TENSOR_FLOAT32} * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) * * Supported tensor rank: up to 4. 
* @@ -1418,7 +1424,8 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output tensor of the same shape as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, + * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and + * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. */ RELU1 = @1.2::OperationType:RELU1, @@ -1434,6 +1441,7 @@ enum OperationType : int32_t { * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) * * {@link OperandType::TENSOR_FLOAT32} * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) * * Supported tensor rank: up to 4. * @@ -1443,7 +1451,8 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output tensor of same shape as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, + * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and + * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. */ RELU6 = @1.2::OperationType:RELU6, @@ -1764,6 +1773,7 @@ enum OperationType : int32_t { * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) * * {@link OperandType::TENSOR_FLOAT32} * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2) + * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) * * Supported tensor rank: up to 4. * @@ -1775,6 +1785,8 @@ enum OperationType : int32_t { * * 0: The output tensor of same shape as input0. * For {@link OperandType::TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 128 and the zeroPoint must be 128. + * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, + * the scale must be 1.f / 128 and the zeroPoint must be 0. */ TANH = @1.2::OperationType:TANH, |