diff options
author     Lev Proleev <levp@google.com>    2019-12-05 16:57:30 +0000
committer  Lev Proleev <levp@google.com>    2019-12-05 16:59:20 +0000
commit     2bd0b3339cc278ed283dc10aa0daf438b5540ad9 (patch)
tree       ed7bccb43646a2d6b815a7077086ea0dc55f0093 /neuralnetworks
parent     c5e116c18bb156e6239e7524a880e5b62b552321 (diff)
download   platform_hardware_interfaces-2bd0b3339cc278ed283dc10aa0daf438b5540ad9.tar.gz
           platform_hardware_interfaces-2bd0b3339cc278ed283dc10aa0daf438b5540ad9.tar.bz2
           platform_hardware_interfaces-2bd0b3339cc278ed283dc10aa0daf438b5540ad9.zip
Add TENSOR_QUANT8_ASYMM_SIGNED support for DEQUANTIZE
Add TENSOR_QUANT8_ASYMM_SIGNED to the list of exceptions when mutating
DEQUANTIZE for validation.
Bug: 143934768
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I1b3b0a362d3949d4e31708388100d4794846ca3a
Diffstat (limited to 'neuralnetworks')
-rw-r--r-- | neuralnetworks/1.3/vts/functional/ValidateModel.cpp | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 242e12ecc7..6c618b30dd 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -323,8 +323,8 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
     // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
     // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
     // - DEQUANTIZE input can be any of
-    //   TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can
-    //   be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+    //   TENSOR_(QUANT8_ASYMM|QUANT8_ASYMM_SIGNED|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL),
+    //   output can be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
     // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32
     // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
     // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
@@ -364,6 +364,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
         case OperationType::DEQUANTIZE: {
             if (operand == operation.inputs[0] &&
                 (type == OperandType::TENSOR_QUANT8_ASYMM ||
+                 type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                  type == OperandType::TENSOR_QUANT8_SYMM ||
                  type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
                 return true;