Diffstat (limited to 'neuralnetworks')
-rw-r--r--  neuralnetworks/1.2/types.hal                                    11
-rw-r--r--  neuralnetworks/1.3/types.hal                                    15
-rw-r--r--  neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp   42
-rw-r--r--  neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp     150
-rw-r--r--  neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h        13
-rw-r--r--  neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp       5
-rw-r--r--  neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h         2
7 files changed, 175 insertions(+), 63 deletions(-)
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 837ced5b48..ef71ea8712 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -2448,15 +2448,17 @@ enum OperationType : int32_t {
* then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
- * this scalar must be of type {@link OperandType::FLOAT16}.
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
* * 50: The clipping threshold for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
- * this scalar must be of type {@link OperandType::FLOAT16}.
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
* * 51: merge_outputs
* An {@link OperandType::BOOL} scalar specifying if the outputs
* from forward and backward cells should be merged.
@@ -4124,7 +4126,6 @@ enum OperationType : int32_t {
* * 0: A tensor of the same type and shape as input1 and input2.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
- *
*/
SELECT = 84,
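For orientation before the 1.3 changes below: SELECT picks each output element from input1 (where the condition is true) or input2 (where it is false). A minimal reference sketch of those semantics, as a standalone illustration rather than driver code (the helper name is made up):

    #include <cstdint>
    #include <vector>

    // output[i] = condition[i] ? input1[i] : input2[i], applied elementwise.
    std::vector<float> referenceSelect(const std::vector<uint8_t>& condition,
                                       const std::vector<float>& input1,
                                       const std::vector<float>& input2) {
        std::vector<float> output(input1.size());
        for (size_t i = 0; i < input1.size(); ++i) {
            output[i] = condition[i] ? input1[i] : input2[i];
        }
        return output;
    }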
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 7df14b18f6..f959e45b5a 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -2375,15 +2375,17 @@ enum OperationType : int32_t {
* then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
- * this scalar must be of type {@link OperandType::FLOAT16}.
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
* * 50: The clipping threshold for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
- * this scalar must be of type {@link OperandType::FLOAT16}.
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
* * 51: merge_outputs
* An {@link OperandType::BOOL} scalar specifying if the outputs
* from forward and backward cells should be merged.
@@ -4034,6 +4036,7 @@ enum OperationType : int32_t {
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -4044,14 +4047,14 @@ enum OperationType : int32_t {
* true) or input2 (if false).
* * 1: An input tensor of the same shape as input0.
* * 2: An input tensor of the same shape and type as input1.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scales and zeroPoint can be different from input1 scale and zeroPoint.
*
* Outputs:
* * 0: A tensor of the same type and shape as input1 and input2.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
- *
*/
SELECT = @1.2::OperationType:SELECT,
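The TENSOR_QUANT8_ASYMM_SIGNED support added above mirrors the unsigned type: both encode real = scale * (quantized - zeroPoint). An unsigned tensor therefore converts to an equivalent signed one by shifting both the stored values and the zeroPoint down by 128, as in this hedged sketch (helper names are illustrative):

    #include <cstdint>

    // scale is unchanged; shifting q and zeroPoint by the same offset keeps
    // scale * (q - zeroPoint), and hence the represented real value, identical.
    int8_t toSignedQuantizedValue(uint8_t q) {
        return static_cast<int8_t>(static_cast<int32_t>(q) - 128);
    }
    int32_t toSignedZeroPoint(int32_t zeroPoint) {
        return zeroPoint - 128;
    }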
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index d8a7534461..60992d57d7 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -456,8 +456,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
}
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
@@ -519,8 +518,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
}
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
@@ -541,8 +539,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -566,8 +563,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -590,8 +586,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -615,8 +610,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -727,8 +721,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -752,8 +745,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -776,8 +768,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -801,8 +792,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -914,8 +904,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -937,8 +926,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -1082,8 +1070,7 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
- EvaluatePreparedModel(preparedModel, testModelAdd,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
}
}
}
@@ -1144,8 +1131,7 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
- EvaluatePreparedModel(preparedModel, testModelAdd,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
}
}
}
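Every call site above follows the same mechanical substitution: the boolean that could only express two behaviors becomes an explicit enum value, which is what makes the third test kind possible later in this change:

    // Before: a bool could only toggle dynamic-output-shape testing on or off.
    EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false);
    // After: an enum names the intent and leaves room for QUANTIZATION_COUPLING.
    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);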
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 2ec29887df..3e947f5163 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -79,6 +79,21 @@ struct TestConfig {
Executor executor;
MeasureTiming measureTiming;
OutputType outputType;
+ // `reportSkipping` indicates whether a test should log an info message and
+ // skip itself when the driver cannot execute the model. It is true by default
+ // and is set to false in quantization coupling tests, which handle skipping themselves.
+ bool reportSkipping;
+ TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ reportSkipping(true) {}
+ TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
+ bool reportSkipping)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ reportSkipping(reportSkipping) {}
};
} // namespace
@@ -219,7 +234,10 @@ static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
}
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- const TestConfig& testConfig) {
+ const TestConfig& testConfig, bool* skipped = nullptr) {
+ if (skipped != nullptr) {
+ *skipped = false;
+ }
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
if (testConfig.outputType == OutputType::INSUFFICIENT &&
!isOutputSizeGreaterThanOne(testModel, 0)) {
@@ -290,6 +308,12 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
+ if (skipped != nullptr) {
+ *skipped = true;
+ }
+ if (!testConfig.reportSkipping) {
+ return;
+ }
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
@@ -343,44 +367,117 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- bool testDynamicOutputShape) {
+ TestKind testKind) {
std::initializer_list<OutputType> outputTypesList;
std::initializer_list<MeasureTiming> measureTimingList;
std::initializer_list<Executor> executorList;
- if (testDynamicOutputShape) {
- outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
- measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
- executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
- } else {
- outputTypesList = {OutputType::FULLY_SPECIFIED};
- measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
- executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ switch (testKind) {
+ case TestKind::GENERAL: {
+ outputTypesList = {OutputType::FULLY_SPECIFIED};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ } break;
+ case TestKind::DYNAMIC_SHAPE: {
+ outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ } break;
+ case TestKind::QUANTIZATION_COUPLING: {
+ LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
+ return;
+ } break;
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig = {.executor = executor,
- .measureTiming = measureTiming,
- .outputType = outputType};
+ const TestConfig testConfig(executor, measureTiming, outputType);
EvaluatePreparedModel(preparedModel, testModel, testConfig);
}
}
}
}
-void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+void EvaluatePreparedCoupledModels(const sp<IPreparedModel>& preparedModel,
+ const TestModel& testModel,
+ const sp<IPreparedModel>& preparedCoupledModel,
+ const TestModel& coupledModel) {
+ std::initializer_list<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
+ std::initializer_list<MeasureTiming> measureTimingList = {MeasureTiming::NO,
+ MeasureTiming::YES};
+ std::initializer_list<Executor> executorList = {Executor::ASYNC, Executor::SYNC,
+ Executor::BURST};
+
+ for (const OutputType outputType : outputTypesList) {
+ for (const MeasureTiming measureTiming : measureTimingList) {
+ for (const Executor executor : executorList) {
+ const TestConfig testConfig(executor, measureTiming, outputType,
+ /*reportSkipping=*/false);
+ bool baseSkipped = false;
+ EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
+ bool coupledSkipped = false;
+ EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig,
+ &coupledSkipped);
+ ASSERT_EQ(baseSkipped, coupledSkipped);
+ if (baseSkipped) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service "
+ "cannot "
+ "execute model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ }
+ }
+ }
+}
+
+void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
Model model = createModel(testModel);
- if (testDynamicOutputShape) {
+ if (testKind == TestKind::DYNAMIC_SHAPE) {
makeOutputDimensionsUnspecified(&model);
}
sp<IPreparedModel> preparedModel;
- createPreparedModel(device, model, &preparedModel);
- if (preparedModel == nullptr) return;
-
- EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
+ switch (testKind) {
+ case TestKind::GENERAL: {
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+ EvaluatePreparedModel(preparedModel, testModel, TestKind::GENERAL);
+ } break;
+ case TestKind::DYNAMIC_SHAPE: {
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+ EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
+ } break;
+ case TestKind::QUANTIZATION_COUPLING: {
+ ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
+ createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/false);
+ TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
+ sp<IPreparedModel> preparedCoupledModel;
+ createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
+ /*reportSkipping=*/false);
+ // If we couldn't prepare a model with unsigned quantization, we must
+ // fail to prepare a model with signed quantization as well.
+ if (preparedModel == nullptr) {
+ ASSERT_EQ(preparedCoupledModel, nullptr);
+ // If we failed to prepare both of the models, we can safely skip
+ // the test.
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout
+ << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ ASSERT_NE(preparedCoupledModel, nullptr);
+ EvaluatePreparedCoupledModels(preparedModel, testModel, preparedCoupledModel,
+ signedQuantizedModel);
+ } break;
+ }
}
void GeneratedTestBase::SetUp() {
@@ -403,12 +500,19 @@ class GeneratedTest : public GeneratedTestBase {};
// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};
+// Tag for the quantization coupling tests
+class DISABLED_QuantizationCouplingTest : public GeneratedTest {};
+
TEST_P(GeneratedTest, Test) {
- Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
+ Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(DynamicOutputShapeTest, Test) {
- Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
+ Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
+}
+
+TEST_P(DISABLED_QuantizationCouplingTest, Test) {
+ Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
}
INSTANTIATE_GENERATED_TEST(GeneratedTest,
@@ -417,4 +521,8 @@ INSTANTIATE_GENERATED_TEST(GeneratedTest,
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
+ return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
+});
+
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
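Condensed, the QUANTIZATION_COUPLING path above enforces two invariants: the unsigned model and its signed counterpart must prepare together or fail together, and each execution configuration must be skipped for both models or for neither. A sketch of the checks (names follow the diff):

    // Invariant 1: if the unsigned model fails to prepare, the signed one must
    // fail too; only then may the test skip.
    if (preparedModel == nullptr) {
        ASSERT_EQ(preparedCoupledModel, nullptr);
        GTEST_SKIP();
    }
    // Invariant 2: for each (executor, timing, output type) configuration,
    // the two models must agree on whether execution was skipped.
    bool baseSkipped = false, coupledSkipped = false;
    EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
    EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig, &coupledSkipped);
    ASSERT_EQ(baseSkipped, coupledSkipped);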
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 45cff5b75b..ad6323f48c 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -57,8 +57,19 @@ Model createModel(const test_helper::TestModel& testModel);
void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
+enum class TestKind {
+ // Runs a test model and compares the results to golden data
+ GENERAL,
+ // Same as GENERAL but sets dimensions for the output tensors to zero
+ DYNAMIC_SHAPE,
+ // Tests whether a quantized model with TENSOR_QUANT8_ASYMM produces the same
+ // result (OK/SKIPPED/FAILED) as the same model with all such tensors converted
+ // to TENSOR_QUANT8_ASYMM_SIGNED.
+ QUANTIZATION_COUPLING
+};
+
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
- const test_helper::TestModel& testModel, bool testDynamicOutputShape);
+ const test_helper::TestModel& testModel, TestKind testKind);
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 625913d485..92d8fa7376 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -37,7 +37,7 @@ using V1_1::ExecutionPreference;
// internal helper function
void createPreparedModel(const sp<IDevice>& device, const Model& model,
- sp<IPreparedModel>* preparedModel) {
+ sp<IPreparedModel>* preparedModel, bool reportSkipping) {
ASSERT_NE(nullptr, preparedModel);
*preparedModel = nullptr;
@@ -74,6 +74,9 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
// can continue.
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel->get());
+ if (!reportSkipping) {
+ return;
+ }
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
"model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
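With reportSkipping set to false the helper returns quietly, leaving *preparedModel null, so the caller can apply its own policy; a usage sketch under that contract:

    sp<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/false);
    if (preparedModel == nullptr) {
        // Unsupported model: the caller decides whether this is a failure
        // (e.g. its coupled model did prepare) or grounds for GTEST_SKIP().
    }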
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index 8cb42d47e5..4e51052966 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -47,7 +47,7 @@ std::string printNeuralnetworksHidlTest(
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
- sp<IPreparedModel>* preparedModel);
+ sp<IPreparedModel>* preparedModel, bool reportSkipping = true);
// Utility function to get PreparedModel from callback and downcast to V1_3.
sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);