author    | Michael Butler <butlermichael@google.com> | 2019-08-26 23:55:47 -0700
committer | Michael Butler <butlermichael@google.com> | 2019-08-29 12:55:56 -0700
commit    | 62749b917ef790a31e3ef850ebef94eb71e5d869 (patch)
tree      | 4246a8ce8ea872136cde209dd9bcaf6aabc4f53f /neuralnetworks/1.0
parent    | 15b826ad6a694b2f47835b9ebdc0c84ff91380c9 (diff)
download  | platform_hardware_interfaces-62749b917ef790a31e3ef850ebef94eb71e5d869.tar.gz
          | platform_hardware_interfaces-62749b917ef790a31e3ef850ebef94eb71e5d869.tar.bz2
          | platform_hardware_interfaces-62749b917ef790a31e3ef850ebef94eb71e5d869.zip
Cleanup NNAPI VTS tests
This CL includes the following cleanups:
* namespace compression: collapse the nested namespace blocks into C++17 nested namespace definitions (a sketch of the pattern follows the commit message)
* remove "using" declarations from header files
* remove no-op code and replace no-op constructors with "= default"
* clang-format the code
Bug: N/A
Test: mma
Test: VtsHalNeuralnetworksV1_*TargetTest
Change-Id: I023997d8686ca65223858eed3a0881f5444ed2d6
Merged-In: I023997d8686ca65223858eed3a0881f5444ed2d6
(cherry picked from commit bbe5dad26674e8fac9570f52814571f626f09a3b)
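Most of the churn in the diff below is the namespace compression: the pre-C++17 chain of single-identifier namespace blocks is collapsed into one C++17 nested namespace definition with a single closing brace. A minimal sketch of the before/after pattern is shown here; the foo() declaration is a placeholder for illustration and is not part of the patch.

```cpp
// Before: pre-C++17 style, one block (and one closing brace/comment) per level.
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

void foo();  // hypothetical declaration, for illustration only

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// After: C++17 nested namespace definition; same scope, one block.
namespace android::hardware::neuralnetworks::V1_0::vts::functional {

void foo();  // same hypothetical declaration

}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
```

A side effect visible in the diff: once code sits inside the compressed namespace, using-declarations can drop the shared prefix (for example, `using implementation::PreparedModelCallback;` instead of the fully qualified form).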
Diffstat (limited to 'neuralnetworks/1.0')
9 files changed, 74 insertions, 176 deletions
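The other recurring cleanup in the diff is dropping empty user-provided constructors/destructors in favor of `= default`, and moving state that was assigned in SetUp() and cleared in TearDown() into an in-class member initializer. The sketch below is an editor's illustration of that pattern in plain standard C++, not code from the patch; the class and member names are hypothetical.

```cpp
#include <memory>
#include <string>

// Hypothetical fixture-style class, for illustration only.
class Fixture {
  public:
    // Previously such classes declared an empty constructor/destructor in the
    // header and defined them as no-ops in the .cpp; defaulting removes that
    // boilerplate without changing behavior.
    Fixture() = default;

    void SetUp() {}     // no longer needs to assign `service`
    void TearDown() {}  // no longer needs to reset `service`

  protected:
    // In-class initializer replaces the assignment that used to live in SetUp(),
    // mirroring how the VTS fixture now initializes its `device` member.
    const std::shared_ptr<std::string> service = std::make_shared<std::string>("example");
};

int main() {
    Fixture f;
    f.SetUp();
    f.TearDown();
    return 0;
}
```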
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
index 945c4065e5..5727ca4034 100644
--- a/neuralnetworks/1.0/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -18,12 +18,7 @@
 #include "VtsHalNeuralnetworks.h"
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 // create device test
 TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -38,19 +33,14 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) {
 // initialization
 TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     Return<void> ret =
-            device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
-                EXPECT_EQ(ErrorStatus::NONE, status);
-                EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
-                EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
-                EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
-                EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
-            });
+            device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+                EXPECT_EQ(ErrorStatus::NONE, status);
+                EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+                EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+            });
     EXPECT_TRUE(ret.isOk());
 }
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 5f96539fc4..33a6fa5d6a 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -33,23 +33,12 @@
 #include <gtest/gtest.h>
 #include <iostream>
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::IDevice;
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Model;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-using ::android::hidl::memory::V1_0::IMemory;
+using hidl::memory::V1_0::IMemory;
+using implementation::ExecutionCallback;
+using implementation::PreparedModelCallback;
 Model createModel(const TestModel& testModel) {
     // Model operands.
@@ -206,9 +195,4 @@ TEST_P(GeneratedTest, Test) {
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index f86e8b3e72..a42f271cad 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -21,12 +21,7 @@
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
@@ -59,11 +54,6 @@ class ValidationTest : public GeneratedTestBase {
 Model createModel(const ::test_helper::TestModel& testModel);
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 5aa27516db..5de99fd0b3 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -28,15 +28,13 @@
 #include <algorithm>
 #include <vector>
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
+namespace android::hardware::neuralnetworks {
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::DataLocation;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
-using ::android::hidl::memory::V1_0::IMemory;
+using hidl::memory::V1_0::IMemory;
+using V1_0::DataLocation;
+using V1_0::Request;
+using V1_0::RequestArgument;
 constexpr uint32_t kInputPoolIndex = 0;
 constexpr uint32_t kOutputPoolIndex = 1;
@@ -118,6 +116,4 @@ std::vector<TestBuffer> getOutputBuffers(const Request& request) {
     return outputBuffers;
 }
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index 5845aabe70..9854395084 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -20,15 +20,9 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using implementation::PreparedModelCallback;
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -37,9 +31,9 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
     SCOPED_TRACE(message + " [getSupportedOperations]");
     Return<void> ret =
-            device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
-                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-            });
+            device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            });
     EXPECT_TRUE(ret.isOk());
 }
@@ -48,7 +42,6 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
     SCOPED_TRACE(message + " [prepareModel]");
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -94,13 +87,13 @@ static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
 static uint32_t addOperand(Model* model) {
     return hidl_vec_push_back(&model->operands, {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
     });
 }
@@ -114,10 +107,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 static const int32_t invalidOperandTypes[] = {
-        static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
-        static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
-        static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
-        static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
+        static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+        static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
+        static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+        static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
 };
 static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
@@ -210,7 +203,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
 static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::vector<int32_t> invalidZeroPoints =
-                getInvalidZeroPoints(model.operands[operand].type);
+                getInvalidZeroPoints(model.operands[operand].type);
         for (int32_t invalidZeroPoint : invalidZeroPoints) {
             const std::string message = "mutateOperandZeroPointTest: operand " +
                                         std::to_string(operand) + " has zero point of " +
@@ -242,18 +235,18 @@ static void mutateOperand(Operand* operand, OperandType type) {
             break;
         case OperandType::TENSOR_FLOAT32:
             newOperand.dimensions =
-                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = 0.0f;
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_INT32:
             newOperand.dimensions =
-                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
             newOperand.dimensions =
-                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
             break;
         case OperandType::OEM:
@@ -303,10 +296,10 @@ static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0
 ///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
 static const int32_t invalidOperationTypes[] = {
-        static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
-        static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
-        static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
-        static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+        static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
+        static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
+        static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
+        static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
 };
 static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
@@ -317,7 +310,7 @@ static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model
                                     std::to_string(invalidOperationType);
         validate(device, message, model, [operation, invalidOperationType](Model* model) {
             model->operations[operation].type =
-                    static_cast<OperationType>(invalidOperationType);
+                    static_cast<OperationType>(invalidOperationType);
         });
     }
 }
@@ -470,7 +463,7 @@ static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model&
 static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message =
-                "addOperationOutputTest: operation " + std::to_string(operation);
+                "addOperationOutputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
             uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
             hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -498,9 +491,4 @@ void ValidationTest::validateModel(const V1_0::Model& model) {
     addOperationOutputTest(device, model);
 }
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 730e054757..d8f3e65e8c 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -20,14 +20,9 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using implementation::ExecutionCallback;
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -41,7 +36,6 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     SCOPED_TRACE(message + " [execute]");
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
     Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
     ASSERT_TRUE(executeLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -99,9 +93,4 @@ void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
     removeOutputTest(preparedModel, request);
 }
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index a51f71f8c6..9ee4e37cb0 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -24,16 +24,11 @@
 #include <android-base/logging.h>
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using implementation::PreparedModelCallback;
-static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+static void createPreparedModel(const sp<IDevice>& device, const Model& model,
                                 sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
@@ -50,7 +45,6 @@ static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& mo
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -81,10 +75,6 @@ static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& mo
 }
 // A class for test environment setup
-NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
-
-NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
-
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
     // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
@@ -97,14 +87,8 @@ void NeuralnetworksHidlEnvironment::registerTestServices() {
 }
 // The main test class for NEURALNETWORK HIDL HAL.
-NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
-
-NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
-
 void NeuralnetworksHidlTest::SetUp() {
     ::testing::VtsHalHidlTargetTestBase::SetUp();
-    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
-            NeuralnetworksHidlEnvironment::getInstance());
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
@@ -119,7 +103,6 @@ void NeuralnetworksHidlTest::SetUp() {
 }
 void NeuralnetworksHidlTest::TearDown() {
-    device = nullptr;
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
@@ -128,10 +111,8 @@ void ValidationTest::validateEverything(const Model& model, const Request& reque
     // create IPreparedModel
     sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
     validateRequest(preparedModel, request);
 }
@@ -145,12 +126,7 @@ TEST_P(ValidationTest, Test) {
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 namespace android::hardware::neuralnetworks::V1_0 {
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index 9638a0e667..fa9ad3b3c1 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -30,20 +30,14 @@
 #include "TestHarness.h"
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
-    NeuralnetworksHidlEnvironment();
-    ~NeuralnetworksHidlEnvironment() override;
+    NeuralnetworksHidlEnvironment() = default;
-  public:
+  public:
    static NeuralnetworksHidlEnvironment* getInstance();
    void registerTestServices() override;
 };
@@ -52,22 +46,17 @@ class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvB
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
-  public:
-    NeuralnetworksHidlTest();
-    ~NeuralnetworksHidlTest() override;
+  public:
+    NeuralnetworksHidlTest() = default;
    void SetUp() override;
    void TearDown() override;
-  protected:
-    sp<IDevice> device;
+  protected:
+    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+            NeuralnetworksHidlEnvironment::getInstance());
 };
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 namespace android::hardware::neuralnetworks::V1_0 {
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 2955b6e35c..274cb584ea 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -17,14 +17,13 @@
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
+#include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <algorithm>
 #include <vector>
 #include "TestHarness.h"
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
+namespace android::hardware::neuralnetworks {
 // Create HIDL Request from the TestModel struct.
 V1_0::Request createRequest(const ::test_helper::TestModel& testModel);
@@ -37,23 +36,20 @@ std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& req
 // resizing the hidl_vec to one less.
 template <typename Type>
 inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
-    if (vec) {
-        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
-        vec->resize(vec->size() - 1);
-    }
+    CHECK(vec != nullptr);
+    std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+    vec->resize(vec->size() - 1);
 }
 template <typename Type>
 inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
-    // assume vec is valid
+    CHECK(vec != nullptr);
     const uint32_t index = vec->size();
     vec->resize(index + 1);
     (*vec)[index] = value;
     return index;
 }
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H