author    Xusong Wang <xusongw@google.com>    2020-02-25 11:43:10 -0800
committer Xusong Wang <xusongw@google.com>    2020-03-13 20:04:38 -0700
commit    41adc5bc115d558cecfdb933d8dae7b17f707b20 (patch)
tree      ac9989dc8f30e69ba38516f51d8ae97ab5fe957f /neuralnetworks
parent    140597c6a07b2ad87bef7314d551a204e3f7efc0 (diff)
Add BLOB AHWB tests in VTS.
Bug: 149847930
Test: 1.3 VTS
Change-Id: I9c795dcb7696c843afd12551927463c5529a4b60
Diffstat (limited to 'neuralnetworks')
-rw-r--r--  neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp  |   6
-rw-r--r--  neuralnetworks/1.0/vts/functional/Utils.cpp                  |  91
-rw-r--r--  neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp  |   3
-rw-r--r--  neuralnetworks/1.0/vts/functional/include/1.0/Utils.h       |  74
-rw-r--r--  neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp  |   6
-rw-r--r--  neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp  |   3
-rw-r--r--  neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp  |  20
-rw-r--r--  neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp  |   3
-rw-r--r--  neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp  | 156
-rw-r--r--  neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp |  15
-rw-r--r--  neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp  |   3
11 files changed, 272 insertions, 108 deletions
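For orientation before the per-file diffs: a minimal sketch of how the new ExecutionContext helper is driven, condensed from the GeneratedTestHarness changes below. It is not itself part of the patch, and the usual harness setup (device, preparedModel, testModel) is assumed.

    // Sketch only -- condensed from the harness changes in this patch.
    ExecutionContext context;
    // MemoryType::BLOB_AHWB backs the request pools with a BLOB-format
    // AHardwareBuffer instead of ashmem; MemoryType::ASHMEM is the default.
    const Request request = context.createRequest(testModel, MemoryType::BLOB_AHWB);
    // ... prepare the model and run the execution against `request` ...
    // The context owns the pool mappings, so output results can be copied
    // out after the execution completes.
    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
    checkResults(testModel, outputs);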
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index e28605dca2..4ab228f85b 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -125,7 +125,9 @@ Model createModel(const TestModel& testModel) {
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
const Model model = createModel(testModel);
- const Request request = createRequest(testModel);
+
+ ExecutionContext context;
+ const Request request = context.createRequest(testModel);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@@ -143,7 +145,7 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel) {
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 0dba85acd9..3613e69088 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -21,10 +21,13 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include <vndk/hardware_buffer.h>
+#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <vector>
@@ -37,10 +40,64 @@ using V1_0::DataLocation;
using V1_0::Request;
using V1_0::RequestArgument;
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
+std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
+ auto ashmem = std::make_unique<TestAshmem>(size);
+ return ashmem->mIsValid ? std::move(ashmem) : nullptr;
+}
+
+void TestAshmem::initialize(uint32_t size) {
+ mIsValid = false;
+ ASSERT_GT(size, 0);
+ mHidlMemory = nn::allocateSharedMemory(size);
+ ASSERT_TRUE(mHidlMemory.valid());
+ mMappedMemory = mapMemory(mHidlMemory);
+ ASSERT_NE(mMappedMemory, nullptr);
+ mPtr = static_cast<uint8_t*>(static_cast<void*>(mMappedMemory->getPointer()));
+ ASSERT_NE(mPtr, nullptr);
+ mIsValid = true;
+}
+
+std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
+ auto ahwb = std::make_unique<TestBlobAHWB>(size);
+ return ahwb->mIsValid ? std::move(ahwb) : nullptr;
+}
+
+void TestBlobAHWB::initialize(uint32_t size) {
+ mIsValid = false;
+ ASSERT_GT(size, 0);
+ const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+ const AHardwareBuffer_Desc desc = {
+ .width = size,
+ .height = 1,
+ .layers = 1,
+ .format = AHARDWAREBUFFER_FORMAT_BLOB,
+ .usage = usage,
+ .stride = size,
+ };
+ ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
+ ASSERT_NE(mAhwb, nullptr);
+
+ void* buffer = nullptr;
+ ASSERT_EQ(AHardwareBuffer_lock(mAhwb, usage, -1, nullptr, &buffer), 0);
+ ASSERT_NE(buffer, nullptr);
+ mPtr = static_cast<uint8_t*>(buffer);
+
+ const native_handle_t* handle = AHardwareBuffer_getNativeHandle(mAhwb);
+ ASSERT_NE(handle, nullptr);
+ mHidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
+ mIsValid = true;
+}
+
+TestBlobAHWB::~TestBlobAHWB() {
+ if (mAhwb) {
+ AHardwareBuffer_unlock(mAhwb, nullptr);
+ AHardwareBuffer_release(mAhwb);
+ }
+}
+
-Request createRequest(const TestModel& testModel) {
+Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
+ CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
// Model inputs.
hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
size_t inputSize = 0;
@@ -80,16 +137,19 @@ Request createRequest(const TestModel& testModel) {
}
// Allocate memory pools.
- hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
- nn::allocateSharedMemory(outputSize)};
- CHECK_NE(pools[kInputPoolIndex].size(), 0u);
- CHECK_NE(pools[kOutputPoolIndex].size(), 0u);
- sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex]);
- CHECK(inputMemory.get() != nullptr);
- uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
- CHECK(inputPtr != nullptr);
+ if (memoryType == MemoryType::ASHMEM) {
+ mInputMemory = TestAshmem::create(inputSize);
+ mOutputMemory = TestAshmem::create(outputSize);
+ } else {
+ mInputMemory = TestBlobAHWB::create(inputSize);
+ mOutputMemory = TestBlobAHWB::create(outputSize);
+ }
+ EXPECT_NE(mInputMemory, nullptr);
+ EXPECT_NE(mOutputMemory, nullptr);
+ hidl_vec<hidl_memory> pools = {mInputMemory->getHidlMemory(), mOutputMemory->getHidlMemory()};
// Copy input data to the memory pool.
+ uint8_t* inputPtr = mInputMemory->getPointer();
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
if (op.data.size() > 0) {
@@ -102,18 +162,13 @@ Request createRequest(const TestModel& testModel) {
return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}
-std::vector<TestBuffer> getOutputBuffers(const Request& request) {
- sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
- CHECK(outputMemory.get() != nullptr);
- uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
- CHECK(outputPtr != nullptr);
-
+std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
// Copy out output results.
+ uint8_t* outputPtr = mOutputMemory->getPointer();
std::vector<TestBuffer> outputBuffers;
for (const auto& output : request.outputs) {
outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
}
-
return outputBuffers;
}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index cb2225025b..7f7dac056b 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -129,7 +129,8 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ ExecutionContext context;
+ const Request request = context.createRequest(kTestModel);
ASSERT_FALSE(kTestModel.expectFailure);
validateEverything(kDevice, model, request);
}
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 6d4534cb4e..3292f79b1a 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -19,6 +19,8 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
#include <algorithm>
#include <iosfwd>
#include <string>
@@ -28,11 +30,73 @@
namespace android::hardware::neuralnetworks {
-// Create HIDL Request from the TestModel struct.
-V1_0::Request createRequest(const test_helper::TestModel& testModel);
-
-// After execution, copy out output results from the output memory pool.
-std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
+// Convenience class to manage the lifetime of memory resources.
+class TestMemoryBase {
+ DISALLOW_COPY_AND_ASSIGN(TestMemoryBase);
+
+ public:
+ TestMemoryBase() = default;
+ virtual ~TestMemoryBase() = default;
+ uint8_t* getPointer() const { return mPtr; }
+ hidl_memory getHidlMemory() const { return mHidlMemory; }
+
+ protected:
+ uint8_t* mPtr = nullptr;
+ hidl_memory mHidlMemory;
+ bool mIsValid = false;
+};
+
+class TestAshmem : public TestMemoryBase {
+ public:
+ static std::unique_ptr<TestAshmem> create(uint32_t size);
+
+ // Prefer TestAshmem::create.
+ // The constructor calls initialize, which constructs the memory resources. This is a workaround
+ // for the fact that gtest macros cannot be used directly in a constructor.
+ TestAshmem(uint32_t size) { initialize(size); }
+
+ private:
+ void initialize(uint32_t size);
+ sp<hidl::memory::V1_0::IMemory> mMappedMemory;
+};
+
+class TestBlobAHWB : public TestMemoryBase {
+ public:
+ static std::unique_ptr<TestBlobAHWB> create(uint32_t size);
+
+ // Prefer TestBlobAHWB::create.
+ // The constructor calls initialize, which constructs the memory resources. This is a
+ // workaround for the fact that gtest macros cannot be used directly in a constructor.
+ TestBlobAHWB(uint32_t size) { initialize(size); }
+ ~TestBlobAHWB();
+
+ private:
+ void initialize(uint32_t size);
+ AHardwareBuffer* mAhwb = nullptr;
+};
+
+enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
+
+// Manages the lifetime of memory resources used in an execution.
+class ExecutionContext {
+ DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
+
+ public:
+ static constexpr uint32_t kInputPoolIndex = 0;
+ static constexpr uint32_t kOutputPoolIndex = 1;
+
+ ExecutionContext() = default;
+
+ // Create HIDL Request from the TestModel struct.
+ V1_0::Request createRequest(const test_helper::TestModel& testModel,
+ MemoryType memoryType = MemoryType::ASHMEM);
+
+ // After execution, copy out output results from the output memory pool.
+ std::vector<test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request) const;
+
+ private:
+ std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+};
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
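The memory classes added to this header manage their own lifetimes; a short usage sketch (not part of the patch, size chosen arbitrarily), based only on the declarations above:

    // Sketch only: allocate a 1 KiB BLOB-format AHardwareBuffer test pool.
    std::unique_ptr<TestBlobAHWB> memory = TestBlobAHWB::create(1024);
    ASSERT_NE(memory, nullptr);                    // create() returns nullptr on failure
    uint8_t* data = memory->getPointer();          // CPU-visible mapping
    hidl_memory pool = memory->getHidlMemory();    // "hardware_buffer_blob" HIDL pool
    // The AHardwareBuffer is unlocked and released in ~TestBlobAHWB().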
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index cee15a35a1..14d300db71 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -133,7 +133,9 @@ Model createModel(const TestModel& testModel) {
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
const Model model = createModel(testModel);
- const Request request = createRequest(testModel);
+
+ ExecutionContext context;
+ const Request request = context.createRequest(testModel);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@@ -151,7 +153,7 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel) {
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index d56d40b2ba..04af6ec704 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -132,7 +132,8 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ ExecutionContext context;
+ const Request request = context.createRequest(kTestModel);
ASSERT_FALSE(kTestModel.expectFailure);
validateEverything(kDevice, model, request);
}
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 3ab01351e9..aaaafc7b72 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -68,6 +68,7 @@ struct TestConfig {
Executor executor;
MeasureTiming measureTiming;
OutputType outputType;
+ MemoryType memoryType;
};
} // namespace
@@ -216,7 +217,8 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
return;
}
- Request request = createRequest(testModel);
+ ExecutionContext context;
+ Request request = context.createRequest(testModel, testConfig.memoryType);
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
@@ -326,7 +328,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
@@ -337,24 +339,30 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
std::vector<OutputType> outputTypesList;
std::vector<MeasureTiming> measureTimingList;
std::vector<Executor> executorList;
+ std::vector<MemoryType> memoryTypeList;
if (testDynamicOutputShape) {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ memoryTypeList = {MemoryType::ASHMEM};
} else {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ memoryTypeList = {MemoryType::ASHMEM, MemoryType::BLOB_AHWB};
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig = {.executor = executor,
- .measureTiming = measureTiming,
- .outputType = outputType};
- EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ for (const MemoryType memoryType : memoryTypeList) {
+ const TestConfig testConfig = {.executor = executor,
+ .measureTiming = measureTiming,
+ .outputType = outputType,
+ .memoryType = memoryType};
+ EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ }
}
}
}
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 4fbd0e270f..5853fa49f6 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -153,7 +153,8 @@ void validateFailure(const sp<IDevice>& device, const Model& model, const Reques
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ ExecutionContext context;
+ const Request request = context.createRequest(kTestModel);
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index aae58bfb3e..45e0f827c0 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -76,8 +76,6 @@ enum class Executor { ASYNC, SYNC, BURST, FENCED };
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };
-enum class MemoryType { SHARED, DEVICE };
-
enum class IOType { INPUT, OUTPUT };
static void waitForSyncFence(int syncFd) {
@@ -338,21 +336,39 @@ static void makeOutputDimensionsUnspecified(Model* model) {
}
}
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
-constexpr uint32_t kDeviceMemoryBeginIndex = 2;
-static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
- const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
- const TestModel& testModel, bool preferDeviceMemory) {
+class ExecutionContextV1_3 {
+ public:
+ ExecutionContextV1_3(sp<IDevice> device, sp<IPreparedModel> preparedModel)
+ : kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}
+
+ std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
+ std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
+ const Request& request) const;
+ private:
+ // Get a TestBuffer with data copied from an IBuffer object.
+ void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) const;
+
+ static constexpr uint32_t kInputPoolIndex = 0;
+ static constexpr uint32_t kOutputPoolIndex = 1;
+ static constexpr uint32_t kDeviceMemoryBeginIndex = 2;
+
+ const sp<IDevice> kDevice;
+ const sp<IPreparedModel> kPreparedModel;
+ std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+ std::vector<sp<IBuffer>> mBuffers;
+};
+
+std::optional<Request> ExecutionContextV1_3::createRequest(const TestModel& testModel,
+ MemoryType memoryType) {
// Memory pools are organized as:
// - 0: Input shared memory pool
// - 1: Output shared memory pool
// - [2, 2+i): Input device memories
// - [2+i, 2+i+o): Output device memories
- DeviceMemoryAllocator allocator(device, preparedModel, testModel);
- std::vector<sp<IBuffer>> buffers;
+ DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
std::vector<uint32_t> tokens;
+ mBuffers.clear();
// Model inputs.
hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
@@ -363,13 +379,13 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
// Omitted input.
inputs[i] = {.hasNoValue = true};
continue;
- } else if (preferDeviceMemory) {
+ } else if (memoryType == MemoryType::DEVICE) {
SCOPED_TRACE("Input index = " + std::to_string(i));
auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
if (buffer != nullptr) {
- DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
+ DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
kDeviceMemoryBeginIndex)};
- buffers.push_back(std::move(buffer));
+ mBuffers.push_back(std::move(buffer));
tokens.push_back(token);
inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
continue;
@@ -389,13 +405,13 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
size_t outputSize = 0;
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
- if (preferDeviceMemory) {
+ if (memoryType == MemoryType::DEVICE) {
SCOPED_TRACE("Output index = " + std::to_string(i));
auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
if (buffer != nullptr) {
- DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
+ DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
kDeviceMemoryBeginIndex)};
- buffers.push_back(std::move(buffer));
+ mBuffers.push_back(std::move(buffer));
tokens.push_back(token);
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
continue;
@@ -418,21 +434,29 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
}
+ if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
+ return std::nullopt;
+ }
+
// Memory pools.
- hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + buffers.size());
- pools[kInputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(inputSize, 1)));
- pools[kOutputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(outputSize, 1)));
- CHECK_NE(pools[kInputPoolIndex].hidlMemory().size(), 0u);
- CHECK_NE(pools[kOutputPoolIndex].hidlMemory().size(), 0u);
- for (uint32_t i = 0; i < buffers.size(); i++) {
+ hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + mBuffers.size());
+ if (memoryType == MemoryType::BLOB_AHWB) {
+ mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
+ mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
+ } else {
+ mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1));
+ mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1));
+ }
+ EXPECT_NE(mInputMemory, nullptr);
+ EXPECT_NE(mOutputMemory, nullptr);
+ pools[kInputPoolIndex].hidlMemory(mInputMemory->getHidlMemory());
+ pools[kOutputPoolIndex].hidlMemory(mOutputMemory->getHidlMemory());
+ for (uint32_t i = 0; i < mBuffers.size(); i++) {
pools[kDeviceMemoryBeginIndex + i].token(tokens[i]);
}
// Copy input data to the input shared memory pool.
- sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex].hidlMemory());
- CHECK(inputMemory.get() != nullptr);
- uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
- CHECK(inputPtr != nullptr);
+ uint8_t* inputPtr = mInputMemory->getPointer();
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
@@ -441,37 +465,14 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
std::copy(begin, end, inputPtr + inputs[i].location.offset);
}
}
-
- Request request = {
+ return Request{
.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
- return {std::move(request), std::move(buffers)};
}
-// Get a TestBuffer with data copied from an IBuffer object.
-static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) {
- // IBuffer -> Shared memory.
- hidl_memory tmp = nn::allocateSharedMemory(size);
- const auto ret = buffer->copyTo(tmp);
- ASSERT_TRUE(ret.isOk());
- ASSERT_EQ(static_cast<ErrorStatus>(ret), ErrorStatus::NONE);
-
- // Shared memory -> TestBuffer.
- sp<IMemory> outputMemory = mapMemory(tmp);
- ASSERT_NE(outputMemory.get(), nullptr);
- uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
- ASSERT_NE(outputPtr, nullptr);
- ASSERT_NE(testBuffer, nullptr);
- *testBuffer = TestBuffer(size, outputPtr);
-}
-
-static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, const Request& request,
- const std::vector<sp<IBuffer>>& buffers) {
- sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex].hidlMemory());
- CHECK(outputMemory.get() != nullptr);
- uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
- CHECK(outputPtr != nullptr);
-
+std::vector<TestBuffer> ExecutionContextV1_3::getOutputBuffers(const TestModel& testModel,
+ const Request& request) const {
// Copy out output results.
+ uint8_t* outputPtr = mOutputMemory->getPointer();
std::vector<TestBuffer> outputBuffers;
for (uint32_t i = 0; i < request.outputs.size(); i++) {
const auto& outputLoc = request.outputs[i].location;
@@ -480,12 +481,12 @@ static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, cons
} else {
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
if (op.data.size() == 0) {
- outputBuffers.emplace_back();
+ outputBuffers.emplace_back(0, nullptr);
} else {
SCOPED_TRACE("Output index = " + std::to_string(i));
const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
TestBuffer buffer;
- getBuffer(buffers[bufferIndex], op.data.size(), &buffer);
+ getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
outputBuffers.push_back(std::move(buffer));
}
}
@@ -493,6 +494,24 @@ static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, cons
return outputBuffers;
}
+// Get a TestBuffer with data copied from an IBuffer object.
+void ExecutionContextV1_3::getBuffer(const sp<IBuffer>& buffer, size_t size,
+ TestBuffer* testBuffer) const {
+ // IBuffer -> Shared memory.
+ hidl_memory tmp = nn::allocateSharedMemory(size);
+ const auto ret = buffer->copyTo(tmp);
+ ASSERT_TRUE(ret.isOk());
+ ASSERT_EQ(static_cast<ErrorStatus>(ret), ErrorStatus::NONE);
+
+ // Shared memory -> TestBuffer.
+ sp<IMemory> outputMemory = mapMemory(tmp);
+ ASSERT_NE(outputMemory.get(), nullptr);
+ uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
+ ASSERT_NE(outputPtr, nullptr);
+ ASSERT_NE(testBuffer, nullptr);
+ *testBuffer = TestBuffer(size, outputPtr);
+}
+
static bool hasZeroSizedOutput(const TestModel& testModel) {
return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
[&testModel](uint32_t index) {
@@ -543,13 +562,14 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
return;
}
- auto [request, buffers] =
- createRequest(device, preparedModel, testModel,
- /*preferDeviceMemory=*/testConfig.memoryType == MemoryType::DEVICE);
+ ExecutionContextV1_3 context(device, preparedModel);
+ auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
// Skip if testing memory domain but no device memory has been allocated.
- if (testConfig.memoryType == MemoryType::DEVICE && buffers.empty()) {
+ if (!maybeRequest.has_value()) {
return;
}
+
+ Request request = std::move(maybeRequest.value());
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
@@ -744,7 +764,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
}
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(testModel, request, buffers);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
// We want "close-enough" results.
checkResults(testModel, outputs);
@@ -755,29 +775,32 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
std::vector<OutputType> outputTypesList;
std::vector<MeasureTiming> measureTimingList;
std::vector<Executor> executorList;
- MemoryType memoryType = MemoryType::SHARED;
+ std::vector<MemoryType> memoryTypeList;
switch (testKind) {
case TestKind::GENERAL: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::DYNAMIC_SHAPE: {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::MEMORY_DOMAIN: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
- memoryType = MemoryType::DEVICE;
+ memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
} break;
case TestKind::FENCED_COMPUTE: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::FENCED};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::QUANTIZATION_COUPLING: {
LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
@@ -788,14 +811,17 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
// Burst does not support V1_3 loop timeout.
executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
- EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ for (const MemoryType memoryType : memoryTypeList) {
+ const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
+ EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ }
}
}
}
@@ -814,7 +840,7 @@ void EvaluatePreparedCoupledModels(const sp<IDevice>& device,
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::SHARED,
+ const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
/*reportSkipping=*/false);
bool baseSkipped = false;
EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
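In condensed form, the 1.3 device-memory flow introduced in this file, including the skip when the driver allocates no device memory. This mirrors the EvaluatePreparedModel changes above and is not itself part of the patch:

    // Sketch only -- condensed from the EvaluatePreparedModel changes above.
    ExecutionContextV1_3 context(device, preparedModel);
    std::optional<Request> maybeRequest =
            context.createRequest(testModel, MemoryType::DEVICE);
    if (!maybeRequest.has_value()) {
        return;  // Testing memory domain, but no device memory was allocated.
    }
    Request request = std::move(maybeRequest).value();
    // ... execute, then copy outputs back from both the shared output pool
    // and any device-allocated IBuffers ...
    const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);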
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index 879989ea2a..2ef1e8f6bd 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -214,7 +214,8 @@ static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel
}
void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
+ const Request& request, const ExecutionContext& context, bool synchronous,
+ DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
const auto deadline = makeDeadline(deadlineBound);
@@ -261,7 +262,7 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
// Retrieve execution results.
ASSERT_TRUE(nn::compliantWithV1_0(request));
const V1_0::Request request10 = nn::convertToV1_0(request);
- const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request10);
// We want "close-enough" results.
if (status == ErrorStatus::NONE) {
@@ -270,10 +271,11 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
}
void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- const Request& request) {
+ const Request& request, const ExecutionContext& context) {
for (bool synchronous : {false, true}) {
for (auto deadlineBound : deadlineBounds) {
- runExecutionTest(preparedModel, testModel, request, synchronous, deadlineBound);
+ runExecutionTest(preparedModel, testModel, request, context, synchronous,
+ deadlineBound);
}
}
}
@@ -291,8 +293,9 @@ void runTests(const sp<IDevice>& device, const TestModel& testModel) {
if (preparedModel == nullptr) return;
// run execution tests
- const Request request = nn::convertToV1_3(createRequest(testModel));
- runExecutionTests(preparedModel, testModel, request);
+ ExecutionContext context;
+ const Request request = nn::convertToV1_3(context.createRequest(testModel));
+ runExecutionTests(preparedModel, testModel, request, context);
}
class DeadlineTest : public GeneratedTestBase {};
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 5b07034296..703a2f5345 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -177,7 +177,8 @@ void validateFailure(const sp<IDevice>& device, const Model& model, const Reques
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = nn::convertToV1_3(createRequest(kTestModel));
+ ExecutionContext context;
+ const Request request = nn::convertToV1_3(context.createRequest(kTestModel));
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {