 current.txt                                      |  4 ++--
 neuralnetworks/1.2/IDevice.hal                   | 11 +++++++++++
 neuralnetworks/1.2/types.hal                     | 33 ++++++++++++++++++++++++
 neuralnetworks/1.2/vts/functional/BasicTests.cpp | 27 ++++++++++++++++++++++-
 4 files changed, 72 insertions(+), 3 deletions(-)
diff --git a/current.txt b/current.txt
index 23b019e8a..d68bde5fd 100644
--- a/current.txt
+++ b/current.txt
@@ -508,11 +508,11 @@ b9422a9aca84df1ff9623dc12c0562abce97716e28d63a965f2bfb88f9ad9607 android.hardwar
4cb139f729c29d8d6f4ecdab149c4feb571dad8a06e56cd57fcb52e70208bab4 android.hardware.media.c2@1.0::types
4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback
19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext
-363821d1b71147b896a08e2a570946db9b9d46f90d9f91b085bd8d3013a2b4d5 android.hardware.neuralnetworks@1.2::IDevice
+b83317b66721241887d2770b5ae95fd5af1e77c5daa7530ecb08fae8892f2b43 android.hardware.neuralnetworks@1.2::IDevice
92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-39a6d7cf9bc7290bd90739e971ccad5f35f5cc0faea4a417b59f22c9ca9f1f2a android.hardware.neuralnetworks@1.2::types
+d734c2441b602da240fa0e9afe3b612cdc9f3ae9c1db13216f957861d0673c5e android.hardware.neuralnetworks@1.2::types
cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index da9a966ba..d83f9e675 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -76,6 +76,17 @@ interface IDevice extends @1.1::IDevice {
getType() generates (ErrorStatus status, DeviceType type);
/**
+ * Gets the capabilities of a driver.
+ *
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities);
+
+ /**
* Gets information about extensions supported by the driver implementation.
*
* All extension operations and operands must be fully supported for the
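
(Not part of this change.) As a rough sketch of the driver side of the new method: a service implementing V1_2::IDevice would report its 1.2 Capabilities through the HIDL-generated getCapabilities_1_2_cb callback. "SampleDriver", the performance numbers, and the operand types below are placeholder assumptions, not values taken from this patch.

// Hypothetical driver-side sketch; the class name, numbers, and operand
// types are placeholders. getCapabilities_1_2_cb is the callback type that
// HIDL generates for this method.
#include <android/hardware/neuralnetworks/1.2/IDevice.h>

using ::android::hardware::Return;
using ::android::hardware::Void;
namespace nn = ::android::hardware::neuralnetworks;

struct SampleDriver : public nn::V1_2::IDevice {
    Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override {
        // Relaxed float32->float16 performance, reported separately for
        // scalar and tensor operands.
        nn::V1_0::PerformanceInfo relaxed = {/* execTime */ 0.8f, /* powerUsage */ 0.8f};
        nn::V1_2::Capabilities capabilities;
        capabilities.relaxedFloat32toFloat16PerformanceScalar = relaxed;
        capabilities.relaxedFloat32toFloat16PerformanceTensor = relaxed;
        // operandPerformance must be sorted by OperandType; any type left out
        // is treated by callers as {FLT_MAX, FLT_MAX}.
        capabilities.operandPerformance = {
                {nn::V1_2::OperandType::TENSOR_FLOAT32, {1.0f, 1.0f}},
                {nn::V1_2::OperandType::TENSOR_QUANT8_ASYMM, {0.5f, 0.5f}},
        };
        cb(nn::V1_0::ErrorStatus::NONE, capabilities);
        return Void();
    }
    // ...the remaining IDevice methods are omitted from this sketch...
};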
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 28d01192d..7691642f7 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -4618,6 +4618,39 @@ enum DeviceType : int32_t {
};
/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non-extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
+
+/**
* Describes one operation of the model's graph.
*/
struct Operation {
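
(Not part of this change.) Because operandPerformance above must be sorted by OperandType, and a missing entry is defined as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }, a client can resolve the performance for a given operand type with a binary search. The helper below is an illustrative sketch; "lookupPerformance" is not an API added by this patch.

// Hypothetical client-side helper; the function name is illustrative only.
#include <algorithm>
#include <cfloat>

#include <android/hardware/neuralnetworks/1.2/types.h>

using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::neuralnetworks::V1_2::Capabilities;
using ::android::hardware::neuralnetworks::V1_2::OperandType;

PerformanceInfo lookupPerformance(const Capabilities& capabilities, OperandType type) {
    const auto& perf = capabilities.operandPerformance;
    // operandPerformance is required to be sorted by OperandType, so a
    // binary search is enough to locate the matching entry.
    auto it = std::lower_bound(perf.begin(), perf.end(), type,
                               [](const Capabilities::OperandPerformance& entry,
                                  OperandType t) { return entry.type < t; });
    if (it != perf.end() && it->type == type) {
        return it->info;
    }
    // Types absent from the vector carry the documented worst-case default.
    return {FLT_MAX, FLT_MAX};
}

This sorted-vector contract is also what the new GetCapabilitiesTest below relies on when it validates the reported capabilities with std::is_sorted.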
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 6fb16c203..5c269df27 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -25,7 +25,7 @@ namespace V1_2 {
namespace vts {
namespace functional {
-using V1_1::Capabilities;
+using V1_0::PerformanceInfo;
// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -37,6 +37,31 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) {
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ using OperandPerformance = Capabilities::OperandPerformance;
+ Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status,
+ const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+
+ auto isPositive = [](const PerformanceInfo& perf) {
+ return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
+ };
+
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto& opPerf = capabilities.operandPerformance;
+ EXPECT_TRUE(std::all_of(
+ opPerf.begin(), opPerf.end(),
+ [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
+ EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ }));
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+
// device version test
TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) {