summaryrefslogtreecommitdiffstats
path: root/audio
diff options
context:
space:
mode:
authorMikhail Naganov <mnaganov@google.com>2020-08-04 23:37:05 +0000
committerMikhail Naganov <mnaganov@google.com>2020-11-20 01:41:38 +0000
commitfda20429b40bcdb8eba6cfa8a1d150c5d3025fb7 (patch)
treee3dacfaa34d56941af2833f186be209c6663963e /audio
parentd47c62b62af53ddbd1f70e520162d843e0e43356 (diff)
downloadplatform_hardware_interfaces-fda20429b40bcdb8eba6cfa8a1d150c5d3025fb7.tar.gz
platform_hardware_interfaces-fda20429b40bcdb8eba6cfa8a1d150c5d3025fb7.tar.bz2
platform_hardware_interfaces-fda20429b40bcdb8eba6cfa8a1d150c5d3025fb7.zip
Audio: Rearrange types in V7
Update channel mask and SR lists to conform to XSD. Added a script for converting existing APM XML files. Bug: 142480271 Test: m Change-Id: I986b9bccdade5fa850b06b033143388715a656af Merged-In: I986b9bccdade5fa850b06b033143388715a656af
Diffstat (limited to 'audio')
-rw-r--r--audio/7.0/IDevice.hal1
-rw-r--r--audio/7.0/IStream.hal68
-rw-r--r--audio/7.0/IStreamIn.hal8
-rw-r--r--audio/7.0/IStreamOut.hal8
-rw-r--r--audio/7.0/config/api/current.txt151
-rw-r--r--audio/7.0/config/audio_policy_configuration.xsd168
-rwxr-xr-xaudio/7.0/config/update_audio_policy_config.sh159
-rw-r--r--audio/7.0/types.hal55
-rw-r--r--audio/common/7.0/types.hal1032
-rw-r--r--audio/effect/7.0/IEffect.hal8
-rw-r--r--audio/effect/7.0/IVirtualizerEffect.hal10
-rw-r--r--audio/effect/7.0/types.hal6
12 files changed, 642 insertions, 1032 deletions
diff --git a/audio/7.0/IDevice.hal b/audio/7.0/IDevice.hal
index 7082d6b7ab..eecd92ed7c 100644
--- a/audio/7.0/IDevice.hal
+++ b/audio/7.0/IDevice.hal
@@ -315,7 +315,6 @@ interface IDevice {
* INVALID_STATE if the device was already closed
* or there are streams currently opened.
*/
- @exit
close() generates (Result retval);
/**
diff --git a/audio/7.0/IStream.hal b/audio/7.0/IStream.hal
index dacd3fd342..789cb1dfd1 100644
--- a/audio/7.0/IStream.hal
+++ b/audio/7.0/IStream.hal
@@ -44,13 +44,6 @@ interface IStream {
getBufferSize() generates (uint64_t bufferSize);
/**
- * Return the sampling rate in Hz.
- *
- * @return sampleRateHz sample rate in Hz.
- */
- getSampleRate() generates (uint32_t sampleRateHz);
-
- /**
* Return supported native sampling rates of the stream for a given format.
* A supported native sample rate is a sample rate that can be efficiently
* played by the hardware (typically without sample-rate conversions).
@@ -72,23 +65,6 @@ interface IStream {
generates (Result retval, vec<uint32_t> sampleRates);
/**
- * Sets the sampling rate of the stream. Calling this method is equivalent
- * to setting AUDIO_PARAMETER_STREAM_SAMPLING_RATE on the legacy HAL.
- * Optional method. If implemented, only called on a stopped stream.
- *
- * @param sampleRateHz sample rate in Hz.
- * @return retval operation completion status.
- */
- setSampleRate(uint32_t sampleRateHz) generates (Result retval);
-
- /**
- * Return the channel mask of the stream.
- *
- * @return mask channel mask.
- */
- getChannelMask() generates (bitfield<AudioChannelMask> mask);
-
- /**
* Return supported channel masks of the stream. Calling this method is
* equivalent to getting AUDIO_PARAMETER_STREAM_SUP_CHANNELS on the legacy
* HAL.
@@ -99,24 +75,7 @@ interface IStream {
* @return masks supported audio masks.
*/
getSupportedChannelMasks(AudioFormat format)
- generates (Result retval, vec<bitfield<AudioChannelMask>> masks);
-
- /**
- * Sets the channel mask of the stream. Calling this method is equivalent to
- * setting AUDIO_PARAMETER_STREAM_CHANNELS on the legacy HAL.
- * Optional method
- *
- * @param format audio format.
- * @return retval operation completion status.
- */
- setChannelMask(bitfield<AudioChannelMask> mask) generates (Result retval);
-
- /**
- * Return the audio format of the stream.
- *
- * @return format audio format.
- */
- getFormat() generates (AudioFormat format);
+ generates (Result retval, vec<vec<AudioChannelMask>> masks);
/**
* Return supported audio formats of the stream. Calling this method is
@@ -130,25 +89,23 @@ interface IStream {
getSupportedFormats() generates (Result retval, vec<AudioFormat> formats);
/**
- * Sets the audio format of the stream. Calling this method is equivalent to
- * setting AUDIO_PARAMETER_STREAM_FORMAT on the legacy HAL.
- * Optional method
+ * Retrieves basic stream configuration: sample rate, audio format,
+ * channel mask.
*
- * @param format audio format.
- * @return retval operation completion status.
+ * @return config basic stream configuration.
*/
- setFormat(AudioFormat format) generates (Result retval);
+ getAudioProperties() generates (AudioBasicConfig config);
/**
- * Convenience method for retrieving several stream parameters in
- * one transaction.
+ * Sets stream parameters. Only sets parameters that are specified.
+ * See the description of AudioBasicConfig for the details.
*
- * @return sampleRateHz sample rate in Hz.
- * @return mask channel mask.
- * @return format audio format.
+ * Optional method. If implemented, only called on a stopped stream.
+ *
+ * @param config basic stream configuration.
+ * @return retval operation completion status.
*/
- getAudioProperties() generates (
- uint32_t sampleRateHz, bitfield<AudioChannelMask> mask, AudioFormat format);
+ setAudioProperties(AudioBasicConfig config) generates (Result retval);
/**
* Applies audio effect to the stream.
@@ -312,6 +269,5 @@ interface IStream {
* output stream interface.
* INVALID_STATE if the stream was already closed.
*/
- @exit
close() generates (Result retval);
};
diff --git a/audio/7.0/IStreamIn.hal b/audio/7.0/IStreamIn.hal
index 15e436359e..0a3f24b840 100644
--- a/audio/7.0/IStreamIn.hal
+++ b/audio/7.0/IStreamIn.hal
@@ -100,7 +100,7 @@ interface IStreamIn extends IStream {
*
* The driver operates on a dedicated thread. The client must ensure that
* the thread is given an appropriate priority and assigned to correct
- * scheduler and cgroup. For this purpose, the method returns identifiers
+ * scheduler and cgroup. For this purpose, the method returns the identifier
* of the driver thread.
*
* @param frameSize the size of a single frame, in bytes.
@@ -115,7 +115,9 @@ interface IStreamIn extends IStream {
* specified at the stream opening.
* @return statusMQ a message queue used for passing status from the driver
* using ReadStatus structures.
- * @return threadInfo identifiers of the driver's dedicated thread.
+ * @return threadId identifier of the driver's dedicated thread; the caller
+ * may adjust the thread priority to match the priority
+ * of the thread that provides audio data.
*/
prepareForReading(uint32_t frameSize, uint32_t framesCount)
generates (
@@ -123,7 +125,7 @@ interface IStreamIn extends IStream {
fmq_sync<ReadParameters> commandMQ,
fmq_sync<uint8_t> dataMQ,
fmq_sync<ReadStatus> statusMQ,
- ThreadInfo threadInfo);
+ int32_t threadId);
/**
* Return the amount of input frames lost in the audio driver since the last
diff --git a/audio/7.0/IStreamOut.hal b/audio/7.0/IStreamOut.hal
index 208beb6363..38d750f76b 100644
--- a/audio/7.0/IStreamOut.hal
+++ b/audio/7.0/IStreamOut.hal
@@ -95,7 +95,7 @@ interface IStreamOut extends IStream {
*
* The driver operates on a dedicated thread. The client must ensure that
* the thread is given an appropriate priority and assigned to correct
- * scheduler and cgroup. For this purpose, the method returns identifiers
+ * scheduler and cgroup. For this purpose, the method returns the identifier
* of the driver thread.
*
* @param frameSize the size of a single frame, in bytes.
@@ -109,7 +109,9 @@ interface IStreamOut extends IStream {
* specified at the stream opening.
* @return statusMQ a message queue used for passing status from the driver
* using WriteStatus structures.
- * @return threadInfo identifiers of the driver's dedicated thread.
+ * @return threadId identifier of the driver's dedicated thread; the caller
+ * may adjust the thread priority to match the priority
+ * of the thread that provides audio data.
*/
prepareForWriting(uint32_t frameSize, uint32_t framesCount)
generates (
@@ -117,7 +119,7 @@ interface IStreamOut extends IStream {
fmq_sync<WriteCommand> commandMQ,
fmq_sync<uint8_t> dataMQ,
fmq_sync<WriteStatus> statusMQ,
- ThreadInfo threadInfo);
+ int32_t threadId);
/**
* Return the number of audio frames written by the audio DSP to DAC since
diff --git a/audio/7.0/config/api/current.txt b/audio/7.0/config/api/current.txt
index 98c5eac982..fd9a8ef200 100644
--- a/audio/7.0/config/api/current.txt
+++ b/audio/7.0/config/api/current.txt
@@ -6,6 +6,81 @@ package audio.policy.configuration.V7_0 {
method public java.util.List<java.lang.String> getItem();
}
+ public enum AudioChannelMask {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_10;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_11;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_12;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_13;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_14;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_15;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_16;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_17;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_18;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_19;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_20;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_21;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_22;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_23;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_24;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_3;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_4;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_5;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_6;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_7;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_8;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_9;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_2POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_2POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_3POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_3POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_5POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_6;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_FRONT_BACK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_STEREO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_VOICE_CALL_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_2POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_2POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_2POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_3POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_3POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1POINT4;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1_BACK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_6POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_7POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_7POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_7POINT1POINT4;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_HAPTIC_AB;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_MONO_HAPTIC_A;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_PENTA;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_QUAD;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_QUAD_BACK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_QUAD_SIDE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_STEREO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_SURROUND;
+ }
+
+ public enum AudioContentType {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_MOVIE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_MUSIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_SONIFICATION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_SPEECH;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_UNKNOWN;
+ }
+
public enum AudioDevice {
method public String getRawName();
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_AMBIENT;
@@ -116,6 +191,7 @@ package audio.policy.configuration.V7_0 {
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_HD;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_TWSP;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_CELT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DEFAULT;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DOLBY_TRUEHD;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DSD;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DTS;
@@ -164,18 +240,59 @@ package audio.policy.configuration.V7_0 {
method public void setVersion(audio.policy.configuration.V7_0.Version);
}
+ public enum AudioSource {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_CAMCORDER;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_DEFAULT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_ECHO_REFERENCE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_FM_TUNER;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_HOTWORD;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_MIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_REMOTE_SUBMIX;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_UNPROCESSED;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_CALL;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_COMMUNICATION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_DOWNLINK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_PERFORMANCE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_RECOGNITION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_UPLINK;
+ }
+
+ public enum AudioStreamType {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ACCESSIBILITY;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ALARM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_BLUETOOTH_SCO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_DTMF;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ENFORCED_AUDIBLE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_MUSIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_NOTIFICATION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_PATCH;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_REROUTING;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_RING;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_SYSTEM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_TTS;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_VOICE_CALL;
+ }
+
public enum AudioUsage {
method public String getRawName();
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ALARM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ANNOUNCEMENT;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_SONIFICATION;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_CALL_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_EMERGENCY;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_GAME;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_MEDIA;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_NOTIFICATION;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_SAFETY;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_UNKNOWN;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VEHICLE_STATUS;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VIRTUAL_SOURCE;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
@@ -234,7 +351,7 @@ package audio.policy.configuration.V7_0 {
public static class Gains.Gain {
ctor public Gains.Gain();
- method public String getChannel_mask();
+ method public audio.policy.configuration.V7_0.AudioChannelMask getChannel_mask();
method public int getDefaultValueMB();
method public int getMaxRampMs();
method public int getMaxValueMB();
@@ -244,7 +361,7 @@ package audio.policy.configuration.V7_0 {
method public String getName();
method public int getStepValueMB();
method public boolean getUseForVolume();
- method public void setChannel_mask(String);
+ method public void setChannel_mask(audio.policy.configuration.V7_0.AudioChannelMask);
method public void setDefaultValueMB(int);
method public void setMaxRampMs(int);
method public void setMaxValueMB(int);
@@ -327,14 +444,14 @@ package audio.policy.configuration.V7_0 {
public class Profile {
ctor public Profile();
- method public String getChannelMasks();
+ method public java.util.List<audio.policy.configuration.V7_0.AudioChannelMask> getChannelMasks();
method public String getFormat();
method public String getName();
- method public String getSamplingRates();
- method public void setChannelMasks(String);
+ method public java.util.List<java.math.BigInteger> getSamplingRates();
+ method public void setChannelMasks(java.util.List<audio.policy.configuration.V7_0.AudioChannelMask>);
method public void setFormat(String);
method public void setName(String);
- method public void setSamplingRates(String);
+ method public void setSamplingRates(java.util.List<java.math.BigInteger>);
}
public class Reference {
@@ -365,24 +482,6 @@ package audio.policy.configuration.V7_0 {
method public void setType(audio.policy.configuration.V7_0.MixType);
}
- public enum Stream {
- method public String getRawName();
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ACCESSIBILITY;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ALARM;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ASSISTANT;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_BLUETOOTH_SCO;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_DTMF;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ENFORCED_AUDIBLE;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_MUSIC;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_NOTIFICATION;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_PATCH;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_REROUTING;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_RING;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_SYSTEM;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_TTS;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_VOICE_CALL;
- }
-
public class SurroundFormats {
ctor public SurroundFormats();
method public java.util.List<audio.policy.configuration.V7_0.SurroundFormats.Format> getFormat();
@@ -412,10 +511,10 @@ package audio.policy.configuration.V7_0 {
method public audio.policy.configuration.V7_0.DeviceCategory getDeviceCategory();
method public java.util.List<java.lang.String> getPoint();
method public String getRef();
- method public audio.policy.configuration.V7_0.Stream getStream();
+ method public audio.policy.configuration.V7_0.AudioStreamType getStream();
method public void setDeviceCategory(audio.policy.configuration.V7_0.DeviceCategory);
method public void setRef(String);
- method public void setStream(audio.policy.configuration.V7_0.Stream);
+ method public void setStream(audio.policy.configuration.V7_0.AudioStreamType);
}
public class Volumes {
diff --git a/audio/7.0/config/audio_policy_configuration.xsd b/audio/7.0/config/audio_policy_configuration.xsd
index 19c6f70536..4555a88034 100644
--- a/audio/7.0/config/audio_policy_configuration.xsd
+++ b/audio/7.0/config/audio_policy_configuration.xsd
@@ -13,7 +13,6 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-<!-- TODO: define a targetNamespace. Note that it will break retrocompatibility -->
<xs:schema version="2.0"
elementFormDefault="qualified"
attributeFormDefault="unqualified"
@@ -27,7 +26,9 @@
<xs:simpleType name="halVersion">
<xs:annotation>
<xs:documentation xml:lang="en">
- Version of the interface the hal implements.
+ Version of the interface the hal implements. Note that this
+ relates to legacy HAL API versions since HIDL APIs are versioned
+ using other mechanisms.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:decimal">
@@ -154,7 +155,6 @@
<xs:element name="item" type="xs:token" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
- <!-- TODO: separate values by space for better xsd validations. -->
<xs:simpleType name="audioInOutFlags">
<xs:annotation>
<xs:documentation xml:lang="en">
@@ -212,9 +212,6 @@
</xs:element>
</xs:sequence>
</xs:complexType>
- <!-- Enum values of audio_device_t in audio.h
- TODO: generate from hidl to avoid manual sync.
- TODO: separate source and sink in the xml for better xsd validations. -->
<xs:simpleType name="audioDevice">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_DEVICE_NONE"/>
@@ -252,7 +249,6 @@
<xs:enumeration value="AUDIO_DEVICE_OUT_DEFAULT"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_STUB"/>
- <!-- Due to the xml format, IN types can not be a separated from OUT types -->
<xs:enumeration value="AUDIO_DEVICE_IN_COMMUNICATION"/>
<xs:enumeration value="AUDIO_DEVICE_IN_AMBIENT"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BUILTIN_MIC"/>
@@ -298,10 +294,9 @@
<xs:simpleType name="extendableAudioDevice">
<xs:union memberTypes="audioDevice vendorExtension"/>
</xs:simpleType>
- <!-- Enum values of audio_format_t in audio.h
- TODO: generate from hidl to avoid manual sync. -->
<xs:simpleType name="audioFormat">
<xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_FORMAT_DEFAULT" />
<xs:enumeration value="AUDIO_FORMAT_PCM_16_BIT" />
<xs:enumeration value="AUDIO_FORMAT_PCM_8_BIT"/>
<xs:enumeration value="AUDIO_FORMAT_PCM_32_BIT"/>
@@ -382,9 +377,14 @@
<xs:simpleType name="extendableAudioFormat">
<xs:union memberTypes="audioFormat vendorExtension"/>
</xs:simpleType>
- <!-- Enum values of audio::common::4_0::AudioUsage
- TODO: generate from HIDL to avoid manual sync. -->
<xs:simpleType name="audioUsage">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Audio usage specifies the intended use case for the sound being played.
+ Please consult frameworks/base/media/java/android/media/AudioAttributes.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_USAGE_UNKNOWN" />
<xs:enumeration value="AUDIO_USAGE_MEDIA" />
@@ -399,34 +399,119 @@
<xs:enumeration value="AUDIO_USAGE_GAME" />
<xs:enumeration value="AUDIO_USAGE_VIRTUAL_SOURCE" />
<xs:enumeration value="AUDIO_USAGE_ASSISTANT" />
+ <xs:enumeration value="AUDIO_USAGE_CALL_ASSISTANT" />
+ <xs:enumeration value="AUDIO_USAGE_EMERGENCY" />
+ <xs:enumeration value="AUDIO_USAGE_SAFETY" />
+ <xs:enumeration value="AUDIO_USAGE_VEHICLE_STATUS" />
+ <xs:enumeration value="AUDIO_USAGE_ANNOUNCEMENT" />
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="audioUsageList">
<xs:list itemType="audioUsage"/>
</xs:simpleType>
- <!-- TODO: Change to a space separated list to xsd enforce correctness. -->
- <xs:simpleType name="samplingRates">
+ <xs:simpleType name="audioContentType">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Audio content type expresses the general category of the content.
+ Please consult frameworks/base/media/java/android/media/AudioAttributes.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="[0-9]+(,[0-9]+)*"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_UNKNOWN"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_SPEECH"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_MUSIC"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_MOVIE"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_SONIFICATION"/>
</xs:restriction>
</xs:simpleType>
- <!-- TODO: Change to a space separated list to xsd enforce correctness. -->
- <xs:simpleType name="channelMask">
+ <xs:simpleType name="samplingRates">
+ <xs:list itemType="xs:nonNegativeInteger" />
+ </xs:simpleType>
+ <xs:simpleType name="audioChannelMask">
<xs:annotation>
<xs:documentation xml:lang="en">
- Comma (",") separated list of channel flags
- from audio_channel_mask_t.
+ Audio channel mask specifies presence of particular channels.
+ There are two representations:
+ - representation position (traditional discrete channel specification,
+ e.g. "left", "right");
+ - indexed (this is similar to "tracks" in audio mixing, channels
+ are represented using numbers).
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="[_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_STEREO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_2POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_2POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_2POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_3POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_3POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_QUAD"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_QUAD_BACK"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_QUAD_SIDE"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_SURROUND"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_PENTA"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1_BACK"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1_SIDE"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1POINT4"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_6POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_7POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_7POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_7POINT1POINT4"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_MONO_HAPTIC_A"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_HAPTIC_AB"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_STEREO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_6"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_2POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_2POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_3POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_3POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_5POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_VOICE_CALL_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_3"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_4"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_5"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_6"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_7"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_8"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_9"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_10"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_11"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_12"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_13"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_14"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_15"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_16"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_17"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_18"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_19"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_20"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_21"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_22"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_23"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_24"/>
</xs:restriction>
</xs:simpleType>
+ <xs:simpleType name="channelMasks">
+ <xs:list itemType="audioChannelMask" />
+ </xs:simpleType>
<xs:complexType name="profile">
<xs:attribute name="name" type="xs:token" use="optional"/>
<xs:attribute name="format" type="extendableAudioFormat" use="optional"/>
<xs:attribute name="samplingRates" type="samplingRates" use="optional"/>
- <xs:attribute name="channelMasks" type="channelMask" use="optional"/>
+ <xs:attribute name="channelMasks" type="channelMasks" use="optional"/>
</xs:complexType>
<xs:simpleType name="gainMode">
<xs:restriction base="xs:string">
@@ -441,7 +526,7 @@
<xs:complexType>
<xs:attribute name="name" type="xs:token" use="required"/>
<xs:attribute name="mode" type="gainMode" use="required"/>
- <xs:attribute name="channel_mask" type="channelMask" use="optional"/>
+ <xs:attribute name="channel_mask" type="audioChannelMask" use="optional"/>
<xs:attribute name="minValueMB" type="xs:int" use="optional"/>
<xs:attribute name="maxValueMB" type="xs:int" use="optional"/>
<xs:attribute name="defaultValueMB" type="xs:int" use="optional"/>
@@ -537,9 +622,14 @@
<xs:pattern value="([0-9]{1,2}|100),-?[0-9]+"/>
</xs:restriction>
</xs:simpleType>
- <!-- Enum values of audio_stream_type_t in audio-base.h
- TODO: generate from hidl to avoid manual sync. -->
- <xs:simpleType name="stream">
+ <xs:simpleType name="audioStreamType">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Audio stream type describing the intended use case of a stream.
+ Please consult frameworks/base/media/java/android/media/AudioSystem.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_STREAM_VOICE_CALL"/>
<xs:enumeration value="AUDIO_STREAM_SYSTEM"/>
@@ -557,8 +647,32 @@
<xs:enumeration value="AUDIO_STREAM_PATCH"/>
</xs:restriction>
</xs:simpleType>
- <!-- Enum values of device_category from Volume.h.
- TODO: generate from hidl to avoid manual sync. -->
+ <xs:simpleType name="audioSource">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ An audio source defines the intended use case for the sound being recorded.
+ Please consult frameworks/base/media/java/android/media/MediaRecorder.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_SOURCE_DEFAULT"/>
+ <xs:enumeration value="AUDIO_SOURCE_MIC"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_UPLINK"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_DOWNLINK"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_CALL"/>
+ <xs:enumeration value="AUDIO_SOURCE_CAMCORDER"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_RECOGNITION"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_COMMUNICATION"/>
+ <xs:enumeration value="AUDIO_SOURCE_REMOTE_SUBMIX"/>
+ <xs:enumeration value="AUDIO_SOURCE_UNPROCESSED"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_PERFORMANCE"/>
+ <xs:enumeration value="AUDIO_SOURCE_ECHO_REFERENCE"/>
+ <xs:enumeration value="AUDIO_SOURCE_FM_TUNER"/>
+ <xs:enumeration value="AUDIO_SOURCE_HOTWORD"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <!-- Enum values of device_category from Volume.h. -->
<xs:simpleType name="deviceCategory">
<xs:restriction base="xs:string">
<xs:enumeration value="DEVICE_CATEGORY_HEADSET"/>
@@ -591,7 +705,7 @@
<xs:sequence>
<xs:element name="point" type="volumePoint" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
- <xs:attribute name="stream" type="stream"/>
+ <xs:attribute name="stream" type="audioStreamType"/>
<xs:attribute name="deviceCategory" type="deviceCategory"/>
<xs:attribute name="ref" type="xs:token" use="optional"/>
</xs:complexType>
diff --git a/audio/7.0/config/update_audio_policy_config.sh b/audio/7.0/config/update_audio_policy_config.sh
new file mode 100755
index 0000000000..8714b5f2d3
--- /dev/null
+++ b/audio/7.0/config/update_audio_policy_config.sh
@@ -0,0 +1,159 @@
+#!/bin/bash
+
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script is used to update audio policy configuration files
+# to comply with the updated audio_policy_configuration.xsd from V7.0.
+#
+# The main difference is the separator used in lists for attributes.
+# Since the XML Schema Definition standard only allows space to be
+# used as a separator (see https://www.w3.org/TR/xmlschema11-2/#list-datatypes)
+# the previous versions used a regular expression to validate lists
+# in attribute values. E.g. the channel masks were validated using
+# the following regexp: [_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*
+# This has an obvious drawback of missing typos in the config file.
+#
+# The V7.0 has shifted to defining most of the frequently changed
+# types in the XSD schema only. This allows for verifying all the values
+# in lists, but in order to comply with XML Schema requirements
+# list elements must be separated by space.
+#
+# Since the APM config files typically use include directives,
+# the script must be pointed to the main APM config file and will
+# take care of all the included files automatically.
+# If the included file is a shared version from 'frameworks/av',
+# instead of updating it the script checks if there is a newer
+# version with the corresponding name suffix (e.g.
+# 'a2dp_audio_policy_configuration_7_0.xml') and updates the include
+# path instead.
+
+set -euo pipefail
+
+if (echo "$@" | grep -qe -h); then
+ echo "This script will update Audio Policy Manager config file"
+ echo "to the format required by V7.0 XSD schema from a previous"
+ echo "version."
+ echo
+ echo "USAGE: $0 [APM_XML_FILE] [OLD_VERSION]"
+ echo " APM_XML_FILE specifies the path to audio_policy_configuration.xml"
+ echo " relative to Android repository root"
+ echo " OLD_VERSION specifies the version of schema currently used"
+ echo
+ echo "Example: $0 device/generic/goldfish/audio/policy/audio_policy_configuration.xml 6.0"
+ exit
+fi
+readonly HAL_DIRECTORY=hardware/interfaces/audio
+readonly SHARED_CONFIGS_DIRECTORY=frameworks/av/services/audiopolicy/config
+readonly OLD_VERSION=${2:-$(ls ${ANDROID_BUILD_TOP}/${HAL_DIRECTORY} | grep -E '[0-9]+\.[0-9]+' |
+ sort -n | tail -n1)}
+readonly NEW_VERSION=7.0
+readonly NEW_VERSION_UNDERSCORE=7_0
+
+readonly SOURCE_CONFIG=${ANDROID_BUILD_TOP}/$1
+
+# First, validate the input using the schema of the current version
+
+echo Validating the source against the $OLD_VERSION schema
+xmllint --noout --xinclude \
+ --nofixup-base-uris --path "$ANDROID_BUILD_TOP/$SHARED_CONFIGS_DIRECTORY" \
+ --schema ${ANDROID_BUILD_TOP}/${HAL_DIRECTORY}/${OLD_VERSION}/config/audio_policy_configuration.xsd \
+ ${SOURCE_CONFIG}
+if [ $? -ne 0 ]; then
+ echo
+ echo "Config file fails validation for the specified version $OLD_VERSION--unsafe to update"
+ exit 1
+fi
+
+# Find all the source files recursively
+
+SOURCE_FILES=${SOURCE_CONFIG}
+SHARED_FILES=
+findIncludes() {
+ local FILES_TO_CHECK=
+ for F in $1; do
+ local FOUND_INCLUDES=$(grep -Po '<xi:include href="\K[^"]+(?="\/>)' ${F})
+ for I in ${FOUND_INCLUDES}; do
+ SOURCE_FULL_PATH=$(dirname ${F})/${I}
+ SHARED_FULL_PATH=${ANDROID_BUILD_TOP}/${SHARED_CONFIGS_DIRECTORY}/${I}
+ if [ -f "$SOURCE_FULL_PATH" ]; then
+ # Device-specific file.
+ SOURCE_FILES+=$'\n'${SOURCE_FULL_PATH}
+ FILES_TO_CHECK+=$'\n'${SOURCE_FULL_PATH}
+ elif [ -f "$SHARED_FULL_PATH" ]; then
+ # Shared file from the frameworks repo.
+ SHARED_FILES+=$'\n'${I}
+ FILES_TO_CHECK+=$'\n'${SHARED_FULL_PATH}
+ else
+ echo
+ echo "Include file not found: $I"
+ exit 1
+ fi
+ done
+ done
+ if [ "$FILES_TO_CHECK" ]; then
+ findIncludes "$FILES_TO_CHECK"
+ fi
+}
+findIncludes ${SOURCE_FILES}
+
+echo "Will update $1 and included device-specific files in place."
+echo "Will update paths to shared included files."
+echo "Press Ctrl-C to cancel, Enter to continue"
+read
+
+updateFile() {
+ FILE=$1
+ ATTR=$2
+ SEPARATOR=$3
+ SRC_LINES=$(grep -nPo "$ATTR=\"[^\"]+\"" ${FILE} || true)
+ for S in $SRC_LINES; do
+ # Prepare instruction for 'sed' for in-place editing of specified line
+ R=$(echo ${S} | sed -e 's/^[0-9]\+:/\//' | sed -e "s/$SEPARATOR/ /g")
+ S=$(echo ${S} | sed -e 's/:/s\//')${R}/
+ echo ${S} | sed -i -f - ${FILE}
+ done
+}
+for F in $SOURCE_FILES; do
+ updateFile ${F} "channelMasks" ","
+ updateFile ${F} "samplingRates" ","
+done;
+
+updateIncludes() {
+ FILE=$1
+ for I in $SHARED_FILES; do
+ NEW_VERSION_I=${I%.*}_${NEW_VERSION_UNDERSCORE}.${I##*.}
+ if [ -e "$ANDROID_BUILD_TOP/$SHARED_CONFIGS_DIRECTORY/$NEW_VERSION_I" ]; then
+ echo "s/$I/$NEW_VERSION_I/g" | sed -i -f - ${FILE}
+ fi
+ done
+}
+for F in $SOURCE_FILES; do
+ updateIncludes ${F}
+done
+
+# Validate the results against the new schema
+
+echo Validating the result against the $NEW_VERSION schema
+xmllint --noout --xinclude \
+ --nofixup-base-uris --path "$ANDROID_BUILD_TOP/$SHARED_CONFIGS_DIRECTORY" \
+ --schema ${ANDROID_BUILD_TOP}/${HAL_DIRECTORY}/${NEW_VERSION}/config/audio_policy_configuration.xsd \
+ ${SOURCE_CONFIG}
+if [ $? -ne 0 ]; then
+ echo
+ echo "Config file fails validation for the specified version $NEW_VERSION--please check the changes"
+ exit 1
+fi
+echo
+echo "Please check the diff and update the paths to APM shared files in the device makefile!"
diff --git a/audio/7.0/types.hal b/audio/7.0/types.hal
index b0b08430fa..15ca4921b0 100644
--- a/audio/7.0/types.hal
+++ b/audio/7.0/types.hal
@@ -355,3 +355,58 @@ struct PlaybackRate {
*/
TimestretchFallbackMode fallbackMode;
};
+
+/**
+ * The audio output flags serve two purposes:
+ *
+ * - when an output stream is created they indicate its attributes;
+ *
+ * - when present in an output profile descriptor listed for a particular audio
+ * hardware module, they indicate that an output stream can be opened that
+ * supports the attributes indicated by the flags.
+ */
+@export(name="audio_output_flags_t", value_prefix="AUDIO_OUTPUT_FLAG_")
+enum AudioOutputFlag : int32_t {
+ NONE = 0x0, // no attributes
+ DIRECT = 0x1, // this output directly connects a track
+ // to one output stream: no software mixer
+ PRIMARY = 0x2, // this output is the primary output of the device. It is
+ // unique and must be present. It is opened by default and
+ // receives routing, audio mode and volume controls related
+ // to voice calls.
+ FAST = 0x4, // output supports "fast tracks", defined elsewhere
+ DEEP_BUFFER = 0x8, // use deep audio buffers
+ COMPRESS_OFFLOAD = 0x10, // offload playback of compressed streams to
+ // hardware codec
+ NON_BLOCKING = 0x20, // use non-blocking write
+ HW_AV_SYNC = 0x40, // output uses a hardware A/V sync
+ TTS = 0x80, // output for streams transmitted through speaker at a
+ // sample rate high enough to accommodate lower-range
+ // ultrasonic p/b
+ RAW = 0x100, // minimize signal processing
+ SYNC = 0x200, // synchronize I/O streams
+ IEC958_NONAUDIO = 0x400, // Audio stream contains compressed audio in SPDIF
+ // data bursts, not PCM.
+ DIRECT_PCM = 0x2000, // Audio stream containing PCM data that needs
+ // to pass through compress path for DSP post proc.
+ MMAP_NOIRQ = 0x4000, // output operates in MMAP no IRQ mode.
+ VOIP_RX = 0x8000, // preferred output for VoIP calls.
+ /** preferred output for call music */
+ INCALL_MUSIC = 0x10000,
+};
+
+/**
+ * The audio input flags are analogous to audio output flags.
+ */
+@export(name="audio_input_flags_t", value_prefix="AUDIO_INPUT_FLAG_")
+enum AudioInputFlag : int32_t {
+ NONE = 0x0, // no attributes
+ FAST = 0x1, // prefer an input that supports "fast tracks"
+ HW_HOTWORD = 0x2, // prefer an input that captures from hw hotword source
+ RAW = 0x4, // minimize signal processing
+ SYNC = 0x8, // synchronize I/O streams
+ MMAP_NOIRQ = 0x10, // input operates in MMAP no IRQ mode.
+ VOIP_TX = 0x20, // preferred input for VoIP calls.
+ HW_AV_SYNC = 0x40, // input connected to an output that uses a hardware A/V sync
+ DIRECT = 0x80, // for acquiring encoded streams
+};
diff --git a/audio/common/7.0/types.hal b/audio/common/7.0/types.hal
index 2288eb1d47..94d0af7673 100644
--- a/audio/common/7.0/types.hal
+++ b/audio/common/7.0/types.hal
@@ -18,34 +18,18 @@ package android.hardware.audio.common@7.0;
import android.hidl.safe_union@1.0;
-/*
- *
- * IDs and Handles
- *
- */
-
/**
- * Handle type for identifying audio sources and sinks.
+ * Handle type for identifying audio resources. Handles are allocated by the framework.
*/
typedef int32_t AudioIoHandle;
/**
- * Audio hw module handle functions or structures referencing a module.
- */
-typedef int32_t AudioModuleHandle;
-
-/**
* Each port has a unique ID or handle allocated by policy manager.
*/
typedef int32_t AudioPortHandle;
/**
- * Each patch is identified by a handle at the interface used to create that
- * patch. For instance, when a patch is created by the audio HAL, the HAL
- * allocates and returns a handle. This handle is unique to a given audio HAL
- * hardware module. But the same patch receives another system wide unique
- * handle allocated by the framework. This unique handle is used for all
- * transactions inside the framework.
+ * Each patch is identified by a handle allocated by the HAL.
*/
typedef int32_t AudioPatchHandle;
@@ -55,17 +39,6 @@ typedef int32_t AudioPatchHandle;
typedef uint32_t AudioHwSync;
/**
- * Each port has a unique ID or handle allocated by policy manager.
- */
-@export(name="")
-enum AudioHandleConsts : int32_t {
- AUDIO_IO_HANDLE_NONE = 0,
- AUDIO_MODULE_HANDLE_NONE = 0,
- AUDIO_PORT_HANDLE_NONE = 0,
- AUDIO_PATCH_HANDLE_NONE = 0,
-};
-
-/**
* Commonly used structure for passing unique identifieds (UUID).
* For the definition of UUID, refer to ITU-T X.667 spec.
*/
@@ -86,116 +59,25 @@ struct Uuid {
/**
* Audio stream type describing the intended use case of a stream.
+ * See 'audioStreamType' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_stream_type_t", value_prefix="AUDIO_STREAM_")
-enum AudioStreamType : int32_t {
- // These values must kept in sync with
- // frameworks/base/media/java/android/media/AudioSystem.java
- /** Used to identify the default audio stream volume. */
- DEFAULT = -1,
- /** Specifies the minimum value for use in checks and loops. */
- MIN = 0,
- /** Used to identify the volume of audio streams for phone calls. */
- VOICE_CALL = 0,
- /** Used to identify the volume of audio streams for system sounds. */
- SYSTEM = 1,
- /**
- * Used to identify the volume of audio streams for the phone ring
- * and message alerts.
- */
- RING = 2,
- /** Used to identify the volume of audio streams for music playback. */
- MUSIC = 3,
- /** Used to identify the volume of audio streams for alarms. */
- ALARM = 4,
- /** Used to identify the volume of audio streams for notifications. */
- NOTIFICATION = 5,
- /**
- * Used to identify the volume of audio streams for phone calls
- * when connected on bluetooth.
- */
- BLUETOOTH_SCO = 6,
- /**
- * Used to identify the volume of audio streams for enforced system
- * sounds in certain countries (e.g camera in Japan). */
- ENFORCED_AUDIBLE = 7,
- /** Used to identify the volume of audio streams for DTMF tones. */
- DTMF = 8,
- /**
- * Used to identify the volume of audio streams exclusively transmitted
- * through the speaker (TTS) of the device.
- */
- TTS = 9,
- /**
- * Used to identify the volume of audio streams for accessibility prompts.
- */
- ACCESSIBILITY = 10,
- /** Used to identify the volume of audio streams for virtual assistant. */
- ASSISTANT = 11,
-};
+typedef string AudioStreamType;
-@export(name="audio_source_t", value_prefix="AUDIO_SOURCE_")
-enum AudioSource : int32_t {
- // These values must kept in sync with
- // frameworks/base/media/java/android/media/MediaRecorder.java,
- // system/media/audio_effects/include/audio_effects/audio_effects_conf.h
- /** Default audio source. */
- DEFAULT = 0,
- /** Microphone audio source. */
- MIC = 1,
- /** Voice call uplink (Tx) audio source. */
- VOICE_UPLINK = 2,
- /** Voice call downlink (Rx) audio source. */
- VOICE_DOWNLINK = 3,
- /** Voice call uplink + downlink audio source. */
- VOICE_CALL = 4,
- /**
- * Microphone audio source tuned for video recording, with the same
- * orientation as the camera if available.
- */
- CAMCORDER = 5,
- /** Microphone audio source tuned for voice recognition. */
- VOICE_RECOGNITION = 6,
- /**
- * Microphone audio source tuned for voice communications such as VoIP. It
- * will for instance take advantage of echo cancellation or automatic gain
- * control if available.
- */
- VOICE_COMMUNICATION = 7,
- /**
- * Source for the mix to be presented remotely. An example of remote
- * presentation is Wifi Display where a dongle attached to a TV can be used
- * to play the mix captured by this audio source.
- */
- REMOTE_SUBMIX = 8,
- /**
- * Source for unprocessed sound. Usage examples include level measurement
- * and raw signal analysis.
- */
- UNPROCESSED = 9,
- /**
- * Source for capturing audio meant to be processed in real time and played back for live
- * performance (e.g karaoke). The capture path will minimize latency and coupling with
- * playback path.
- */
- VOICE_PERFORMANCE = 10,
- /**
- * Source for an echo canceller to capture the reference signal to be cancelled.
- * The echo reference signal will be captured as close as possible to the DAC in order
- * to include all post processing applied to the playback path.
- */
- ECHO_REFERENCE = 1997,
- /** Virtual source for the built-in FM tuner. */
- FM_TUNER = 1998,
- /** Virtual source for the last captured hotword. */
- HOTWORD = 1999,
-};
+/**
+ * An audio source defines the intended use case for the sound being recorded.
+ * See 'audioSource' in audio_policy_configuration.xsd for the
+ * list of allowed values.
+ */
+typedef string AudioSource;
-typedef int32_t AudioSession;
/**
- * Special audio session values.
+ * An audio session identifier is used to designate the particular
+ * playback or recording session (e.g. playback performed by a certain
+ * application).
*/
-@export(name="audio_session_t", value_prefix="AUDIO_SESSION_")
+typedef int32_t AudioSession;
+
enum AudioSessionConsts : int32_t {
/**
* Session for effects attached to a particular sink or source audio device
@@ -213,382 +95,29 @@ enum AudioSessionConsts : int32_t {
* (value must be 0)
*/
OUTPUT_MIX = 0,
- /**
- * Application does not specify an explicit session ID to be used, and
- * requests a new session ID to be allocated. Corresponds to
- * AudioManager.AUDIO_SESSION_ID_GENERATE and
- * AudioSystem.AUDIO_SESSION_ALLOCATE.
- */
- ALLOCATE = 0,
- /**
- * For use with AudioRecord::start(), this indicates no trigger session.
- * It is also used with output tracks and patch tracks, which never have a
- * session.
- */
- NONE = 0
};
/**
- * Audio format is a 32-bit word that consists of:
- * main format field (upper 8 bits)
- * sub format field (lower 24 bits).
- *
- * The main format indicates the main codec type. The sub format field indicates
- * options and parameters for each format. The sub format is mainly used for
- * record to indicate for instance the requested bitrate or profile. It can
- * also be used for certain formats to give informations not present in the
- * encoded audio stream (e.g. octet alignement for AMR).
+ * Audio format indicates audio codec type.
+ * See 'audioFormat' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_format_t", value_prefix="AUDIO_FORMAT_")
-enum AudioFormat : uint32_t {
- INVALID = 0xFFFFFFFFUL,
- DEFAULT = 0,
- PCM = 0x00000000UL,
- MP3 = 0x01000000UL,
- AMR_NB = 0x02000000UL,
- AMR_WB = 0x03000000UL,
- AAC = 0x04000000UL,
- /** Deprecated, Use AAC_HE_V1 */
- HE_AAC_V1 = 0x05000000UL,
- /** Deprecated, Use AAC_HE_V2 */
- HE_AAC_V2 = 0x06000000UL,
- VORBIS = 0x07000000UL,
- OPUS = 0x08000000UL,
- AC3 = 0x09000000UL,
- E_AC3 = 0x0A000000UL,
- DTS = 0x0B000000UL,
- DTS_HD = 0x0C000000UL,
- /** IEC61937 is encoded audio wrapped in 16-bit PCM. */
- IEC61937 = 0x0D000000UL,
- DOLBY_TRUEHD = 0x0E000000UL,
- EVRC = 0x10000000UL,
- EVRCB = 0x11000000UL,
- EVRCWB = 0x12000000UL,
- EVRCNW = 0x13000000UL,
- AAC_ADIF = 0x14000000UL,
- WMA = 0x15000000UL,
- WMA_PRO = 0x16000000UL,
- AMR_WB_PLUS = 0x17000000UL,
- MP2 = 0x18000000UL,
- QCELP = 0x19000000UL,
- DSD = 0x1A000000UL,
- FLAC = 0x1B000000UL,
- ALAC = 0x1C000000UL,
- APE = 0x1D000000UL,
- AAC_ADTS = 0x1E000000UL,
- SBC = 0x1F000000UL,
- APTX = 0x20000000UL,
- APTX_HD = 0x21000000UL,
- AC4 = 0x22000000UL,
- LDAC = 0x23000000UL,
- /** Dolby Metadata-enhanced Audio Transmission */
- MAT = 0x24000000UL,
- AAC_LATM = 0x25000000UL,
- CELT = 0x26000000UL,
- APTX_ADAPTIVE = 0x27000000UL,
- LHDC = 0x28000000UL,
- LHDC_LL = 0x29000000UL,
- APTX_TWSP = 0x2A000000UL,
-
- /** Deprecated */
- MAIN_MASK = 0xFF000000UL,
- SUB_MASK = 0x00FFFFFFUL,
-
- /* Subformats */
- PCM_SUB_16_BIT = 0x1, // PCM signed 16 bits
- PCM_SUB_8_BIT = 0x2, // PCM unsigned 8 bits
- PCM_SUB_32_BIT = 0x3, // PCM signed .31 fixed point
- PCM_SUB_8_24_BIT = 0x4, // PCM signed 8.23 fixed point
- PCM_SUB_FLOAT = 0x5, // PCM single-precision float pt
- PCM_SUB_24_BIT_PACKED = 0x6, // PCM signed .23 fix pt (3 bytes)
-
- MP3_SUB_NONE = 0x0,
-
- AMR_SUB_NONE = 0x0,
-
- AAC_SUB_MAIN = 0x1,
- AAC_SUB_LC = 0x2,
- AAC_SUB_SSR = 0x4,
- AAC_SUB_LTP = 0x8,
- AAC_SUB_HE_V1 = 0x10,
- AAC_SUB_SCALABLE = 0x20,
- AAC_SUB_ERLC = 0x40,
- AAC_SUB_LD = 0x80,
- AAC_SUB_HE_V2 = 0x100,
- AAC_SUB_ELD = 0x200,
- AAC_SUB_XHE = 0x300,
-
- VORBIS_SUB_NONE = 0x0,
-
- E_AC3_SUB_JOC = 0x1,
-
- MAT_SUB_1_0 = 0x1,
- MAT_SUB_2_0 = 0x2,
- MAT_SUB_2_1 = 0x3,
-
- /* Aliases */
- /** note != AudioFormat.ENCODING_PCM_16BIT */
- PCM_16_BIT = (PCM | PCM_SUB_16_BIT),
- /** note != AudioFormat.ENCODING_PCM_8BIT */
- PCM_8_BIT = (PCM | PCM_SUB_8_BIT),
- PCM_32_BIT = (PCM | PCM_SUB_32_BIT),
- PCM_8_24_BIT = (PCM | PCM_SUB_8_24_BIT),
- PCM_FLOAT = (PCM | PCM_SUB_FLOAT),
- PCM_24_BIT_PACKED = (PCM | PCM_SUB_24_BIT_PACKED),
- AAC_MAIN = (AAC | AAC_SUB_MAIN),
- AAC_LC = (AAC | AAC_SUB_LC),
- AAC_SSR = (AAC | AAC_SUB_SSR),
- AAC_LTP = (AAC | AAC_SUB_LTP),
- AAC_HE_V1 = (AAC | AAC_SUB_HE_V1),
- AAC_SCALABLE = (AAC | AAC_SUB_SCALABLE),
- AAC_ERLC = (AAC | AAC_SUB_ERLC),
- AAC_LD = (AAC | AAC_SUB_LD),
- AAC_HE_V2 = (AAC | AAC_SUB_HE_V2),
- AAC_ELD = (AAC | AAC_SUB_ELD),
- AAC_XHE = (AAC | AAC_SUB_XHE),
- AAC_ADTS_MAIN = (AAC_ADTS | AAC_SUB_MAIN),
- AAC_ADTS_LC = (AAC_ADTS | AAC_SUB_LC),
- AAC_ADTS_SSR = (AAC_ADTS | AAC_SUB_SSR),
- AAC_ADTS_LTP = (AAC_ADTS | AAC_SUB_LTP),
- AAC_ADTS_HE_V1 = (AAC_ADTS | AAC_SUB_HE_V1),
- AAC_ADTS_SCALABLE = (AAC_ADTS | AAC_SUB_SCALABLE),
- AAC_ADTS_ERLC = (AAC_ADTS | AAC_SUB_ERLC),
- AAC_ADTS_LD = (AAC_ADTS | AAC_SUB_LD),
- AAC_ADTS_HE_V2 = (AAC_ADTS | AAC_SUB_HE_V2),
- AAC_ADTS_ELD = (AAC_ADTS | AAC_SUB_ELD),
- AAC_ADTS_XHE = (AAC_ADTS | AAC_SUB_XHE),
- E_AC3_JOC = (E_AC3 | E_AC3_SUB_JOC),
- MAT_1_0 = (MAT | MAT_SUB_1_0),
- MAT_2_0 = (MAT | MAT_SUB_2_0),
- MAT_2_1 = (MAT | MAT_SUB_2_1),
- AAC_LATM_LC = (AAC_LATM | AAC_SUB_LC),
- AAC_LATM_HE_V1 = (AAC_LATM | AAC_SUB_HE_V1),
- AAC_LATM_HE_V2 = (AAC_LATM | AAC_SUB_HE_V2),
-};
+typedef string AudioFormat;
/**
- * Usage of these values highlights places in the code that use 2- or 8- channel
- * assumptions.
+ * Audio channel mask indicates presence of particular channels.
+ * See 'audioChannelMask' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="")
-enum FixedChannelCount : int32_t {
- FCC_2 = 2, // This is typically due to legacy implementation of stereo I/O
- FCC_8 = 8 // This is typically due to audio mixer and resampler limitations
-};
+typedef string AudioChannelMask;
/**
- * A channel mask per se only defines the presence or absence of a channel, not
- * the order.
- *
- * The channel order convention is that channels are interleaved in order from
- * least significant channel mask bit to most significant channel mask bit,
- * with unused bits skipped. For example for stereo, LEFT would be first,
- * followed by RIGHT.
- * Any exceptions to this convention are noted at the appropriate API.
- *
- * AudioChannelMask is an opaque type and its internal layout should not be
- * assumed as it may change in the future. Instead, always use functions
- * to examine it.
- *
- * These are the current representations:
- *
- * REPRESENTATION_POSITION
- * is a channel mask representation for position assignment. Each low-order
- * bit corresponds to the spatial position of a transducer (output), or
- * interpretation of channel (input). The user of a channel mask needs to
- * know the context of whether it is for output or input. The constants
- * OUT_* or IN_* apply to the bits portion. It is not permitted for no bits
- * to be set.
- *
- * REPRESENTATION_INDEX
- * is a channel mask representation for index assignment. Each low-order
- * bit corresponds to a selected channel. There is no platform
- * interpretation of the various bits. There is no concept of output or
- * input. It is not permitted for no bits to be set.
- *
- * All other representations are reserved for future use.
- *
- * Warning: current representation distinguishes between input and output, but
- * this will not the be case in future revisions of the platform. Wherever there
- * is an ambiguity between input and output that is currently resolved by
- * checking the channel mask, the implementer should look for ways to fix it
- * with additional information outside of the mask.
+ * Basic configuration applicable to any stream of audio.
*/
-@export(name="", value_prefix="AUDIO_CHANNEL_")
-enum AudioChannelMask : uint32_t {
- /** must be 0 for compatibility */
- REPRESENTATION_POSITION = 0,
- /** 1 is reserved for future use */
- REPRESENTATION_INDEX = 2,
- /* 3 is reserved for future use */
-
- /** These can be a complete value of AudioChannelMask */
- NONE = 0x0,
- INVALID = 0xC0000000,
-
- /*
- * These can be the bits portion of an AudioChannelMask
- * with representation REPRESENTATION_POSITION.
- */
-
- /** output channels */
- OUT_FRONT_LEFT = 0x1,
- OUT_FRONT_RIGHT = 0x2,
- OUT_FRONT_CENTER = 0x4,
- OUT_LOW_FREQUENCY = 0x8,
- OUT_BACK_LEFT = 0x10,
- OUT_BACK_RIGHT = 0x20,
- OUT_FRONT_LEFT_OF_CENTER = 0x40,
- OUT_FRONT_RIGHT_OF_CENTER = 0x80,
- OUT_BACK_CENTER = 0x100,
- OUT_SIDE_LEFT = 0x200,
- OUT_SIDE_RIGHT = 0x400,
- OUT_TOP_CENTER = 0x800,
- OUT_TOP_FRONT_LEFT = 0x1000,
- OUT_TOP_FRONT_CENTER = 0x2000,
- OUT_TOP_FRONT_RIGHT = 0x4000,
- OUT_TOP_BACK_LEFT = 0x8000,
- OUT_TOP_BACK_CENTER = 0x10000,
- OUT_TOP_BACK_RIGHT = 0x20000,
- OUT_TOP_SIDE_LEFT = 0x40000,
- OUT_TOP_SIDE_RIGHT = 0x80000,
-
- /**
- * Haptic channel characteristics are specific to a device and
- * only used to play device specific resources (eg: ringtones).
- * The HAL can freely map A and B to haptic controllers, the
- * framework shall not interpret those values and forward them
- * from the device audio assets.
- */
- OUT_HAPTIC_A = 0x20000000,
- OUT_HAPTIC_B = 0x10000000,
-
- OUT_MONO = OUT_FRONT_LEFT,
- OUT_STEREO = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT),
- OUT_2POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_LOW_FREQUENCY),
- OUT_2POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_2POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
- OUT_LOW_FREQUENCY),
- OUT_3POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_3POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
- OUT_LOW_FREQUENCY),
- OUT_QUAD = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_BACK_LEFT | OUT_BACK_RIGHT),
- OUT_QUAD_BACK = OUT_QUAD,
- /** like OUT_QUAD_BACK with *_SIDE_* instead of *_BACK_* */
- OUT_QUAD_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
- OUT_SURROUND = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_BACK_CENTER),
- OUT_PENTA = (OUT_QUAD | OUT_FRONT_CENTER),
- OUT_5POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_BACK_LEFT | OUT_BACK_RIGHT),
- OUT_5POINT1_BACK = OUT_5POINT1,
- /** like OUT_5POINT1_BACK with *_SIDE_* instead of *_BACK_* */
- OUT_5POINT1_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
- OUT_5POINT1POINT2 = (OUT_5POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_5POINT1POINT4 = (OUT_5POINT1 |
- OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
- OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
- OUT_6POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_BACK_LEFT | OUT_BACK_RIGHT |
- OUT_BACK_CENTER),
- /** matches the correct AudioFormat.CHANNEL_OUT_7POINT1_SURROUND */
- OUT_7POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_BACK_LEFT | OUT_BACK_RIGHT |
- OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
- OUT_7POINT1POINT2 = (OUT_7POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_7POINT1POINT4 = (OUT_7POINT1 |
- OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
- OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
- OUT_MONO_HAPTIC_A = (OUT_FRONT_LEFT | OUT_HAPTIC_A),
- OUT_STEREO_HAPTIC_A = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_HAPTIC_A),
- OUT_HAPTIC_AB = (OUT_HAPTIC_A | OUT_HAPTIC_B),
- OUT_MONO_HAPTIC_AB = (OUT_FRONT_LEFT | OUT_HAPTIC_A | OUT_HAPTIC_B),
- OUT_STEREO_HAPTIC_AB = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_HAPTIC_A | OUT_HAPTIC_B),
- // Note that the 2.0 OUT_ALL* have been moved to helper functions
-
- /* These are bits only, not complete values */
-
- /** input channels */
- IN_LEFT = 0x4,
- IN_RIGHT = 0x8,
- IN_FRONT = 0x10,
- IN_BACK = 0x20,
- IN_LEFT_PROCESSED = 0x40,
- IN_RIGHT_PROCESSED = 0x80,
- IN_FRONT_PROCESSED = 0x100,
- IN_BACK_PROCESSED = 0x200,
- IN_PRESSURE = 0x400,
- IN_X_AXIS = 0x800,
- IN_Y_AXIS = 0x1000,
- IN_Z_AXIS = 0x2000,
- IN_BACK_LEFT = 0x10000,
- IN_BACK_RIGHT = 0x20000,
- IN_CENTER = 0x40000,
- IN_LOW_FREQUENCY = 0x100000,
- IN_TOP_LEFT = 0x200000,
- IN_TOP_RIGHT = 0x400000,
-
- IN_VOICE_UPLINK = 0x4000,
- IN_VOICE_DNLINK = 0x8000,
-
- IN_MONO = IN_FRONT,
- IN_STEREO = (IN_LEFT | IN_RIGHT),
- IN_FRONT_BACK = (IN_FRONT | IN_BACK),
- IN_6 = (IN_LEFT | IN_RIGHT |
- IN_FRONT | IN_BACK |
- IN_LEFT_PROCESSED | IN_RIGHT_PROCESSED),
- IN_2POINT0POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
- IN_2POINT1POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT |
- IN_LOW_FREQUENCY),
- IN_3POINT0POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
- IN_3POINT1POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT |
- IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY),
- IN_5POINT1 = (IN_LEFT | IN_CENTER | IN_RIGHT |
- IN_BACK_LEFT | IN_BACK_RIGHT | IN_LOW_FREQUENCY),
- IN_VOICE_UPLINK_MONO = (IN_VOICE_UPLINK | IN_MONO),
- IN_VOICE_DNLINK_MONO = (IN_VOICE_DNLINK | IN_MONO),
- IN_VOICE_CALL_MONO = (IN_VOICE_UPLINK_MONO |
- IN_VOICE_DNLINK_MONO),
- // Note that the 2.0 IN_ALL* have been moved to helper functions
-
- COUNT_MAX = 30,
- INDEX_HDR = REPRESENTATION_INDEX << COUNT_MAX,
- INDEX_MASK_1 = INDEX_HDR | ((1 << 1) - 1),
- INDEX_MASK_2 = INDEX_HDR | ((1 << 2) - 1),
- INDEX_MASK_3 = INDEX_HDR | ((1 << 3) - 1),
- INDEX_MASK_4 = INDEX_HDR | ((1 << 4) - 1),
- INDEX_MASK_5 = INDEX_HDR | ((1 << 5) - 1),
- INDEX_MASK_6 = INDEX_HDR | ((1 << 6) - 1),
- INDEX_MASK_7 = INDEX_HDR | ((1 << 7) - 1),
- INDEX_MASK_8 = INDEX_HDR | ((1 << 8) - 1),
- INDEX_MASK_9 = INDEX_HDR | ((1 << 9) - 1),
- INDEX_MASK_10 = INDEX_HDR | ((1 << 10) - 1),
- INDEX_MASK_11 = INDEX_HDR | ((1 << 11) - 1),
- INDEX_MASK_12 = INDEX_HDR | ((1 << 12) - 1),
- INDEX_MASK_13 = INDEX_HDR | ((1 << 13) - 1),
- INDEX_MASK_14 = INDEX_HDR | ((1 << 14) - 1),
- INDEX_MASK_15 = INDEX_HDR | ((1 << 15) - 1),
- INDEX_MASK_16 = INDEX_HDR | ((1 << 16) - 1),
- INDEX_MASK_17 = INDEX_HDR | ((1 << 17) - 1),
- INDEX_MASK_18 = INDEX_HDR | ((1 << 18) - 1),
- INDEX_MASK_19 = INDEX_HDR | ((1 << 19) - 1),
- INDEX_MASK_20 = INDEX_HDR | ((1 << 20) - 1),
- INDEX_MASK_21 = INDEX_HDR | ((1 << 21) - 1),
- INDEX_MASK_22 = INDEX_HDR | ((1 << 22) - 1),
- INDEX_MASK_23 = INDEX_HDR | ((1 << 23) - 1),
- INDEX_MASK_24 = INDEX_HDR | ((1 << 24) - 1),
+struct AudioBasicConfig {
+ uint32_t sampleRateHz; // 0 means 'unspecified'
+ vec<AudioChannelMask> channelMask; // empty means 'unspecified'
+ AudioFormat format; // 'DEFAULT' means 'unspecified'
};
/**
@@ -607,301 +136,55 @@ enum AudioMode : int32_t {
CALL_SCREEN = 4,
};
-@export(name="", value_prefix="AUDIO_DEVICE_")
-enum AudioDevice : uint32_t {
- NONE = 0x0,
- /** reserved bits */
- BIT_IN = 0x80000000,
- BIT_DEFAULT = 0x40000000,
- /** output devices */
- OUT_EARPIECE = 0x1,
- OUT_SPEAKER = 0x2,
- OUT_WIRED_HEADSET = 0x4,
- OUT_WIRED_HEADPHONE = 0x8,
- OUT_BLUETOOTH_SCO = 0x10,
- OUT_BLUETOOTH_SCO_HEADSET = 0x20,
- OUT_BLUETOOTH_SCO_CARKIT = 0x40,
- OUT_BLUETOOTH_A2DP = 0x80,
- OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
- OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
- OUT_AUX_DIGITAL = 0x400,
- OUT_HDMI = OUT_AUX_DIGITAL,
- /** uses an analog connection (multiplexed over the USB pins for instance) */
- OUT_ANLG_DOCK_HEADSET = 0x800,
- OUT_DGTL_DOCK_HEADSET = 0x1000,
- /** USB accessory mode: Android device is USB device and dock is USB host */
- OUT_USB_ACCESSORY = 0x2000,
- /** USB host mode: Android device is USB host and dock is USB device */
- OUT_USB_DEVICE = 0x4000,
- OUT_REMOTE_SUBMIX = 0x8000,
- /** Telephony voice TX path */
- OUT_TELEPHONY_TX = 0x10000,
- /** Analog jack with line impedance detected */
- OUT_LINE = 0x20000,
- /** HDMI Audio Return Channel */
- OUT_HDMI_ARC = 0x40000,
- /** S/PDIF out */
- OUT_SPDIF = 0x80000,
- /** FM transmitter out */
- OUT_FM = 0x100000,
- /** Line out for av devices */
- OUT_AUX_LINE = 0x200000,
- /** limited-output speaker device for acoustic safety */
- OUT_SPEAKER_SAFE = 0x400000,
- OUT_IP = 0x800000,
- /** audio bus implemented by the audio system (e.g an MOST stereo channel) */
- OUT_BUS = 0x1000000,
- OUT_PROXY = 0x2000000,
- OUT_USB_HEADSET = 0x4000000,
- OUT_HEARING_AID = 0x8000000,
- OUT_ECHO_CANCELLER = 0x10000000,
- OUT_DEFAULT = BIT_DEFAULT,
- // Note that the 2.0 OUT_ALL* have been moved to helper functions
-
- /** input devices */
- IN_COMMUNICATION = BIT_IN | 0x1,
- IN_AMBIENT = BIT_IN | 0x2,
- IN_BUILTIN_MIC = BIT_IN | 0x4,
- IN_BLUETOOTH_SCO_HEADSET = BIT_IN | 0x8,
- IN_WIRED_HEADSET = BIT_IN | 0x10,
- IN_AUX_DIGITAL = BIT_IN | 0x20,
- IN_HDMI = IN_AUX_DIGITAL,
- /** Telephony voice RX path */
- IN_VOICE_CALL = BIT_IN | 0x40,
- IN_TELEPHONY_RX = IN_VOICE_CALL,
- IN_BACK_MIC = BIT_IN | 0x80,
- IN_REMOTE_SUBMIX = BIT_IN | 0x100,
- IN_ANLG_DOCK_HEADSET = BIT_IN | 0x200,
- IN_DGTL_DOCK_HEADSET = BIT_IN | 0x400,
- IN_USB_ACCESSORY = BIT_IN | 0x800,
- IN_USB_DEVICE = BIT_IN | 0x1000,
- /** FM tuner input */
- IN_FM_TUNER = BIT_IN | 0x2000,
- /** TV tuner input */
- IN_TV_TUNER = BIT_IN | 0x4000,
- /** Analog jack with line impedance detected */
- IN_LINE = BIT_IN | 0x8000,
- /** S/PDIF in */
- IN_SPDIF = BIT_IN | 0x10000,
- IN_BLUETOOTH_A2DP = BIT_IN | 0x20000,
- IN_LOOPBACK = BIT_IN | 0x40000,
- IN_IP = BIT_IN | 0x80000,
- /** audio bus implemented by the audio system (e.g an MOST stereo channel) */
- IN_BUS = BIT_IN | 0x100000,
- IN_PROXY = BIT_IN | 0x1000000,
- IN_USB_HEADSET = BIT_IN | 0x2000000,
- IN_BLUETOOTH_BLE = BIT_IN | 0x4000000,
- IN_ECHO_REFERENCE = BIT_IN | 0x10000000,
- IN_DEFAULT = BIT_IN | BIT_DEFAULT,
-
- // Note that the 2.0 IN_ALL* have been moved to helper functions
-};
-
-/**
- * IEEE 802 MAC address.
- */
-typedef uint8_t[6] MacAddress;
-
/**
* Specifies a device address in case when several devices of the same type
* can be connected (e.g. BT A2DP, USB).
*/
struct DeviceAddress {
- AudioDevice device; // discriminator
- union Address {
- MacAddress mac; // used for BLUETOOTH_A2DP_*
- uint8_t[4] ipv4; // used for IP
+ /**
+ * Audio device specifies type (or category) of audio I/O device
+ * (e.g. speaker or headphones).
+ * See 'audioDevice' in audio_policy_configuration.xsd for the
+ * list of allowed values.
+ */
+ string deviceType;
+ safe_union Address {
+ /**
+ * The address may be left unspecified if 'device' specifies
+ * a physical device unambiguously.
+ */
+ Monostate unspecified;
+ /** IEEE 802 MAC address. Set for Bluetooth devices. */
+ uint8_t[6] mac;
+ /** IPv4 Address. Set for IPv4 devices. */
+ uint8_t[4] ipv4;
+ /** IPv6 Address. Set for IPv6 devices. */
+ uint16_t[8] ipv6;
+ /** PCI bus Address. Set for USB devices. */
struct Alsa {
int32_t card;
int32_t device;
- } alsa; // used for USB_*
+ } alsa;
+ /** Arbitrary BUS device unique address. Not interpreted by the framework. */
+ string bus;
+ /** Arbitrary REMOTE_SUBMIX device unique address. Not interpreted by the HAL. */
+ string rSubmix;
} address;
- /** Arbitrary BUS device unique address. Should not be interpreted by the framework. */
- string busAddress;
- /** Arbitrary REMOTE_SUBMIX device unique address. Should not be interpreted by the HAL. */
- string rSubmixAddress;
};
/**
- * The audio output flags serve two purposes:
- *
- * - when an AudioTrack is created they indicate a "wish" to be connected to an
- * output stream with attributes corresponding to the specified flags;
- *
- * - when present in an output profile descriptor listed for a particular audio
- * hardware module, they indicate that an output stream can be opened that
- * supports the attributes indicated by the flags.
- *
- * The audio policy manager will try to match the flags in the request
- * (when getOuput() is called) to an available output stream.
+ * Audio usage specifies the intended use case for the sound being played.
+ * See 'audioUsage' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_output_flags_t", value_prefix="AUDIO_OUTPUT_FLAG_")
-enum AudioOutputFlag : int32_t {
- NONE = 0x0, // no attributes
- DIRECT = 0x1, // this output directly connects a track
- // to one output stream: no software mixer
- PRIMARY = 0x2, // this output is the primary output of the device. It is
- // unique and must be present. It is opened by default and
- // receives routing, audio mode and volume controls related
- // to voice calls.
- FAST = 0x4, // output supports "fast tracks", defined elsewhere
- DEEP_BUFFER = 0x8, // use deep audio buffers
- COMPRESS_OFFLOAD = 0x10, // offload playback of compressed streams to
- // hardware codec
- NON_BLOCKING = 0x20, // use non-blocking write
- HW_AV_SYNC = 0x40, // output uses a hardware A/V sync
- TTS = 0x80, // output for streams transmitted through speaker at a
- // sample rate high enough to accommodate lower-range
- // ultrasonic p/b
- RAW = 0x100, // minimize signal processing
- SYNC = 0x200, // synchronize I/O streams
- IEC958_NONAUDIO = 0x400, // Audio stream contains compressed audio in SPDIF
- // data bursts, not PCM.
- DIRECT_PCM = 0x2000, // Audio stream containing PCM data that needs
- // to pass through compress path for DSP post proc.
- MMAP_NOIRQ = 0x4000, // output operates in MMAP no IRQ mode.
- VOIP_RX = 0x8000, // preferred output for VoIP calls.
- /** preferred output for call music */
- INCALL_MUSIC = 0x10000,
-};
+typedef string AudioUsage;
/**
- * The audio input flags are analogous to audio output flags.
- * Currently they are used only when an AudioRecord is created,
- * to indicate a preference to be connected to an input stream with
- * attributes corresponding to the specified flags.
+ * Audio content type expresses the general category of the content.
+ * See 'audioContentType' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_input_flags_t", value_prefix="AUDIO_INPUT_FLAG_")
-enum AudioInputFlag : int32_t {
- NONE = 0x0, // no attributes
- FAST = 0x1, // prefer an input that supports "fast tracks"
- HW_HOTWORD = 0x2, // prefer an input that captures from hw hotword source
- RAW = 0x4, // minimize signal processing
- SYNC = 0x8, // synchronize I/O streams
- MMAP_NOIRQ = 0x10, // input operates in MMAP no IRQ mode.
- VOIP_TX = 0x20, // preferred input for VoIP calls.
- HW_AV_SYNC = 0x40, // input connected to an output that uses a hardware A/V sync
- DIRECT = 0x80, // for acquiring encoded streams
-};
-
-@export(name="audio_usage_t", value_prefix="AUDIO_USAGE_")
-enum AudioUsage : int32_t {
- // These values must kept in sync with
- // frameworks/base/media/java/android/media/AudioAttributes.java
- // Note that not all framework values are exposed
- /**
- * Usage value to use when the usage is unknown.
- */
- UNKNOWN = 0,
- /**
- * Usage value to use when the usage is media, such as music, or movie
- * soundtracks.
- */
- MEDIA = 1,
- /**
- * Usage value to use when the usage is voice communications, such as
- * telephony or VoIP.
- */
- VOICE_COMMUNICATION = 2,
- /**
- * Usage value to use when the usage is in-call signalling, such as with
- * a "busy" beep, or DTMF tones.
- */
- VOICE_COMMUNICATION_SIGNALLING = 3,
- /**
- * Usage value to use when the usage is an alarm (e.g. wake-up alarm).
- */
- ALARM = 4,
- /**
- * Usage value to use when the usage is a generic notification.
- */
- NOTIFICATION = 5,
- /**
- * Usage value to use when the usage is telephony ringtone.
- */
- NOTIFICATION_TELEPHONY_RINGTONE = 6,
- /**
- * Usage value to use when the usage is for accessibility, such as with
- * a screen reader.
- */
- ASSISTANCE_ACCESSIBILITY = 11,
- /**
- * Usage value to use when the usage is driving or navigation directions.
- */
- ASSISTANCE_NAVIGATION_GUIDANCE = 12,
- /**
- * Usage value to use when the usage is sonification, such as with user
- * interface sounds.
- */
- ASSISTANCE_SONIFICATION = 13,
- /**
- * Usage value to use when the usage is for game audio.
- */
- GAME = 14,
- /**
- * Usage value to use when feeding audio to the platform and replacing
- * "traditional" audio source, such as audio capture devices.
- */
- VIRTUAL_SOURCE = 15,
- /**
- * Usage value to use for audio responses to user queries, audio
- * instructions or help utterances.
- */
- ASSISTANT = 16,
- /**
- * Usage value to use for assistant voice interaction with remote caller
- * on Cell and VoIP calls.
- */
- CALL_ASSISTANT = 17,
- /**
- * Usage value to use when the usage is an emergency.
- */
- EMERGENCY = 1000,
- /**
- * Usage value to use when the usage is a safety sound.
- */
- SAFETY = 1001,
- /**
- * Usage value to use when the usage is a vehicle status.
- */
- VEHICLE_STATUS = 1002,
- /**
- * Usage value to use when the usage is an announcement.
- */
- ANNOUNCEMENT = 1003,
-};
-
-/** Type of audio generated by an application. */
-@export(name="audio_content_type_t", value_prefix="AUDIO_CONTENT_TYPE_")
-enum AudioContentType : uint32_t {
- // Do not change these values without updating their counterparts
- // in frameworks/base/media/java/android/media/AudioAttributes.java
- /**
- * Content type value to use when the content type is unknown, or other than
- * the ones defined.
- */
- UNKNOWN = 0,
- /**
- * Content type value to use when the content type is speech.
- */
- SPEECH = 1,
- /**
- * Content type value to use when the content type is music.
- */
- MUSIC = 2,
- /**
- * Content type value to use when the content type is a soundtrack,
- * typically accompanying a movie or TV program.
- */
- MOVIE = 3,
- /**
- * Content type value to use when the content type is a sound used to
- * accompany a user action, such as a beep or sound effect expressing a key
- * click, or event, such as the type of a sound for a bonus being received
- * in a game. These sounds are mostly synthesized or short Foley sounds.
- */
- SONIFICATION = 4,
-};
+typedef string AudioContentType;
/** Encapsulation mode used for sending audio compressed data. */
@export(name="audio_encapsulation_mode_t", value_prefix="AUDIO_ENCAPSULATION_MODE_")
@@ -926,9 +209,7 @@ enum AudioEncapsulationMode : int32_t {
* Additional information about the stream passed to hardware decoders.
*/
struct AudioOffloadInfo {
- uint32_t sampleRateHz;
- bitfield<AudioChannelMask> channelMask;
- AudioFormat format;
+ AudioBasicConfig base;
AudioStreamType streamType;
uint32_t bitRatePerSecond;
int64_t durationMicroseconds; // -1 if unknown
@@ -946,9 +227,7 @@ struct AudioOffloadInfo {
* Commonly used audio stream configuration parameters.
*/
struct AudioConfig {
- uint32_t sampleRateHz;
- bitfield<AudioChannelMask> channelMask;
- AudioFormat format;
+ AudioBasicConfig base;
AudioOffloadInfo offloadInfo;
uint64_t frameCount;
};
@@ -985,8 +264,7 @@ struct RecordTrackMetadata {
safe_union Destination {
Monostate unspecified;
DeviceAddress device;
- };
- Destination destination;
+ } destination;
};
/** Metadatas of the sink of a StreamIn. */
@@ -994,7 +272,6 @@ struct SinkMetadata {
vec<RecordTrackMetadata> tracks;
};
-
/*
*
* Volume control
@@ -1017,7 +294,7 @@ enum AudioGainMode : uint32_t {
*/
struct AudioGain {
bitfield<AudioGainMode> mode;
- bitfield<AudioChannelMask> channelMask; // channels which gain an be controlled
+ vec<AudioChannelMask> channelMask; // channels which gain an be controlled
int32_t minValue; // minimum gain value in millibels
int32_t maxValue; // maximum gain value in millibels
int32_t defaultValue; // default gain value in millibels
@@ -1033,10 +310,8 @@ struct AudioGain {
struct AudioGainConfig {
int32_t index; // index of the corresponding AudioGain in AudioPort.gains
AudioGainMode mode;
- AudioChannelMask channelMask; // channels which gain value follows
+ vec<AudioChannelMask> channelMask; // channels which gain value follows
/**
- * 4 = sizeof(AudioChannelMask),
- * 8 is not "FCC_8", so it won't need to be changed for > 8 channels.
* Gain values in millibels for each channel ordered from LSb to MSb in
* channel mask. The number of values is 1 in joint mode or
* popcount(channel_mask).
@@ -1060,132 +335,85 @@ struct AudioGainConfig {
* the interface.
*/
-/** Audio port role: either source or sink */
-@export(name="audio_port_role_t", value_prefix="AUDIO_PORT_ROLE_")
-enum AudioPortRole : int32_t {
- NONE,
- SOURCE,
- SINK,
-};
-
-/**
- * Audio port type indicates if it is a session (e.g AudioTrack), a mix (e.g
- * PlaybackThread output) or a physical device (e.g OUT_SPEAKER)
- */
-@export(name="audio_port_type_t", value_prefix="AUDIO_PORT_TYPE_")
-enum AudioPortType : int32_t {
- NONE,
- DEVICE,
- MIX,
- SESSION,
-};
-
/**
- * Extension for audio port configuration structure when the audio port is a
- * hardware device.
- */
-struct AudioPortConfigDeviceExt {
- AudioModuleHandle hwModule; // module the device is attached to
- AudioDevice type; // device type (e.g OUT_SPEAKER)
- uint8_t[32] address; // device address. "" if N/A
-};
-
-/**
- * Extension for audio port configuration structure when the audio port is an
- * audio session.
- */
-struct AudioPortConfigSessionExt {
+ * A helper aggregate structure providing parameters that depend on the
+ * port role.
+ */
+safe_union AudioPortExtendedInfo {
+ /** Set when no information is provided. */
+ Monostate unspecified;
+ /** Set when the audio port is an audio device. */
+ DeviceAddress device;
+ /** Set when the audio port is a mix. The handle is of a stream. */
+ struct AudioPortMixExt {
+ /** I/O handle of the input/output stream. */
+ AudioIoHandle ioHandle;
+ safe_union UseCase {
+ /** Specified when the port is in the SOURCE role. */
+ AudioStreamType stream;
+ /** Specified when the port is in the SINK role. */
+ AudioSource source;
+ } useCase;
+ } mix;
+ /** Set when the audio port is an audio session. */
AudioSession session;
};
/**
- * Flags indicating which fields are to be considered in AudioPortConfig.
- */
-@export(name="", value_prefix="AUDIO_PORT_CONFIG_")
-enum AudioPortConfigMask : uint32_t {
- SAMPLE_RATE = 0x1,
- CHANNEL_MASK = 0x2,
- FORMAT = 0x4,
- GAIN = 0x8,
-};
-
-/**
* Audio port configuration structure used to specify a particular configuration
* of an audio port.
*/
struct AudioPortConfig {
+ /**
+ * The 'id' field is set when it is needed to select the port and
+ * apply new configuration for it.
+ */
AudioPortHandle id;
- bitfield<AudioPortConfigMask> configMask;
- uint32_t sampleRateHz;
- bitfield<AudioChannelMask> channelMask;
- AudioFormat format;
- AudioGainConfig gain;
- AudioPortType type; // type is used as a discriminator for Ext union
- AudioPortRole role; // role is used as a discriminator for UseCase union
- union Ext {
- AudioPortConfigDeviceExt device;
- struct AudioPortConfigMixExt {
- AudioModuleHandle hwModule; // module the stream is attached to
- AudioIoHandle ioHandle; // I/O handle of the input/output stream
- union UseCase {
- AudioStreamType stream;
- AudioSource source;
- } useCase;
- } mix;
- AudioPortConfigSessionExt session;
- } ext;
-};
-
-/**
- * Extension for audio port structure when the audio port is a hardware device.
- */
-struct AudioPortDeviceExt {
- AudioModuleHandle hwModule; // module the device is attached to
- AudioDevice type;
- /** 32 byte string identifying the port. */
- uint8_t[32] address;
-};
-
-/**
- * Latency class of the audio mix.
- */
-@export(name="audio_mix_latency_class_t", value_prefix="AUDIO_LATENCY_")
-enum AudioMixLatencyClass : int32_t {
- LOW,
- NORMAL
-};
-
-struct AudioPortMixExt {
- AudioModuleHandle hwModule; // module the stream is attached to
- AudioIoHandle ioHandle; // I/O handle of the stream
- AudioMixLatencyClass latencyClass;
+ /**
+ * Basic parameters: sampling rate, format, channel mask. Only some of the
+ * parameters (or none) may be set. See the documentation of the
+ * AudioBasicConfig struct.
+ */
+ AudioBasicConfig config;
+ /** Associated gain control. */
+ safe_union OptionalGain {
+ Monostate unspecified;
+ AudioGainConfig config;
+ } gain;
+ /** Parameters that depend on the actual port role. */
+ AudioPortExtendedInfo ext;
};
/**
- * Extension for audio port structure when the audio port is an audio session.
+ * Audio port structure describes the capabilities of an audio port
+ * as well as its current configuration.
*/
-struct AudioPortSessionExt {
- AudioSession session;
-};
-
struct AudioPort {
+ /**
+ * Unique identifier of the port within this HAL service. When calling
+ * from the client side functions like IDevice.getAudioPort is it allowed
+ * to only specify the 'id' and leave the other fields unspecified.
+ */
AudioPortHandle id;
- AudioPortRole role;
+ /**
+ * Human-readable name describing the function of the port.
+ * E.g. "telephony_tx" or "fm_tuner".
+ */
string name;
- vec<uint32_t> sampleRates;
- vec<bitfield<AudioChannelMask>> channelMasks;
- vec<AudioFormat> formats;
+ /** List of audio profiles supported by the port. */
+ struct AudioProfile {
+ AudioFormat format;
+ /** List of the sample rates supported by the profile. */
+ vec<uint32_t> sampleRates;
+ /** List of channel masks supported by the profile. */
+ vec<AudioChannelMask> channelMasks;
+ };
+ vec<AudioProfile> profiles;
+ /** List of gain controls attached to the port. */
vec<AudioGain> gains;
- AudioPortConfig activeConfig; // current audio port configuration
- AudioPortType type; // type is used as a discriminator
- union Ext {
- AudioPortDeviceExt device;
- AudioPortMixExt mix;
- AudioPortSessionExt session;
- } ext;
-};
-
-struct ThreadInfo {
- int64_t pid;
- int64_t tid;
+ /**
+ * Current configuration of the audio port, may have all the fields left
+ * unspecified.
+ */
+ AudioPortConfig activeConfig;
};
diff --git a/audio/effect/7.0/IEffect.hal b/audio/effect/7.0/IEffect.hal
index 5b176dc2f3..aa94f6ddcd 100644
--- a/audio/effect/7.0/IEffect.hal
+++ b/audio/effect/7.0/IEffect.hal
@@ -56,7 +56,6 @@ interface IEffect {
*
* @return retval operation completion status.
*/
- @callflow(next={"prepareForProcessing"})
enable() generates (Result retval);
/**
@@ -64,7 +63,6 @@ interface IEffect {
*
* @return retval operation completion status.
*/
- @callflow(next={"close"})
disable() generates (Result retval);
/**
@@ -78,7 +76,7 @@ interface IEffect {
* @param device output device specification.
* @return retval operation completion status.
*/
- setDevice(bitfield<AudioDevice> device) generates (Result retval);
+ setDevice(DeviceAddress device) generates (Result retval);
/**
* Set and get volume. Used by audio framework to delegate volume control to
@@ -147,7 +145,7 @@ interface IEffect {
* @param device input device specification.
* @return retval operation completion status.
*/
- setInputDevice(bitfield<AudioDevice> device) generates (Result retval);
+ setInputDevice(DeviceAddress device) generates (Result retval);
/**
* Read audio parameters configurations for input and output buffers.
@@ -251,7 +249,6 @@ interface IEffect {
* the queue.
* @return statusMQ a message queue used for passing status from the effect.
*/
- @callflow(next={"setProcessBuffers"})
prepareForProcessing() generates (Result retval, fmq_sync<Result> statusMQ);
/**
@@ -416,6 +413,5 @@ interface IEffect {
* @return retval OK in case the success.
* INVALID_STATE if the effect was already closed.
*/
- @exit
close() generates (Result retval);
};
diff --git a/audio/effect/7.0/IVirtualizerEffect.hal b/audio/effect/7.0/IVirtualizerEffect.hal
index 0e6ff54403..141b4e6797 100644
--- a/audio/effect/7.0/IVirtualizerEffect.hal
+++ b/audio/effect/7.0/IVirtualizerEffect.hal
@@ -48,7 +48,7 @@ interface IVirtualizerEffect extends IEffect {
struct SpeakerAngle {
/** Speaker channel mask */
- bitfield<AudioChannelMask> mask;
+ vec<AudioChannelMask> mask;
// all angles are expressed in degrees and
// are relative to the listener.
int16_t azimuth; // 0 is the direction the listener faces
@@ -61,17 +61,17 @@ interface IVirtualizerEffect extends IEffect {
* Retrieves virtual speaker angles for the given channel mask on the
* specified device.
*/
- getVirtualSpeakerAngles(bitfield<AudioChannelMask> mask, AudioDevice device)
+ getVirtualSpeakerAngles(vec<AudioChannelMask> mask, DeviceAddress device)
generates (Result retval, vec<SpeakerAngle> speakerAngles);
/**
* Forces the virtualizer effect for the given output device.
*/
- forceVirtualizationMode(AudioDevice device) generates (Result retval);
+ forceVirtualizationMode(DeviceAddress device) generates (Result retval);
/**
* Returns audio device reflecting the current virtualization mode,
- * AUDIO_DEVICE_NONE when not virtualizing.
+ * Device type can be empty when not virtualizing.
*/
- getVirtualizationMode() generates (Result retval, AudioDevice device);
+ getVirtualizationMode() generates (Result retval, DeviceAddress device);
};
diff --git a/audio/effect/7.0/types.hal b/audio/effect/7.0/types.hal
index 7f5a38238f..fe4ee51584 100644
--- a/audio/effect/7.0/types.hal
+++ b/audio/effect/7.0/types.hal
@@ -257,7 +257,7 @@ enum EffectConfigParameters : int32_t {
struct EffectBufferConfig {
AudioBuffer buffer;
uint32_t samplingRateHz;
- bitfield<AudioChannelMask> channels;
+ AudioChannelMask channels;
AudioFormat format;
EffectBufferAccess accessMode;
bitfield<EffectConfigParameters> mask;
@@ -276,8 +276,8 @@ enum EffectFeature : int32_t {
};
struct EffectAuxChannelsConfig {
- bitfield<AudioChannelMask> mainChannels; // channel mask for main channels
- bitfield<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
+ vec<AudioChannelMask> mainChannels; // channel mask for main channels
+ vec<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
};
struct EffectOffloadParameter {