author    Joonas Kylmälä <joonas.kylmala@iki.fi>  2020-11-28 11:32:28 -0500
committer Joonas Kylmälä <joonas.kylmala@iki.fi>  2020-11-28 11:32:28 -0500
commit    db0f5726f587e6d4e942d358115853ac5e11b55d (patch)
tree      5b9aab11ce353ff7bca90856495f67e9e50e463d
parent    2406ae1558f0c5429f88ab97c7c7cdf42d6269a4 (diff)
audio: update module to latest upstream version
This updates the audio module to version e24372e861c14654a4eb9449dd3d0a615522f084 from https://android.googlesource.com/device/linaro/dragonboard

Signed-off-by: Joonas Kylmälä <joonas.kylmala@iki.fi>
-rw-r--r--  audio/Android.mk        |  11
-rw-r--r--  audio/audio_aec.c       | 700
-rw-r--r--  audio/audio_aec.h       | 132
-rw-r--r--  audio/audio_hw.c        | 754
-rw-r--r--  audio/audio_hw.h        | 129
-rw-r--r--  audio/fifo_wrapper.cpp  |  79
-rw-r--r--  audio/fifo_wrapper.h    |  35
-rw-r--r--  audio/fir_filter.c      | 154
-rw-r--r--  audio/fir_filter.h      |  39
9 files changed, 1877 insertions(+), 156 deletions(-)
diff --git a/audio/Android.mk b/audio/Android.mk
index cfe9ec9..90c18fa 100644
--- a/audio/Android.mk
+++ b/audio/Android.mk
@@ -24,12 +24,14 @@ include $(CLEAR_VARS)
LOCAL_HEADER_LIBRARIES += libhardware_headers
LOCAL_MODULE := audio.primary.$(TARGET_BOARD_PLATFORM)
-LOCAL_MODULE_PATH_32 := $(TARGET_OUT_VENDOR)/lib/hw
-LOCAL_MODULE_PATH_64 := $(TARGET_OUT_VENDOR)/lib64/hw
+LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := audio_hw.c
-LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioroute
+LOCAL_SRC_FILES := audio_hw.c \
+ audio_aec.c \
+ fifo_wrapper.cpp \
+ fir_filter.c
+LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioroute libaudioutils
LOCAL_CFLAGS := -Wno-unused-parameter
LOCAL_C_INCLUDES += \
external/tinyalsa/include \
@@ -39,4 +41,3 @@ LOCAL_C_INCLUDES += \
system/media/audio_effects/include
include $(BUILD_SHARED_LIBRARY)
-
diff --git a/audio/audio_aec.c b/audio/audio_aec.c
new file mode 100644
index 0000000..ab99c93
--- /dev/null
+++ b/audio/audio_aec.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// clang-format off
+/*
+ * Typical AEC signal flow:
+ *
+ * Microphone Audio
+ * Timestamps
+ * +--------------------------------------+
+ * | | +---------------+
+ * | Microphone +---------------+ | | |
+ * O|====== | Audio | Sample Rate | +-------> |
+ * (from . +--+ Samples | + | | |
+ * mic . +==================> Format |==============> |
+ * codec) . | Conversion | | | Cleaned
+ * O|====== | (if required) | | Acoustic | Audio
+ * +---------------+ | Echo | Samples
+ * | Canceller |===================>
+ * | (AEC) |
+ * Reference +---------------+ | |
+ * Audio | Sample Rate | | |
+ * Samples | + | | |
+ * +=============> Format |==============> |
+ * | | Conversion | | |
+ * | | (if required) | +-------> |
+ * | +---------------+ | | |
+ * | | +---------------+
+ * | +-------------------------------+
+ * | | Reference Audio
+ * | | Timestamps
+ * | |
+ * +--+----+---------+ AUDIO CAPTURE
+ * | Speaker |
+ * +------------+ Audio/Timestamp +---------------------------------------------------------------------------+
+ * | Buffer |
+ * +--^----^---------+ AUDIO PLAYBACK
+ * | |
+ * | |
+ * | |
+ * | |
+ * |\ | |
+ * | +-+ | |
+ * (to | | +-----C----+
+ * speaker | | | | Playback
+ * codec) | | <=====+================================================================+ Audio
+ * | +-+ Samples
+ * |/
+ *
+ */
+// clang-format on
+
+#define LOG_TAG "audio_hw_aec"
+// #define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <malloc.h>
+#include <sys/time.h>
+#include <tinyalsa/asoundlib.h>
+#include <unistd.h>
+#include <log/log.h>
+#include "audio_aec.h"
+
+#ifdef AEC_HAL
+#include "audio_aec_process.h"
+#else
+#define aec_spk_mic_init(...) ((int)0)
+#define aec_spk_mic_reset(...) ((void)0)
+#define aec_spk_mic_process(...) ((int32_t)0)
+#define aec_spk_mic_release(...) ((void)0)
+#endif
+
+#define MAX_TIMESTAMP_DIFF_USEC 200000
+
+#define MAX_READ_WAIT_TIME_MSEC 80
+
+uint64_t timespec_to_usec(struct timespec ts) {
+ return (ts.tv_sec * 1e6L + ts.tv_nsec/1000);
+}
+
+void get_reference_audio_in_place(struct aec_t *aec, size_t frames) {
+ if (aec->num_reference_channels == aec->spk_num_channels) {
+ /* Reference count equals speaker channels, nothing to do here. */
+ return;
+ } else if (aec->num_reference_channels != 1) {
+ /* We don't have a rule for non-mono references, show error on log */
+ ALOGE("Invalid reference count - must be 1 or match number of playback channels!");
+ return;
+ }
+ int16_t *src_Nch = &aec->spk_buf_playback_format[0];
+ int16_t *dst_1ch = &aec->spk_buf_playback_format[0];
+ int32_t num_channels = (int32_t)aec->spk_num_channels;
+ size_t frame, ch;
+ for (frame = 0; frame < frames; frame++) {
+ int32_t acc = 0;
+ for (ch = 0; ch < aec->spk_num_channels; ch++) {
+ acc += src_Nch[ch];
+ }
+ *dst_1ch++ = clamp16(acc/num_channels);
+ src_Nch += aec->spk_num_channels;
+ }
+}
+
+void print_queue_status_to_log(struct aec_t *aec, bool write_side) {
+ ssize_t q1 = fifo_available_to_read(aec->spk_fifo);
+ ssize_t q2 = fifo_available_to_read(aec->ts_fifo);
+
+ ALOGV("Queue available %s: Spk %zd (count %zd) TS %zd (count %zd)",
+ (write_side) ? "(POST-WRITE)" : "(PRE-READ)",
+ q1, q1/aec->spk_frame_size_bytes/PLAYBACK_PERIOD_SIZE,
+ q2, q2/sizeof(struct aec_info));
+}
+
+void flush_aec_fifos(struct aec_t *aec) {
+ if (aec == NULL) {
+ return;
+ }
+ if (aec->spk_fifo != NULL) {
+ ALOGV("Flushing AEC Spk FIFO...");
+ fifo_flush(aec->spk_fifo);
+ }
+ if (aec->ts_fifo != NULL) {
+ ALOGV("Flushing AEC Timestamp FIFO...");
+ fifo_flush(aec->ts_fifo);
+ }
+ /* Reset FIFO read-write offset tracker */
+ aec->read_write_diff_bytes = 0;
+}
+
+void aec_set_spk_running_no_lock(struct aec_t* aec, bool state) {
+ aec->spk_running = state;
+}
+
+bool aec_get_spk_running_no_lock(struct aec_t* aec) {
+ return aec->spk_running;
+}
+
+void destroy_aec_reference_config_no_lock(struct aec_t* aec) {
+ if (!aec->spk_initialized) {
+ return;
+ }
+ aec_set_spk_running_no_lock(aec, false);
+ fifo_release(aec->spk_fifo);
+ fifo_release(aec->ts_fifo);
+ memset(&aec->last_spk_info, 0, sizeof(struct aec_info));
+ aec->spk_initialized = false;
+}
+
+void destroy_aec_mic_config_no_lock(struct aec_t* aec) {
+ if (!aec->mic_initialized) {
+ return;
+ }
+ release_resampler(aec->spk_resampler);
+ free(aec->mic_buf);
+ free(aec->spk_buf);
+ free(aec->spk_buf_playback_format);
+ free(aec->spk_buf_resampler_out);
+ memset(&aec->last_mic_info, 0, sizeof(struct aec_info));
+ aec->mic_initialized = false;
+}
+
+struct aec_t *init_aec_interface() {
+ ALOGV("%s enter", __func__);
+ struct aec_t *aec = (struct aec_t *)calloc(1, sizeof(struct aec_t));
+ if (aec == NULL) {
+ ALOGE("Failed to allocate memory for AEC interface!");
+ } else {
+ pthread_mutex_init(&aec->lock, NULL);
+ }
+
+ ALOGV("%s exit", __func__);
+ return aec;
+}
+
+void release_aec_interface(struct aec_t *aec) {
+ ALOGV("%s enter", __func__);
+ pthread_mutex_lock(&aec->lock);
+ destroy_aec_mic_config_no_lock(aec);
+ destroy_aec_reference_config_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ free(aec);
+ ALOGV("%s exit", __func__);
+}
+
+int init_aec(int sampling_rate, int num_reference_channels,
+ int num_microphone_channels, struct aec_t **aec_ptr) {
+ ALOGV("%s enter", __func__);
+ int ret = 0;
+ int aec_ret = aec_spk_mic_init(
+ sampling_rate,
+ num_reference_channels,
+ num_microphone_channels);
+ if (aec_ret) {
+ ALOGE("AEC object failed to initialize!");
+ ret = -EINVAL;
+ }
+ struct aec_t *aec = init_aec_interface();
+ if (!ret) {
+ aec->num_reference_channels = num_reference_channels;
+ /* Set defaults, will be overridden by settings in init_aec_(mic|reference)_config */
+ /* Capture uses 2-ch, 32-bit frames */
+ aec->mic_sampling_rate = CAPTURE_CODEC_SAMPLING_RATE;
+ aec->mic_frame_size_bytes = CHANNEL_STEREO * sizeof(int32_t);
+ aec->mic_num_channels = CHANNEL_STEREO;
+
+ /* Playback uses 2-ch, 16-bit frames */
+ aec->spk_sampling_rate = PLAYBACK_CODEC_SAMPLING_RATE;
+ aec->spk_frame_size_bytes = CHANNEL_STEREO * sizeof(int16_t);
+ aec->spk_num_channels = CHANNEL_STEREO;
+ }
+
+ (*aec_ptr) = aec;
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void release_aec(struct aec_t *aec) {
+ ALOGV("%s enter", __func__);
+ if (aec == NULL) {
+ return;
+ }
+ release_aec_interface(aec);
+ aec_spk_mic_release();
+ ALOGV("%s exit", __func__);
+}
+
+int init_aec_reference_config(struct aec_t *aec, struct alsa_stream_out *out) {
+ ALOGV("%s enter", __func__);
+ if (!aec) {
+ ALOGE("AEC: No valid interface found!");
+ return -EINVAL;
+ }
+
+ int ret = 0;
+ pthread_mutex_lock(&aec->lock);
+ if (aec->spk_initialized) {
+ destroy_aec_reference_config_no_lock(aec);
+ }
+
+ aec->spk_fifo = fifo_init(
+ out->config.period_count * out->config.period_size *
+ audio_stream_out_frame_size(&out->stream),
+ false /* reader_throttles_writer */);
+ if (aec->spk_fifo == NULL) {
+ ALOGE("AEC: Speaker loopback FIFO Init failed!");
+ ret = -EINVAL;
+ goto exit;
+ }
+ aec->ts_fifo = fifo_init(
+ out->config.period_count * sizeof(struct aec_info),
+ false /* reader_throttles_writer */);
+ if (aec->ts_fifo == NULL) {
+ ALOGE("AEC: Speaker timestamp FIFO Init failed!");
+ ret = -EINVAL;
+ fifo_release(aec->spk_fifo);
+ goto exit;
+ }
+
+ aec->spk_sampling_rate = out->config.rate;
+ aec->spk_frame_size_bytes = audio_stream_out_frame_size(&out->stream);
+ aec->spk_num_channels = out->config.channels;
+ aec->spk_initialized = true;
+exit:
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void destroy_aec_reference_config(struct aec_t* aec) {
+ ALOGV("%s enter", __func__);
+ if (aec == NULL) {
+ ALOGV("%s exit", __func__);
+ return;
+ }
+ pthread_mutex_lock(&aec->lock);
+ destroy_aec_reference_config_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+}
+
+int write_to_reference_fifo(struct aec_t* aec, void* buffer, struct aec_info* info) {
+ ALOGV("%s enter", __func__);
+ int ret = 0;
+ size_t bytes = info->bytes;
+
+ /* Write audio samples to FIFO */
+ ssize_t written_bytes = fifo_write(aec->spk_fifo, buffer, bytes);
+ if (written_bytes != bytes) {
+ ALOGE("Could only write %zu of %zu bytes", written_bytes, bytes);
+ ret = -ENOMEM;
+ }
+
+ /* Write timestamp to FIFO */
+ info->bytes = written_bytes;
+ ALOGV("Speaker timestamp: %ld s, %ld nsec", info->timestamp.tv_sec, info->timestamp.tv_nsec);
+ ssize_t ts_bytes = fifo_write(aec->ts_fifo, info, sizeof(struct aec_info));
+ ALOGV("Wrote TS bytes: %zu", ts_bytes);
+ print_queue_status_to_log(aec, true);
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void get_spk_timestamp(struct aec_t* aec, ssize_t read_bytes, uint64_t* spk_time) {
+ *spk_time = 0;
+ uint64_t spk_time_offset = 0;
+ float usec_per_byte = 1E6 / ((float)(aec->spk_frame_size_bytes * aec->spk_sampling_rate));
+ if (aec->read_write_diff_bytes < 0) {
+ /* We're still reading a previous write packet. (We only need the first sample's timestamp,
+ * so even if we straddle packets we only care about the first one)
+ * So we just use the previous timestamp, with an appropriate offset
+ * based on the number of bytes remaining to be read from that write packet. */
+ spk_time_offset = (aec->last_spk_info.bytes + aec->read_write_diff_bytes) * usec_per_byte;
+ ALOGV("Reusing previous timestamp, calculated offset (usec) %" PRIu64, spk_time_offset);
+ } else {
+ /* If read_write_diff_bytes > 0, there are no new writes, so there won't be timestamps in
+ * the FIFO, and the check below will fail. */
+ if (!fifo_available_to_read(aec->ts_fifo)) {
+ ALOGE("Timestamp error: no new timestamps!");
+ return;
+ }
+ /* We just read valid data, so if we're here, we should have a valid timestamp to use. */
+ ssize_t ts_bytes = fifo_read(aec->ts_fifo, &aec->last_spk_info, sizeof(struct aec_info));
+ ALOGV("Read TS bytes: %zd, expected %zu", ts_bytes, sizeof(struct aec_info));
+ aec->read_write_diff_bytes -= aec->last_spk_info.bytes;
+ }
+
+ *spk_time = timespec_to_usec(aec->last_spk_info.timestamp) + spk_time_offset;
+
+ aec->read_write_diff_bytes += read_bytes;
+ struct aec_info spk_info = aec->last_spk_info;
+ while (aec->read_write_diff_bytes > 0) {
+ /* If read_write_diff_bytes > 0, it means that there are more write packet timestamps
+ * in the FIFO (since we have read more valid data than the size of the current timestamp's
+ * packet). Keep reading timestamps from the FIFO to get to the most recent one. */
+ if (!fifo_available_to_read(aec->ts_fifo)) {
+ /* There are no more timestamps, we have the most recent one. */
+ ALOGV("At the end of timestamp FIFO, breaking...");
+ break;
+ }
+ fifo_read(aec->ts_fifo, &spk_info, sizeof(struct aec_info));
+ ALOGV("Fast-forwarded timestamp by %zd bytes, remaining bytes: %zd,"
+ " new timestamp (usec) %" PRIu64,
+ spk_info.bytes, aec->read_write_diff_bytes, timespec_to_usec(spk_info.timestamp));
+ aec->read_write_diff_bytes -= spk_info.bytes;
+ }
+ aec->last_spk_info = spk_info;
+}
+
+int get_reference_samples(struct aec_t* aec, void* buffer, struct aec_info* info) {
+ ALOGV("%s enter", __func__);
+
+ if (!aec->spk_initialized) {
+ ALOGE("%s called with no reference initialized", __func__);
+ return -EINVAL;
+ }
+
+ size_t bytes = info->bytes;
+ const size_t frames = bytes / aec->mic_frame_size_bytes;
+ const size_t sample_rate_ratio = aec->spk_sampling_rate / aec->mic_sampling_rate;
+
+ /* Read audio samples from FIFO */
+ const size_t req_bytes = frames * sample_rate_ratio * aec->spk_frame_size_bytes;
+ ssize_t available_bytes = 0;
+ unsigned int wait_count = MAX_READ_WAIT_TIME_MSEC;
+ while (true) {
+ available_bytes = fifo_available_to_read(aec->spk_fifo);
+ if (available_bytes >= req_bytes) {
+ break;
+ } else if (available_bytes < 0) {
+ ALOGE("fifo_read returned code %zu ", available_bytes);
+ return -ENOMEM;
+ }
+
+ ALOGV("Sleeping, required bytes: %zu, available bytes: %zd", req_bytes, available_bytes);
+ usleep(1000);
+ if ((wait_count--) == 0) {
+ ALOGE("Timed out waiting for read from reference FIFO");
+ return -ETIMEDOUT;
+ }
+ }
+
+ const size_t read_bytes = fifo_read(aec->spk_fifo, aec->spk_buf_playback_format, req_bytes);
+
+ /* Get timestamp*/
+ get_spk_timestamp(aec, read_bytes, &info->timestamp_usec);
+
+ /* Get reference - could be mono, downmixed from multichannel.
+ * Reference stored at spk_buf_playback_format */
+ const size_t resampler_in_frames = frames * sample_rate_ratio;
+ get_reference_audio_in_place(aec, resampler_in_frames);
+
+ int16_t* resampler_out_buf;
+ /* Resample to mic sampling rate (16-bit resampler) */
+ if (aec->spk_resampler != NULL) {
+ size_t in_frame_count = resampler_in_frames;
+ size_t out_frame_count = frames;
+ aec->spk_resampler->resample_from_input(aec->spk_resampler, aec->spk_buf_playback_format,
+ &in_frame_count, aec->spk_buf_resampler_out,
+ &out_frame_count);
+ resampler_out_buf = aec->spk_buf_resampler_out;
+ } else {
+ if (sample_rate_ratio != 1) {
+ ALOGE("Speaker sample rate %d, mic sample rate %d but no resampler defined!",
+ aec->spk_sampling_rate, aec->mic_sampling_rate);
+ }
+ resampler_out_buf = aec->spk_buf_playback_format;
+ }
+
+ /* Convert to 32 bit */
+ int16_t* src16 = resampler_out_buf;
+ int32_t* dst32 = buffer;
+ size_t frame, ch;
+ for (frame = 0; frame < frames; frame++) {
+ for (ch = 0; ch < aec->num_reference_channels; ch++) {
+ *dst32++ = ((int32_t)*src16++) << 16;
+ }
+ }
+
+ info->bytes = bytes;
+
+ ALOGV("%s exit", __func__);
+ return 0;
+}
+
+int init_aec_mic_config(struct aec_t *aec, struct alsa_stream_in *in) {
+ ALOGV("%s enter", __func__);
+#if DEBUG_AEC
+ remove("/data/local/traces/aec_in.pcm");
+ remove("/data/local/traces/aec_out.pcm");
+ remove("/data/local/traces/aec_ref.pcm");
+ remove("/data/local/traces/aec_timestamps.txt");
+#endif /* #if DEBUG_AEC */
+
+ if (!aec) {
+ ALOGE("AEC: No valid interface found!");
+ return -EINVAL;
+ }
+
+ int ret = 0;
+ pthread_mutex_lock(&aec->lock);
+ if (aec->mic_initialized) {
+ destroy_aec_mic_config_no_lock(aec);
+ }
+ aec->mic_sampling_rate = in->config.rate;
+ aec->mic_frame_size_bytes = audio_stream_in_frame_size(&in->stream);
+ aec->mic_num_channels = in->config.channels;
+
+ aec->mic_buf_size_bytes = in->config.period_size * audio_stream_in_frame_size(&in->stream);
+ aec->mic_buf = (int32_t *)malloc(aec->mic_buf_size_bytes);
+ if (aec->mic_buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ memset(aec->mic_buf, 0, aec->mic_buf_size_bytes);
+ /* Reference buffer is the same number of frames as mic,
+ * only with a different number of channels in the frame. */
+ aec->spk_buf_size_bytes = in->config.period_size * aec->spk_frame_size_bytes;
+ aec->spk_buf = (int32_t *)malloc(aec->spk_buf_size_bytes);
+ if (aec->spk_buf == NULL) {
+ ret = -ENOMEM;
+ goto exit_1;
+ }
+ memset(aec->spk_buf, 0, aec->spk_buf_size_bytes);
+
+ /* Pre-resampler buffer */
+ size_t spk_frame_out_format_bytes = aec->spk_sampling_rate / aec->mic_sampling_rate *
+ aec->spk_buf_size_bytes;
+ aec->spk_buf_playback_format = (int16_t *)malloc(spk_frame_out_format_bytes);
+ if (aec->spk_buf_playback_format == NULL) {
+ ret = -ENOMEM;
+ goto exit_2;
+ }
+ /* Resampler is 16-bit */
+ aec->spk_buf_resampler_out = (int16_t *)malloc(aec->spk_buf_size_bytes);
+ if (aec->spk_buf_resampler_out == NULL) {
+ ret = -ENOMEM;
+ goto exit_3;
+ }
+
+ /* Don't use resampler if it's not required */
+ if (in->config.rate == aec->spk_sampling_rate) {
+ aec->spk_resampler = NULL;
+ } else {
+ int resampler_ret = create_resampler(
+ aec->spk_sampling_rate, in->config.rate, aec->num_reference_channels,
+ RESAMPLER_QUALITY_MAX - 1, /* MAX - 1 is the real max */
+ NULL, /* resampler_buffer_provider */
+ &aec->spk_resampler);
+ if (resampler_ret) {
+ ALOGE("AEC: Resampler initialization failed! Error code %d", resampler_ret);
+ ret = resampler_ret;
+ goto exit_4;
+ }
+ }
+
+ flush_aec_fifos(aec);
+ aec_spk_mic_reset();
+ aec->mic_initialized = true;
+
+exit:
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return ret;
+
+exit_4:
+ free(aec->spk_buf_resampler_out);
+exit_3:
+ free(aec->spk_buf_playback_format);
+exit_2:
+ free(aec->spk_buf);
+exit_1:
+ free(aec->mic_buf);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void aec_set_spk_running(struct aec_t *aec, bool state) {
+ ALOGV("%s enter", __func__);
+ pthread_mutex_lock(&aec->lock);
+ aec_set_spk_running_no_lock(aec, state);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+}
+
+bool aec_get_spk_running(struct aec_t *aec) {
+ ALOGV("%s enter", __func__);
+ pthread_mutex_lock(&aec->lock);
+ bool state = aec_get_spk_running_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return state;
+}
+
+void destroy_aec_mic_config(struct aec_t* aec) {
+ ALOGV("%s enter", __func__);
+ if (aec == NULL) {
+ ALOGV("%s exit", __func__);
+ return;
+ }
+
+ pthread_mutex_lock(&aec->lock);
+ destroy_aec_mic_config_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+}
+
+#ifdef AEC_HAL
+int process_aec(struct aec_t *aec, void* buffer, struct aec_info *info) {
+ ALOGV("%s enter", __func__);
+ int ret = 0;
+
+ if (aec == NULL) {
+ ALOGE("AEC: Interface uninitialized! Cannot process.");
+ return -EINVAL;
+ }
+
+ if ((!aec->mic_initialized) || (!aec->spk_initialized)) {
+ ALOGE("%s called with initialization: mic: %d, spk: %d", __func__, aec->mic_initialized,
+ aec->spk_initialized);
+ return -EINVAL;
+ }
+
+ size_t bytes = info->bytes;
+
+ size_t frame_size = aec->mic_frame_size_bytes;
+ size_t in_frames = bytes / frame_size;
+
+ /* Copy raw mic samples to AEC input buffer */
+ memcpy(aec->mic_buf, buffer, bytes);
+
+ uint64_t mic_time = timespec_to_usec(info->timestamp);
+ uint64_t spk_time = 0;
+
+ /*
+ * Only run AEC if there is speaker playback.
+ * The first time speaker state changes to running, flush FIFOs, so we're not stuck
+ * processing stale reference input.
+ */
+ bool spk_running = aec_get_spk_running(aec);
+
+ if (!spk_running) {
+ /* No new playback samples, so don't run AEC.
+ * 'buffer' already contains input samples. */
+ ALOGV("Speaker not running, skipping AEC..");
+ goto exit;
+ }
+
+ if (!aec->prev_spk_running) {
+ flush_aec_fifos(aec);
+ }
+
+ /* If there's no data in FIFO, exit */
+ if (fifo_available_to_read(aec->spk_fifo) <= 0) {
+ ALOGV("Echo reference buffer empty, zeroing reference....");
+ goto exit;
+ }
+
+ print_queue_status_to_log(aec, false);
+
+ /* Get reference, with format and sample rate required by AEC */
+ struct aec_info spk_info;
+ spk_info.bytes = bytes;
+ int ref_ret = get_reference_samples(aec, aec->spk_buf, &spk_info);
+ spk_time = spk_info.timestamp_usec;
+
+ if (ref_ret) {
+ ALOGE("get_reference_samples returned code %d", ref_ret);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ int64_t time_diff = (mic_time > spk_time) ? (mic_time - spk_time) : (spk_time - mic_time);
+ if ((spk_time == 0) || (mic_time == 0) || (time_diff > MAX_TIMESTAMP_DIFF_USEC)) {
+ ALOGV("Speaker-mic timestamps diverged, skipping AEC");
+ flush_aec_fifos(aec);
+ aec_spk_mic_reset();
+ goto exit;
+ }
+
+ ALOGV("Mic time: %"PRIu64", spk time: %"PRIu64, mic_time, spk_time);
+
+ /*
+ * AEC processing call - output stored at 'buffer'
+ */
+ int32_t aec_status = aec_spk_mic_process(
+ aec->spk_buf, spk_time,
+ aec->mic_buf, mic_time,
+ in_frames,
+ buffer);
+
+ if (!aec_status) {
+ ALOGE("AEC processing failed!");
+ ret = -EINVAL;
+ }
+
+exit:
+ aec->prev_spk_running = spk_running;
+ ALOGV("Mic time: %"PRIu64", spk time: %"PRIu64, mic_time, spk_time);
+ if (ret) {
+ /* Best we can do is copy over the raw mic signal */
+ memcpy(buffer, aec->mic_buf, bytes);
+ flush_aec_fifos(aec);
+ aec_spk_mic_reset();
+ }
+
+#if DEBUG_AEC
+ /* ref data is 32-bit at this point */
+ size_t ref_bytes = in_frames*aec->num_reference_channels*sizeof(int32_t);
+
+ FILE *fp_in = fopen("/data/local/traces/aec_in.pcm", "a+");
+ if (fp_in) {
+ fwrite((char *)aec->mic_buf, 1, bytes, fp_in);
+ fclose(fp_in);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_in.pcm!");
+ }
+ FILE *fp_out = fopen("/data/local/traces/aec_out.pcm", "a+");
+ if (fp_out) {
+ fwrite((char *)buffer, 1, bytes, fp_out);
+ fclose(fp_out);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_out.pcm!");
+ }
+ FILE *fp_ref = fopen("/data/local/traces/aec_ref.pcm", "a+");
+ if (fp_ref) {
+ fwrite((char *)aec->spk_buf, 1, ref_bytes, fp_ref);
+ fclose(fp_ref);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_ref.pcm!");
+ }
+ FILE *fp_ts = fopen("/data/local/traces/aec_timestamps.txt", "a+");
+ if (fp_ts) {
+ fprintf(fp_ts, "%"PRIu64",%"PRIu64"\n", mic_time, spk_time);
+ fclose(fp_ts);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_timestamps.txt!");
+ }
+#endif /* #if DEBUG_AEC */
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+#endif /*#ifdef AEC_HAL*/
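The offset bookkeeping in get_spk_timestamp() above is easiest to follow with numbers. Below is a minimal standalone sketch of the same arithmetic, assuming 48 kHz stereo S16 playback (so spk_frame_size_bytes = 4) and a 3840-byte write packet of which 960 bytes have already been consumed; these values are illustrative, not taken from the patch.

    #include <stdio.h>

    int main(void) {
        const int spk_frame_size_bytes = 4;      /* 2 channels x 16-bit samples */
        const int spk_sampling_rate = 48000;
        const float usec_per_byte = 1e6f / (spk_frame_size_bytes * spk_sampling_rate);

        /* Last write packet: 3840 bytes (20 ms). 960 bytes of it were already
         * read, so read_write_diff_bytes = 960 - 3840 = -2880 (still negative,
         * i.e. we are mid-packet and reuse that packet's timestamp). */
        const long last_spk_info_bytes = 3840;
        const long read_write_diff_bytes = -2880;

        /* Same formula as get_spk_timestamp(): offset of the next read relative
         * to the packet timestamp = bytes already consumed * usec per byte. */
        float spk_time_offset = (last_spk_info_bytes + read_write_diff_bytes) * usec_per_byte;
        printf("offset = %.0f usec\n", spk_time_offset); /* prints 5000 usec = 5 ms */
        return 0;
    }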
diff --git a/audio/audio_aec.h b/audio/audio_aec.h
new file mode 100644
index 0000000..ac7a1dd
--- /dev/null
+++ b/audio/audio_aec.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Definitions and interface related to HAL implementations of Acoustic Echo Canceller (AEC).
+ *
+ * AEC cleans the microphone signal by removing from it audio data corresponding to loudspeaker
+ * playback. Note that this process can be nonlinear.
+ *
+ */
+
+#ifndef _AUDIO_AEC_H_
+#define _AUDIO_AEC_H_
+
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <hardware/audio.h>
+#include <audio_utils/resampler.h>
+#include "audio_hw.h"
+#include "fifo_wrapper.h"
+
+struct aec_t {
+ pthread_mutex_t lock;
+ size_t num_reference_channels;
+ bool mic_initialized;
+ int32_t *mic_buf;
+ size_t mic_num_channels;
+ size_t mic_buf_size_bytes;
+ size_t mic_frame_size_bytes;
+ uint32_t mic_sampling_rate;
+ struct aec_info last_mic_info;
+ bool spk_initialized;
+ int32_t *spk_buf;
+ size_t spk_num_channels;
+ size_t spk_buf_size_bytes;
+ size_t spk_frame_size_bytes;
+ uint32_t spk_sampling_rate;
+ struct aec_info last_spk_info;
+ int16_t *spk_buf_playback_format;
+ int16_t *spk_buf_resampler_out;
+ void *spk_fifo;
+ void *ts_fifo;
+ ssize_t read_write_diff_bytes;
+ struct resampler_itfe *spk_resampler;
+ bool spk_running;
+ bool prev_spk_running;
+};
+
+/* Initialize AEC object.
+ * This must be called when the audio device is opened.
+ * ALSA device mutex must be held before calling this API.
+ * Returns -EINVAL if AEC object fails to initialize, else returns 0. */
+int init_aec (int sampling_rate, int num_reference_channels,
+ int num_microphone_channels, struct aec_t **);
+
+/* Release AEC object.
+ * This must be called when the audio device is closed. */
+void release_aec(struct aec_t* aec);
+
+/* Initialize reference configuration for AEC.
+ * Must be called when a new output stream is opened.
+ * Returns -EINVAL if any processing block fails to initialize,
+ * else returns 0. */
+int init_aec_reference_config (struct aec_t *aec, struct alsa_stream_out *out);
+
+/* Clear reference configuration for AEC.
+ * Must be called when the output stream is closed. */
+void destroy_aec_reference_config (struct aec_t *aec);
+
+/* Initialize microphone configuration for AEC.
+ * Must be called when a new input stream is opened.
+ * Returns -EINVAL if any processing block fails to initialize,
+ * else returns 0. */
+int init_aec_mic_config(struct aec_t* aec, struct alsa_stream_in* in);
+
+/* Clear microphone configuration for AEC.
+ * Must be called when the input stream is closed. */
+void destroy_aec_mic_config (struct aec_t *aec);
+
+/* Used to communicate playback state (running or not) to AEC interface.
+ * This is used by process_aec() to determine if AEC processing is to be run. */
+void aec_set_spk_running (struct aec_t *aec, bool state);
+
+/* Used to communicate playback state (running or not) to the caller. */
+bool aec_get_spk_running(struct aec_t* aec);
+
+/* Write audio samples to AEC reference FIFO for use in AEC.
+ * Both audio samples and timestamps are added in FIFO fashion.
+ * Must be called after every write to PCM.
+ * Returns -ENOMEM if the write fails, else returns 0. */
+int write_to_reference_fifo(struct aec_t* aec, void* buffer, struct aec_info* info);
+
+/* Get reference audio samples + timestamp, in the format expected by AEC,
+ * i.e. same sample rate and bit rate as microphone audio.
+ * Timestamp is updated in field 'timestamp_usec', and not in 'timestamp'.
+ * Returns:
+ * -EINVAL if the AEC object is invalid.
+ * -ENOMEM if the reference FIFO overflows or is corrupted.
+ * -ETIMEDOUT if we timed out waiting for the requested number of bytes
+ * 0 otherwise */
+int get_reference_samples(struct aec_t* aec, void* buffer, struct aec_info* info);
+
+#ifdef AEC_HAL
+
+/* Processing function call for AEC.
+ * AEC output is updated at location pointed to by 'buffer'.
+ * This function does not run AEC when there is no playback -
+ * as communicated to this AEC interface using aec_set_spk_running().
+ * Returns -EINVAL if processing fails, else returns 0. */
+int process_aec(struct aec_t* aec, void* buffer, struct aec_info* info);
+
+#else /* #ifdef AEC_HAL */
+
+#define process_aec(...) ((int)0)
+
+#endif /* #ifdef AEC_HAL */
+
+#endif /* _AUDIO_AEC_H_ */
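Taken together, the comments in audio_aec.h imply a specific call order from the HAL. A rough sketch of that sequence follows; it is not standalone (it assumes an opened device with alsa_stream_out/alsa_stream_in objects), and the sampling rate and channel counts passed to init_aec() are illustrative only.

    /* Sketch of the call order implied by the header comments above. */
    static void aec_call_order_sketch(struct alsa_stream_out *out, struct alsa_stream_in *in,
                                      void *buf, struct aec_info *info) {
        struct aec_t *aec = NULL;
        init_aec(16000, 1, 2, &aec);              /* adev_open(): values illustrative */

        init_aec_reference_config(aec, out);      /* adev_open_output_stream() */
        aec_set_spk_running(aec, true);           /* first out_write() after standby */
        write_to_reference_fifo(aec, buf, info);  /* after every pcm_write() */

        init_aec_mic_config(aec, in);             /* adev_open_input_stream() */
        process_aec(aec, buf, info);              /* after every pcm_read(), when AEC_HAL is set */

        destroy_aec_mic_config(aec);              /* adev_close_input_stream() */
        destroy_aec_reference_config(aec);        /* adev_close_output_stream() */
        release_aec(aec);                         /* adev_close() */
    }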
diff --git a/audio/audio_hw.c b/audio/audio_hw.c
index 7be7178..4a16ac1 100644
--- a/audio/audio_hw.c
+++ b/audio/audio_hw.c
@@ -12,19 +12,20 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
+ *
 * Copied as-is from device/amlogic/generic/hal/audio/
*/
-#define MIXER_XML_PATH "/vendor/etc/mixer_paths.xml"
-
-#define LOG_TAG "audio_hw_dragonboard"
+#define LOG_TAG "audio_hw_yukawa"
//#define LOG_NDEBUG 0
#include <errno.h>
+#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <stdint.h>
-#include <sys/time.h>
#include <stdlib.h>
+#include <sys/time.h>
#include <unistd.h>
#include <log/log.h>
@@ -35,87 +36,152 @@
#include <system/audio.h>
#include <hardware/audio.h>
+#include <audio_effects/effect_aec.h>
#include <audio_route/audio_route.h>
-#include <sound/asound.h>
-#include <tinyalsa/asoundlib.h>
-#include <audio_utils/resampler.h>
+#include <audio_utils/clock.h>
#include <audio_utils/echo_reference.h>
-#include <hardware/audio_effect.h>
+#include <audio_utils/resampler.h>
#include <hardware/audio_alsaops.h>
-#include <audio_effects/effect_aec.h>
+#include <hardware/audio_effect.h>
+#include <sound/asound.h>
+#include <tinyalsa/asoundlib.h>
+#include <sys/ioctl.h>
-#define CARD_OUT 0
-#define PORT_CODEC 0
-/* Minimum granularity - Arbitrary but small value */
-#define CODEC_BASE_FRAME_COUNT 32
-
-/* number of base blocks in a short period (low latency) */
-#define PERIOD_MULTIPLIER 32 /* 21 ms */
-/* number of frames per short period (low latency) */
-#define PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * PERIOD_MULTIPLIER)
-/* number of pseudo periods for low latency playback */
-#define PLAYBACK_PERIOD_COUNT 2
-#define PLAYBACK_PERIOD_START_THRESHOLD 2
-#define CODEC_SAMPLING_RATE 48000
-#define CHANNEL_STEREO 2
-#define MIN_WRITE_SLEEP_US 5000
-
-struct stub_stream_in {
- struct audio_stream_in stream;
-};
+#include "audio_aec.h"
+#include "audio_hw.h"
-struct alsa_audio_device {
- struct audio_hw_device hw_device;
+static int adev_get_mic_mute(const struct audio_hw_device* dev, bool* state);
+static int adev_get_microphones(const struct audio_hw_device* dev,
+ struct audio_microphone_characteristic_t* mic_array,
+ size_t* mic_count);
+static size_t out_get_buffer_size(const struct audio_stream* stream);
- pthread_mutex_t lock; /* see note below on mutex acquisition order */
- int devices;
- struct alsa_stream_in *active_input;
- struct alsa_stream_out *active_output;
- struct audio_route *audio_route;
- struct mixer *mixer;
- bool mic_mute;
-};
+static int get_audio_output_port(audio_devices_t devices) {
+ /* Only HDMI out for now #FIXME */
+ return PORT_HDMI;
+}
-struct alsa_stream_out {
- struct audio_stream_out stream;
-
- pthread_mutex_t lock; /* see note below on mutex acquisition order */
- struct pcm_config config;
- struct pcm *pcm;
- bool unavailable;
- int standby;
- struct alsa_audio_device *dev;
- int write_threshold;
- unsigned int written;
-};
+static void timestamp_adjust(struct timespec* ts, ssize_t frames, uint32_t sampling_rate) {
+ /* This function assumes the adjustment (in nsec) is less than the max value of long,
+ * which for a 32-bit long is 2^31 * 1e-9 seconds, slightly over 2 seconds.
+ * For a 64-bit long it is about 9e+9 seconds. */
+ long adj_nsec = (frames / (float) sampling_rate) * 1E9L;
+ ts->tv_nsec += adj_nsec;
+ while (ts->tv_nsec > 1E9L) {
+ ts->tv_sec++;
+ ts->tv_nsec -= 1E9L;
+ }
+ if (ts->tv_nsec < 0) {
+ ts->tv_sec--;
+ ts->tv_nsec += 1E9L;
+ }
+}
+/* Helper function to get PCM hardware timestamp.
+ * Only the field 'timestamp' of argument 'ts' is updated. */
+static int get_pcm_timestamp(struct pcm* pcm, uint32_t sample_rate, struct aec_info* info,
+ bool isOutput) {
+ int ret = 0;
+ if (pcm_get_htimestamp(pcm, &info->available, &info->timestamp) < 0) {
+ ALOGE("Error getting PCM timestamp!");
+ info->timestamp.tv_sec = 0;
+ info->timestamp.tv_nsec = 0;
+ return -EINVAL;
+ }
+ ssize_t frames;
+ if (isOutput) {
+ frames = pcm_get_buffer_size(pcm) - info->available;
+ } else {
+ frames = -info->available; /* rewind timestamp */
+ }
+ timestamp_adjust(&info->timestamp, frames, sample_rate);
+ return ret;
+}
+
+static int read_filter_from_file(const char* filename, int16_t* filter, int max_length) {
+ FILE* fp = fopen(filename, "r");
+ if (fp == NULL) {
+ ALOGI("%s: File %s not found.", __func__, filename);
+ return 0;
+ }
+ int num_taps = 0;
+ char* line = NULL;
+ size_t len = 0;
+ while (!feof(fp)) {
+ size_t size = getline(&line, &len, fp);
+ if ((line[0] == '#') || (size < 2)) {
+ continue;
+ }
+ int n = sscanf(line, "%" SCNd16 "\n", &filter[num_taps++]);
+ if (n < 1) {
+ ALOGE("Could not find coefficient %d! Exiting...", num_taps - 1);
+ return 0;
+ }
+ ALOGV("Coeff %d : %" PRId16, num_taps, filter[num_taps - 1]);
+ if (num_taps == max_length) {
+ ALOGI("%s: max tap length %d reached.", __func__, max_length);
+ break;
+ }
+ }
+ free(line);
+ fclose(fp);
+ return num_taps;
+}
+
+static void out_set_eq(struct alsa_stream_out* out) {
+ out->speaker_eq = NULL;
+ int16_t* speaker_eq_coeffs = (int16_t*)calloc(SPEAKER_MAX_EQ_LENGTH, sizeof(int16_t));
+ if (speaker_eq_coeffs == NULL) {
+ ALOGE("%s: Failed to allocate speaker EQ", __func__);
+ return;
+ }
+ int num_taps = read_filter_from_file(SPEAKER_EQ_FILE, speaker_eq_coeffs, SPEAKER_MAX_EQ_LENGTH);
+ if (num_taps == 0) {
+ ALOGI("%s: Empty filter file or 0 taps set.", __func__);
+ free(speaker_eq_coeffs);
+ return;
+ }
+ out->speaker_eq = fir_init(
+ out->config.channels, FIR_SINGLE_FILTER, num_taps,
+ out_get_buffer_size(&out->stream.common) / out->config.channels / sizeof(int16_t),
+ speaker_eq_coeffs);
+ free(speaker_eq_coeffs);
+}
/* must be called with hw device and output stream mutexes locked */
static int start_output_stream(struct alsa_stream_out *out)
{
struct alsa_audio_device *adev = out->dev;
- if (out->unavailable)
- return -ENODEV;
-
/* default to low power: will be corrected in out_write if necessary before first write to
* tinyalsa.
*/
- out->write_threshold = PLAYBACK_PERIOD_COUNT * PERIOD_SIZE;
- out->config.start_threshold = PLAYBACK_PERIOD_START_THRESHOLD * PERIOD_SIZE;
- out->config.avail_min = PERIOD_SIZE;
-
- out->pcm = pcm_open(CARD_OUT, PORT_CODEC, PCM_OUT | PCM_MMAP | PCM_NOIRQ | PCM_MONOTONIC, &out->config);
-
- if (!pcm_is_ready(out->pcm)) {
- ALOGE("cannot open pcm_out driver: %s", pcm_get_error(out->pcm));
- pcm_close(out->pcm);
- adev->active_output = NULL;
- out->unavailable = true;
- return -ENODEV;
+ out->write_threshold = PLAYBACK_PERIOD_COUNT * PLAYBACK_PERIOD_SIZE;
+ out->config.start_threshold = PLAYBACK_PERIOD_START_THRESHOLD * PLAYBACK_PERIOD_SIZE;
+ out->config.avail_min = PLAYBACK_PERIOD_SIZE;
+ out->unavailable = true;
+ unsigned int pcm_retry_count = PCM_OPEN_RETRIES;
+ int out_port = get_audio_output_port(out->devices);
+
+ while (1) {
+ out->pcm = pcm_open(CARD_OUT, out_port, PCM_OUT | PCM_MONOTONIC, &out->config);
+ if ((out->pcm != NULL) && pcm_is_ready(out->pcm)) {
+ break;
+ } else {
+ ALOGE("cannot open pcm_out driver: %s", pcm_get_error(out->pcm));
+ if (out->pcm != NULL) {
+ pcm_close(out->pcm);
+ out->pcm = NULL;
+ }
+ if (--pcm_retry_count == 0) {
+ ALOGE("Failed to open pcm_out after %d tries", PCM_OPEN_RETRIES);
+ return -ENODEV;
+ }
+ usleep(PCM_OPEN_WAIT_TIME_MS * 1000);
+ }
}
-
+ out->unavailable = false;
adev->active_output = out;
return 0;
}
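read_filter_from_file() and out_set_eq() in the hunk above load the speaker EQ FIR taps from a plain text file at whatever path SPEAKER_EQ_FILE points to (presumably defined in audio_hw.h, which is not shown here): one signed 16-bit decimal value per line, lines starting with '#' are ignored, and at most SPEAKER_MAX_EQ_LENGTH taps are read. A sketch of that file format, written as a C string literal; the coefficient values are arbitrary placeholders, not a tuned EQ.

    /* Example of the format read_filter_from_file() accepts. On the device the
     * contents would live in the file named by SPEAKER_EQ_FILE. */
    static const char example_speaker_eq_file[] =
        "# speaker EQ FIR coefficients, one int16 per line\n"
        "16384\n"
        "-8192\n"
        "4096\n"
        "-2048\n"
        "1024\n";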
@@ -138,7 +204,7 @@ static size_t out_get_buffer_size(const struct audio_stream *stream)
/* return the closest majoring multiple of 16 frames, as
* audioflinger expects audio buffers to be a multiple of 16 frames */
- size_t size = PERIOD_SIZE;
+ size_t size = PLAYBACK_PERIOD_SIZE;
size = ((size + 15) / 16) * 16;
return size * audio_stream_out_frame_size((struct audio_stream_out *)stream);
}
@@ -167,12 +233,15 @@ static int do_output_standby(struct alsa_stream_out *out)
{
struct alsa_audio_device *adev = out->dev;
+ fir_reset(out->speaker_eq);
+
if (!out->standby) {
pcm_close(out->pcm);
out->pcm = NULL;
adev->active_output = NULL;
out->standby = 1;
}
+ aec_set_spk_running(adev->aec, false);
return 0;
}
@@ -212,16 +281,16 @@ static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
val = atoi(value);
pthread_mutex_lock(&adev->lock);
pthread_mutex_lock(&out->lock);
- if (((adev->devices & AUDIO_DEVICE_OUT_ALL) != val) && (val != 0)) {
- adev->devices &= ~AUDIO_DEVICE_OUT_ALL;
- adev->devices |= val;
+ if (((out->devices & AUDIO_DEVICE_OUT_ALL) != val) && (val != 0)) {
+ out->devices &= ~AUDIO_DEVICE_OUT_ALL;
+ out->devices |= val;
}
pthread_mutex_unlock(&out->lock);
pthread_mutex_unlock(&adev->lock);
}
str_parms_destroy(parms);
- return ret;
+ return 0;
}
static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
@@ -234,14 +303,14 @@ static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
ALOGV("out_get_latency");
struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
- return (PERIOD_SIZE * PLAYBACK_PERIOD_COUNT * 1000) / out->config.rate;
+ return (PLAYBACK_PERIOD_SIZE * PLAYBACK_PERIOD_COUNT * 1000) / out->config.rate;
}
static int out_set_volume(struct audio_stream_out *stream, float left,
float right)
{
ALOGV("out_set_volume: Left:%f Right:%f", left, right);
- return 0;
+ return -ENOSYS;
}
static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
@@ -253,6 +322,8 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
size_t frame_size = audio_stream_out_frame_size(stream);
size_t out_frames = bytes / frame_size;
+ ALOGV("%s: devices: %d, bytes %zu", __func__, out->devices, bytes);
+
/* acquiring hw device mutex systematically is useful if a low priority thread is waiting
* on the output stream mutex - e.g. executing select_mode() while holding the hw device
* mutex
@@ -266,14 +337,29 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
goto exit;
}
out->standby = 0;
+ aec_set_spk_running(adev->aec, true);
}
pthread_mutex_unlock(&adev->lock);
- ret = pcm_mmap_write(out->pcm, buffer, out_frames * frame_size);
+ if (out->speaker_eq != NULL) {
+ fir_process_interleaved(out->speaker_eq, (int16_t*)buffer, (int16_t*)buffer, out_frames);
+ }
+
+ ret = pcm_write(out->pcm, buffer, out_frames * frame_size);
if (ret == 0) {
- out->written += out_frames;
+ out->frames_written += out_frames;
+
+ struct aec_info info;
+ get_pcm_timestamp(out->pcm, out->config.rate, &info, true /*isOutput*/);
+ out->timestamp = info.timestamp;
+ info.bytes = out_frames * frame_size;
+ int aec_ret = write_to_reference_fifo(adev->aec, (void *)buffer, &info);
+ if (aec_ret) {
+ ALOGE("AEC: Write to speaker loopback FIFO failed!");
+ }
}
+
exit:
pthread_mutex_unlock(&out->lock);
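out_write() above stamps each successful write via get_pcm_timestamp(), which relies on timestamp_adjust() (introduced two hunks up) to shift the DMA timestamp by a signed frame count. A small standalone sketch of that adjustment, using assumed values (48 kHz, a forward shift of 960 frames, i.e. 20 ms):

    #include <stdio.h>
    #include <time.h>

    /* Same arithmetic as timestamp_adjust(): shift a timespec by a signed
     * frame count at a given sample rate. 960 frames at 48 kHz = +20 ms. */
    int main(void) {
        struct timespec ts = { .tv_sec = 100, .tv_nsec = 990000000 }; /* 100.990 s */
        long frames = 960;
        long adj_nsec = (frames / 48000.0f) * 1000000000L;            /* 20,000,000 ns */
        ts.tv_nsec += adj_nsec;
        while (ts.tv_nsec > 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);     /* 101.010000000 */
        return 0;
    }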
@@ -288,30 +374,24 @@ exit:
static int out_get_render_position(const struct audio_stream_out *stream,
uint32_t *dsp_frames)
{
- *dsp_frames = 0;
ALOGV("out_get_render_position: dsp_frames: %p", dsp_frames);
- return -EINVAL;
+ return -ENOSYS;
}
static int out_get_presentation_position(const struct audio_stream_out *stream,
uint64_t *frames, struct timespec *timestamp)
{
- struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
- int ret = -1;
-
- if (out->pcm) {
- unsigned int avail;
- if (pcm_get_htimestamp(out->pcm, &avail, timestamp) == 0) {
- size_t kernel_buffer_size = out->config.period_size * out->config.period_count;
- int64_t signed_frames = out->written - kernel_buffer_size + avail;
- if (signed_frames >= 0) {
- *frames = signed_frames;
- ret = 0;
- }
- }
- }
+ if (stream == NULL || frames == NULL || timestamp == NULL) {
+ return -EINVAL;
+ }
+ struct alsa_stream_out* out = (struct alsa_stream_out*)stream;
- return ret;
+ *frames = out->frames_written;
+ *timestamp = out->timestamp;
+ ALOGV("%s: frames: %" PRIu64 ", timestamp (nsec): %" PRIu64, __func__, *frames,
+ audio_utils_ns_from_timespec(timestamp));
+
+ return 0;
}
@@ -332,14 +412,64 @@ static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
{
*timestamp = 0;
ALOGV("out_get_next_write_timestamp: %ld", (long int)(*timestamp));
- return -EINVAL;
+ return -ENOSYS;
}
/** audio_stream_in implementation **/
+
+/* must be called with hw device and input stream mutexes locked */
+static int start_input_stream(struct alsa_stream_in *in)
+{
+ struct alsa_audio_device *adev = in->dev;
+ in->unavailable = true;
+ unsigned int pcm_retry_count = PCM_OPEN_RETRIES;
+
+ while (1) {
+ in->pcm = pcm_open(CARD_IN, PORT_BUILTIN_MIC, PCM_IN | PCM_MONOTONIC, &in->config);
+ if ((in->pcm != NULL) && pcm_is_ready(in->pcm)) {
+ break;
+ } else {
+ ALOGE("cannot open pcm_in driver: %s", pcm_get_error(in->pcm));
+ if (in->pcm != NULL) {
+ pcm_close(in->pcm);
+ in->pcm = NULL;
+ }
+ if (--pcm_retry_count == 0) {
+ ALOGE("Failed to open pcm_in after %d tries", PCM_OPEN_RETRIES);
+ return -ENODEV;
+ }
+ usleep(PCM_OPEN_WAIT_TIME_MS * 1000);
+ }
+ }
+ in->unavailable = false;
+ adev->active_input = in;
+ return 0;
+}
+
+static void get_mic_characteristics(struct audio_microphone_characteristic_t* mic_data,
+ size_t* mic_count) {
+ *mic_count = 1;
+ memset(mic_data, 0, sizeof(struct audio_microphone_characteristic_t));
+ strlcpy(mic_data->device_id, "builtin_mic", AUDIO_MICROPHONE_ID_MAX_LEN - 1);
+ strlcpy(mic_data->address, "top", AUDIO_DEVICE_MAX_ADDRESS_LEN - 1);
+ memset(mic_data->channel_mapping, AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED,
+ sizeof(mic_data->channel_mapping));
+ mic_data->device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+ mic_data->sensitivity = -37.0;
+ mic_data->max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ mic_data->min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ mic_data->orientation.x = 0.0f;
+ mic_data->orientation.y = 0.0f;
+ mic_data->orientation.z = 0.0f;
+ mic_data->geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ mic_data->geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ mic_data->geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+}
+
static uint32_t in_get_sample_rate(const struct audio_stream *stream)
{
- ALOGV("in_get_sample_rate");
- return 8000;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ return in->config.rate;
}
static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
@@ -348,21 +478,29 @@ static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
return -ENOSYS;
}
-static size_t in_get_buffer_size(const struct audio_stream *stream)
-{
- ALOGV("in_get_buffer_size: %d", 320);
- return 320;
+static size_t get_input_buffer_size(size_t frames, audio_format_t format,
+ audio_channel_mask_t channel_mask) {
+ /* return the closest majoring multiple of 16 frames, as
+ * audioflinger expects audio buffers to be a multiple of 16 frames */
+ frames = ((frames + 15) / 16) * 16;
+ size_t bytes_per_frame = audio_channel_count_from_in_mask(channel_mask) *
+ audio_bytes_per_sample(format);
+ size_t buffer_size = frames * bytes_per_frame;
+ return buffer_size;
}
static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
{
- ALOGV("in_get_channels: %d", AUDIO_CHANNEL_IN_MONO);
- return AUDIO_CHANNEL_IN_MONO;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ ALOGV("in_get_channels: %d", in->config.channels);
+ return audio_channel_in_mask_from_count(in->config.channels);
}
static audio_format_t in_get_format(const struct audio_stream *stream)
{
- return AUDIO_FORMAT_PCM_16_BIT;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ ALOGV("in_get_format: %d", in->config.format);
+ return audio_format_from_pcm_format(in->config.format);
}
static int in_set_format(struct audio_stream *stream, audio_format_t format)
@@ -370,13 +508,86 @@ static int in_set_format(struct audio_stream *stream, audio_format_t format)
return -ENOSYS;
}
-static int in_standby(struct audio_stream *stream)
+static size_t in_get_buffer_size(const struct audio_stream *stream)
+{
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+ size_t frames = CAPTURE_PERIOD_SIZE;
+ if (in->source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ frames = CAPTURE_PERIOD_SIZE * PLAYBACK_CODEC_SAMPLING_RATE / CAPTURE_CODEC_SAMPLING_RATE;
+ }
+
+ size_t buffer_size =
+ get_input_buffer_size(frames, stream->get_format(stream), stream->get_channels(stream));
+ ALOGV("in_get_buffer_size: %zu", buffer_size);
+ return buffer_size;
+}
+
+static int in_get_active_microphones(const struct audio_stream_in* stream,
+ struct audio_microphone_characteristic_t* mic_array,
+ size_t* mic_count) {
+ ALOGV("in_get_active_microphones");
+ if ((mic_array == NULL) || (mic_count == NULL)) {
+ return -EINVAL;
+ }
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+ struct audio_hw_device* dev = (struct audio_hw_device*)in->dev;
+ bool mic_muted = false;
+ adev_get_mic_mute(dev, &mic_muted);
+ if ((in->source == AUDIO_SOURCE_ECHO_REFERENCE) || mic_muted) {
+ *mic_count = 0;
+ return 0;
+ }
+ adev_get_microphones(dev, mic_array, mic_count);
+ return 0;
+}
+
+static int do_input_standby(struct alsa_stream_in *in)
{
+ struct alsa_audio_device *adev = in->dev;
+
+ if (!in->standby) {
+ pcm_close(in->pcm);
+ in->pcm = NULL;
+ adev->active_input = NULL;
+ in->standby = true;
+ }
return 0;
}
+static int in_standby(struct audio_stream *stream)
+{
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ int status;
+
+ pthread_mutex_lock(&in->lock);
+ pthread_mutex_lock(&in->dev->lock);
+ status = do_input_standby(in);
+ pthread_mutex_unlock(&in->dev->lock);
+ pthread_mutex_unlock(&in->lock);
+ return status;
+}
+
static int in_dump(const struct audio_stream *stream, int fd)
{
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+ if (in->source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ return 0;
+ }
+
+ struct audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+ size_t mic_count;
+
+ get_mic_characteristics(mic_array, &mic_count);
+
+ dprintf(fd, " Microphone count: %zd\n", mic_count);
+ size_t idx;
+ for (idx = 0; idx < mic_count; idx++) {
+ dprintf(fd, " Microphone: %zd\n", idx);
+ dprintf(fd, " Address: %s\n", mic_array[idx].address);
+ dprintf(fd, " Device: %d\n", mic_array[idx].device);
+ dprintf(fd, " Sensitivity (dB): %.2f\n", mic_array[idx].sensitivity);
+ }
+
return 0;
}
@@ -399,14 +610,154 @@ static int in_set_gain(struct audio_stream_in *stream, float gain)
static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
size_t bytes)
{
- ALOGV("in_read: bytes %zu", bytes);
- /* XXX: fake timing for audio input */
- usleep((int64_t)bytes * 1000000 / audio_stream_in_frame_size(stream) /
- in_get_sample_rate(&stream->common));
- memset(buffer, 0, bytes);
+ int ret;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ struct alsa_audio_device *adev = in->dev;
+ size_t frame_size = audio_stream_in_frame_size(stream);
+ size_t in_frames = bytes / frame_size;
+
+ ALOGV("in_read: stream: %d, bytes %zu", in->source, bytes);
+
+ /* Special handling for Echo Reference: simply get the reference from FIFO.
+ * The format and sample rate should be specified by arguments to adev_open_input_stream. */
+ if (in->source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ struct aec_info info;
+ info.bytes = bytes;
+
+ const uint64_t time_increment_nsec = (uint64_t)bytes * NANOS_PER_SECOND /
+ audio_stream_in_frame_size(stream) /
+ in_get_sample_rate(&stream->common);
+ if (!aec_get_spk_running(adev->aec)) {
+ if (in->timestamp_nsec == 0) {
+ struct timespec now;
+ clock_gettime(CLOCK_MONOTONIC, &now);
+ const uint64_t timestamp_nsec = audio_utils_ns_from_timespec(&now);
+ in->timestamp_nsec = timestamp_nsec;
+ } else {
+ in->timestamp_nsec += time_increment_nsec;
+ }
+ memset(buffer, 0, bytes);
+ const uint64_t time_increment_usec = time_increment_nsec / 1000;
+ usleep(time_increment_usec);
+ } else {
+ int ref_ret = get_reference_samples(adev->aec, buffer, &info);
+ if ((ref_ret) || (info.timestamp_usec == 0)) {
+ memset(buffer, 0, bytes);
+ in->timestamp_nsec += time_increment_nsec;
+ } else {
+ in->timestamp_nsec = 1000 * info.timestamp_usec;
+ }
+ }
+ in->frames_read += in_frames;
+
+#if DEBUG_AEC
+ FILE* fp_ref = fopen("/data/local/traces/aec_ref.pcm", "a+");
+ if (fp_ref) {
+ fwrite((char*)buffer, 1, bytes, fp_ref);
+ fclose(fp_ref);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_ref.pcm!");
+ }
+ FILE* fp_ref_ts = fopen("/data/local/traces/aec_ref_timestamps.txt", "a+");
+ if (fp_ref_ts) {
+ fprintf(fp_ref_ts, "%" PRIu64 "\n", in->timestamp_nsec);
+ fclose(fp_ref_ts);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_ref_timestamps.txt!");
+ }
+#endif
+ return info.bytes;
+ }
+
+ /* Microphone input stream read */
+
+ /* acquiring hw device mutex systematically is useful if a low priority thread is waiting
+ * on the input stream mutex - e.g. executing select_mode() while holding the hw device
+ * mutex
+ */
+ pthread_mutex_lock(&in->lock);
+ pthread_mutex_lock(&adev->lock);
+ if (in->standby) {
+ ret = start_input_stream(in);
+ if (ret != 0) {
+ pthread_mutex_unlock(&adev->lock);
+ ALOGE("start_input_stream failed with code %d", ret);
+ goto exit;
+ }
+ in->standby = false;
+ }
+
+ pthread_mutex_unlock(&adev->lock);
+
+ ret = pcm_read(in->pcm, buffer, in_frames * frame_size);
+ struct aec_info info;
+ get_pcm_timestamp(in->pcm, in->config.rate, &info, false /*isOutput*/);
+ if (ret == 0) {
+ in->frames_read += in_frames;
+ in->timestamp_nsec = audio_utils_ns_from_timespec(&info.timestamp);
+ }
+ else {
+ ALOGE("pcm_read failed with code %d", ret);
+ }
+
+exit:
+ pthread_mutex_unlock(&in->lock);
+
+ bool mic_muted = false;
+ adev_get_mic_mute((struct audio_hw_device*)adev, &mic_muted);
+ if (mic_muted) {
+ memset(buffer, 0, bytes);
+ }
+
+ if (ret != 0) {
+ usleep((int64_t)bytes * 1000000 / audio_stream_in_frame_size(stream) /
+ in_get_sample_rate(&stream->common));
+ } else {
+ /* Process AEC if available */
+ /* TODO move to a separate thread */
+ if (!mic_muted) {
+ info.bytes = bytes;
+ int aec_ret = process_aec(adev->aec, buffer, &info);
+ if (aec_ret) {
+ ALOGE("process_aec returned error code %d", aec_ret);
+ }
+ }
+ }
+
+#if DEBUG_AEC && !defined(AEC_HAL)
+ FILE* fp_in = fopen("/data/local/traces/aec_in.pcm", "a+");
+ if (fp_in) {
+ fwrite((char*)buffer, 1, bytes, fp_in);
+ fclose(fp_in);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_in.pcm!");
+ }
+ FILE* fp_mic_ts = fopen("/data/local/traces/aec_in_timestamps.txt", "a+");
+ if (fp_mic_ts) {
+ fprintf(fp_mic_ts, "%" PRIu64 "\n", in->timestamp_nsec);
+ fclose(fp_mic_ts);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_in_timestamps.txt!");
+ }
+#endif
+
return bytes;
}
+static int in_get_capture_position(const struct audio_stream_in* stream, int64_t* frames,
+ int64_t* time) {
+ if (stream == NULL || frames == NULL || time == NULL) {
+ return -EINVAL;
+ }
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+
+ *frames = in->frames_read;
+ *time = in->timestamp_nsec;
+ ALOGV("%s: source: %d, timestamp (nsec): %" PRIu64, __func__, in->source, *time);
+
+ return 0;
+}
+
static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
{
return 0;
@@ -437,7 +788,9 @@ static int adev_open_output_stream(struct audio_hw_device *dev,
struct pcm_params *params;
int ret = 0;
- params = pcm_params_get(CARD_OUT, PORT_CODEC, PCM_OUT);
+ int out_port = get_audio_output_port(devices);
+
+ params = pcm_params_get(CARD_OUT, out_port, PCM_OUT);
if (!params)
return -ENOSYS;
@@ -465,9 +818,9 @@ static int adev_open_output_stream(struct audio_hw_device *dev,
out->stream.get_presentation_position = out_get_presentation_position;
out->config.channels = CHANNEL_STEREO;
- out->config.rate = CODEC_SAMPLING_RATE;
+ out->config.rate = PLAYBACK_CODEC_SAMPLING_RATE;
out->config.format = PCM_FORMAT_S16_LE;
- out->config.period_size = PERIOD_SIZE;
+ out->config.period_size = PLAYBACK_PERIOD_SIZE;
out->config.period_count = PLAYBACK_PERIOD_COUNT;
if (out->config.rate != config->sample_rate ||
@@ -479,12 +832,13 @@ static int adev_open_output_stream(struct audio_hw_device *dev,
ret = -EINVAL;
}
- ALOGI("adev_open_output_stream selects channels=%d rate=%d format=%d",
- out->config.channels, out->config.rate, out->config.format);
+ ALOGI("adev_open_output_stream selects channels=%d rate=%d format=%d, devices=%d",
+ out->config.channels, out->config.rate, out->config.format, devices);
out->dev = ladev;
out->standby = 1;
out->unavailable = false;
+ out->devices = devices;
config->format = out_get_format(&out->stream.common);
config->channel_mask = out_get_channels(&out->stream.common);
@@ -492,9 +846,25 @@ static int adev_open_output_stream(struct audio_hw_device *dev,
*stream_out = &out->stream;
+ out->speaker_eq = NULL;
+ if (out_port == PORT_INTERNAL_SPEAKER) {
+ out_set_eq(out);
+ if (out->speaker_eq == NULL) {
+ ALOGE("%s: Failed to initialize speaker EQ", __func__);
+ }
+ }
+
/* TODO The retry mechanism isn't implemented in AudioPolicyManager/AudioFlinger. */
ret = 0;
+ if (ret == 0) {
+ int aec_ret = init_aec_reference_config(ladev->aec, out);
+ if (aec_ret) {
+ ALOGE("AEC: Speaker config init failed!");
+ return -EINVAL;
+ }
+ }
+
return ret;
}
@@ -502,6 +872,10 @@ static void adev_close_output_stream(struct audio_hw_device *dev,
struct audio_stream_out *stream)
{
ALOGV("adev_close_output_stream...");
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
+ destroy_aec_reference_config(adev->aec);
+ struct alsa_stream_out* out = (struct alsa_stream_out*)stream;
+ fir_release(out->speaker_eq);
free(stream);
}
@@ -518,6 +892,17 @@ static char * adev_get_parameters(const struct audio_hw_device *dev,
return strdup("");
}
+static int adev_get_microphones(const struct audio_hw_device* dev,
+ struct audio_microphone_characteristic_t* mic_array,
+ size_t* mic_count) {
+ ALOGV("adev_get_microphones");
+ if ((mic_array == NULL) || (mic_count == NULL)) {
+ return -EINVAL;
+ }
+ get_mic_characteristics(mic_array, mic_count);
+ return 0;
+}
+
static int adev_init_check(const struct audio_hw_device *dev)
{
ALOGV("adev_init_check");
@@ -563,36 +948,49 @@ static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode)
static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
{
ALOGV("adev_set_mic_mute: %d",state);
- return -ENOSYS;
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
+ pthread_mutex_lock(&adev->lock);
+ adev->mic_mute = state;
+ pthread_mutex_unlock(&adev->lock);
+ return 0;
}
static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
{
ALOGV("adev_get_mic_mute");
- return -ENOSYS;
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
+ pthread_mutex_lock(&adev->lock);
+ *state = adev->mic_mute;
+ pthread_mutex_unlock(&adev->lock);
+ return 0;
}
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
const struct audio_config *config)
{
- ALOGV("adev_get_input_buffer_size: %d", 320);
- return 320;
+ size_t buffer_size =
+ get_input_buffer_size(CAPTURE_PERIOD_SIZE, config->format, config->channel_mask);
+ ALOGV("adev_get_input_buffer_size: %zu", buffer_size);
+ return buffer_size;
}
-static int adev_open_input_stream(struct audio_hw_device __unused *dev,
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- struct audio_stream_in **stream_in,
- audio_input_flags_t flags __unused,
- const char *address __unused,
- audio_source_t source __unused)
-{
- struct stub_stream_in *in;
-
+static int adev_open_input_stream(struct audio_hw_device* dev, audio_io_handle_t handle,
+ audio_devices_t devices, struct audio_config* config,
+ struct audio_stream_in** stream_in,
+ audio_input_flags_t flags __unused, const char* address __unused,
+ audio_source_t source) {
ALOGV("adev_open_input_stream...");
- in = (struct stub_stream_in *)calloc(1, sizeof(struct stub_stream_in));
+ struct alsa_audio_device *ladev = (struct alsa_audio_device *)dev;
+ struct alsa_stream_in *in;
+ struct pcm_params *params;
+ int ret = 0;
+
+ params = pcm_params_get(CARD_IN, PORT_BUILTIN_MIC, PCM_IN);
+ if (!params)
+ return -ENOSYS;
+
+ in = (struct alsa_stream_in *)calloc(1, sizeof(struct alsa_stream_in));
if (!in)
return -ENOMEM;
@@ -611,15 +1009,75 @@ static int adev_open_input_stream(struct audio_hw_device __unused *dev,
in->stream.set_gain = in_set_gain;
in->stream.read = in_read;
in->stream.get_input_frames_lost = in_get_input_frames_lost;
+ in->stream.get_capture_position = in_get_capture_position;
+ in->stream.get_active_microphones = in_get_active_microphones;
+
+ in->config.channels = CHANNEL_STEREO;
+ if (source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ in->config.rate = PLAYBACK_CODEC_SAMPLING_RATE;
+ } else {
+ in->config.rate = CAPTURE_CODEC_SAMPLING_RATE;
+ }
+ in->config.format = PCM_FORMAT_S32_LE;
+ in->config.period_size = CAPTURE_PERIOD_SIZE;
+ in->config.period_count = CAPTURE_PERIOD_COUNT;
- *stream_in = &in->stream;
- return 0;
+ if (in->config.rate != config->sample_rate ||
+ audio_channel_count_from_in_mask(config->channel_mask) != CHANNEL_STEREO ||
+ in->config.format != pcm_format_from_audio_format(config->format)) {
+ ret = -EINVAL;
+ }
+
+ ALOGI("adev_open_input_stream selects channels=%d rate=%d format=%d source=%d",
+ in->config.channels, in->config.rate, in->config.format, source);
+
+ in->dev = ladev;
+ in->standby = true;
+ in->unavailable = false;
+ in->source = source;
+ in->devices = devices;
+
+ config->format = in_get_format(&in->stream.common);
+ config->channel_mask = in_get_channels(&in->stream.common);
+ config->sample_rate = in_get_sample_rate(&in->stream.common);
+
+ /* If AEC is in the app, only configure based on ECHO_REFERENCE spec.
+ * If AEC is in the HAL, configure using the given mic stream. */
+ bool aecInput = true;
+#if !defined(AEC_HAL)
+ aecInput = (in->source == AUDIO_SOURCE_ECHO_REFERENCE);
+#endif
+
+ if ((ret == 0) && aecInput) {
+ int aec_ret = init_aec_mic_config(ladev->aec, in);
+ if (aec_ret) {
+ ALOGE("AEC: Mic config init failed!");
+ return -EINVAL;
+ }
+ }
+
+ if (ret) {
+ free(in);
+ } else {
+ *stream_in = &in->stream;
+ }
+
+#if DEBUG_AEC
+ remove("/data/local/traces/aec_ref.pcm");
+ remove("/data/local/traces/aec_in.pcm");
+ remove("/data/local/traces/aec_ref_timestamps.txt");
+ remove("/data/local/traces/aec_in_timestamps.txt");
+#endif
+ return ret;
}
static void adev_close_input_stream(struct audio_hw_device *dev,
- struct audio_stream_in *in)
+ struct audio_stream_in *stream)
{
ALOGV("adev_close_input_stream...");
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
+ destroy_aec_mic_config(adev->aec);
+ free(stream);
return;
}
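
For reference, a caller-side configuration that passes the sample-rate, channel and format checks in adev_open_input_stream() above could look like the sketch below. It is illustrative only: 'hw_dev' stands for an already-opened audio_hw_device, and it assumes AUDIO_FORMAT_PCM_32_BIT maps to PCM_FORMAT_S32_LE via pcm_format_from_audio_format() on this platform. An app-side AEC would instead request AUDIO_SOURCE_ECHO_REFERENCE at PLAYBACK_CODEC_SAMPLING_RATE to obtain the reference stream.

/* Illustrative only; the HAL validates rate/channels/format, not 'devices'. */
static int example_open_builtin_mic(struct audio_hw_device *hw_dev,
                                    struct audio_stream_in **mic) {
    struct audio_config cfg = {
        .sample_rate = CAPTURE_CODEC_SAMPLING_RATE, /* 16 kHz; echo reference uses 48 kHz */
        .channel_mask = AUDIO_CHANNEL_IN_STEREO,
        .format = AUDIO_FORMAT_PCM_32_BIT,          /* assumed to map to PCM_FORMAT_S32_LE */
    };
    return hw_dev->open_input_stream(hw_dev, 0 /* handle */, AUDIO_DEVICE_IN_BUILTIN_MIC,
                                     &cfg, mic, AUDIO_INPUT_FLAG_NONE,
                                     NULL /* address */, AUDIO_SOURCE_MIC);
}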
@@ -632,6 +1090,9 @@ static int adev_dump(const audio_hw_device_t *device, int fd)
static int adev_close(hw_device_t *device)
{
ALOGV("adev_close");
+
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)device;
+ release_aec(adev->aec);
free(device);
return 0;
}
@@ -671,26 +1132,10 @@ static int adev_open(const hw_module_t* module, const char* name,
adev->hw_device.open_input_stream = adev_open_input_stream;
adev->hw_device.close_input_stream = adev_close_input_stream;
adev->hw_device.dump = adev_dump;
-
- adev->devices = AUDIO_DEVICE_NONE;
+ adev->hw_device.get_microphones = adev_get_microphones;
*device = &adev->hw_device.common;
- /* TODO: Ideally we should use Alsa UCM instead of libaudioroute
- * because with Alsa UCM:
- * - We can share the work with GNU/Linux. For instance we
- * might have automatic support for the PinePhone and our
- * work would benefit GNU/Linux distributions as well.
- * - It opens the door to generic images: With Alsa UCM, we
- * don't need build time configuration anymore: we could
- * ship a single audio library and Alsa UCM would take care
- * of the routing and abstract away the device specific part
- * for us. If we code that ourselves, it will probably not
- * be as fine grained nor as good as Alsa UCM.
- * - It can be experimented with without even recompiling, this
- * could make porting Replicant to new devices much less time
- * consuming and way easier.
- */
adev->mixer = mixer_open(CARD_OUT);
if (!adev->mixer) {
@@ -698,13 +1143,20 @@ static int adev_open(const hw_module_t* module, const char* name,
return -EINVAL;
}
- /* Set default audio route */
adev->audio_route = audio_route_init(CARD_OUT, MIXER_XML_PATH);
if (!adev->audio_route) {
ALOGE("%s: Failed to init audio route controls, aborting.", __func__);
return -EINVAL;
}
+ pthread_mutex_lock(&adev->lock);
+ if (init_aec(CAPTURE_CODEC_SAMPLING_RATE, NUM_AEC_REFERENCE_CHANNELS,
+ CHANNEL_STEREO, &adev->aec)) {
+ pthread_mutex_unlock(&adev->lock);
+ return -EINVAL;
+ }
+ pthread_mutex_unlock(&adev->lock);
+
return 0;
}
@@ -718,7 +1170,7 @@ struct audio_module HAL_MODULE_INFO_SYM = {
.module_api_version = AUDIO_MODULE_API_VERSION_0_1,
.hal_api_version = HARDWARE_HAL_API_VERSION,
.id = AUDIO_HARDWARE_MODULE_ID,
- .name = "Generic Audio HAL for dragonboards",
+ .name = "Yukawa audio HW HAL",
.author = "The Android Open Source Project",
.methods = &hal_module_methods,
},
diff --git a/audio/audio_hw.h b/audio/audio_hw.h
new file mode 100644
index 0000000..3e8e27c
--- /dev/null
+++ b/audio/audio_hw.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _YUKAWA_AUDIO_HW_H_
+#define _YUKAWA_AUDIO_HW_H_
+
+#include <hardware/audio.h>
+#include <tinyalsa/asoundlib.h>
+
+#include "fir_filter.h"
+
+#define CARD_OUT 0
+#define PORT_HDMI 0
+#define PORT_INTERNAL_SPEAKER 1
+#define CARD_IN 0
+#define PORT_BUILTIN_MIC 3
+
+#define MIXER_XML_PATH "/vendor/etc/mixer_paths.xml"
+/* Minimum granularity - Arbitrary but small value */
+#define CODEC_BASE_FRAME_COUNT 32
+
+#define CHANNEL_STEREO 2
+
+#ifdef AEC_HAL
+#define NUM_AEC_REFERENCE_CHANNELS 1
+#else
+/* App AEC uses 2-channel reference */
+#define NUM_AEC_REFERENCE_CHANNELS 2
+#endif /* #ifdef AEC_HAL */
+
+#define DEBUG_AEC 0
+
+#define PCM_OPEN_RETRIES 100
+#define PCM_OPEN_WAIT_TIME_MS 20
+
+/* Capture codec parameters */
+/* Set up a capture period of 32 ms:
+ * CAPTURE_PERIOD = PERIOD_SIZE / SAMPLE_RATE, so (32e-3) = PERIOD_SIZE / (16e3)
+ * => PERIOD_SIZE = 512 frames, where each "frame" consists of 1 sample of every channel (here, 2ch) */
+#define CAPTURE_PERIOD_MULTIPLIER 16
+#define CAPTURE_PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * CAPTURE_PERIOD_MULTIPLIER)
+#define CAPTURE_PERIOD_COUNT 4
+#define CAPTURE_PERIOD_START_THRESHOLD 0
+#define CAPTURE_CODEC_SAMPLING_RATE 16000
+
+/* Playback codec parameters */
+/* number of base blocks in a short period (low latency) */
+#define PLAYBACK_PERIOD_MULTIPLIER 32 /* 21 ms */
+/* number of frames per short period (low latency) */
+#define PLAYBACK_PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * PLAYBACK_PERIOD_MULTIPLIER)
+/* number of pseudo periods for low latency playback */
+#define PLAYBACK_PERIOD_COUNT 4
+#define PLAYBACK_PERIOD_START_THRESHOLD 2
+#define PLAYBACK_CODEC_SAMPLING_RATE 48000
+#define MIN_WRITE_SLEEP_US 5000
+
+#define SPEAKER_EQ_FILE "/vendor/etc/speaker_eq.fir"
+#define SPEAKER_MAX_EQ_LENGTH 512
+
+struct alsa_audio_device {
+ struct audio_hw_device hw_device;
+
+ pthread_mutex_t lock; /* see notes in in_read/out_write on mutex acquisition order */
+ struct alsa_stream_in *active_input;
+ struct alsa_stream_out *active_output;
+ struct audio_route *audio_route;
+ struct mixer *mixer;
+ bool mic_mute;
+ struct aec_t *aec;
+};
+
+struct alsa_stream_in {
+ struct audio_stream_in stream;
+
+ pthread_mutex_t lock; /* see note in in_read() on mutex acquisition order */
+ audio_devices_t devices;
+ struct pcm_config config;
+ struct pcm *pcm;
+ bool unavailable;
+ bool standby;
+ struct alsa_audio_device *dev;
+ int read_threshold;
+ unsigned int frames_read;
+ uint64_t timestamp_nsec;
+ audio_source_t source;
+};
+
+struct alsa_stream_out {
+ struct audio_stream_out stream;
+
+ pthread_mutex_t lock; /* see note in out_write() on mutex acquisition order */
+ audio_devices_t devices;
+ struct pcm_config config;
+ struct pcm *pcm;
+ bool unavailable;
+ int standby;
+ struct alsa_audio_device *dev;
+ int write_threshold;
+ unsigned int frames_written;
+ struct timespec timestamp;
+ fir_filter_t* speaker_eq;
+};
+
+/* 'bytes' is the number of bytes written to the audio FIFO for which 'timestamp' is valid.
+ * 'available' is the number of frames available to read (for input) or yet to be played
+ * (for output) in the PCM buffer.
+ * 'timestamp' and 'available' are updated by pcm_get_htimestamp(), so they use the same
+ * datatypes as the corresponding arguments to that function. */
+struct aec_info {
+ struct timespec timestamp;
+ uint64_t timestamp_usec;
+ unsigned int available;
+ size_t bytes;
+};
+
+#endif /* #ifndef _YUKAWA_AUDIO_HW_H_ */
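
Since the aec_info comment above names pcm_get_htimestamp() as the source of 'timestamp' and 'available', a minimal sketch of filling the struct after reading or writing 'bytes' bytes might look like this. The helper name is hypothetical; only tinyalsa's pcm_get_htimestamp(pcm, &avail, &tstamp) and the struct defined above are assumed.

/* Hypothetical helper, not part of this patch. */
static int example_fill_aec_info(struct pcm *pcm, size_t bytes, struct aec_info *info) {
    info->bytes = bytes;
    int ret = pcm_get_htimestamp(pcm, &info->available, &info->timestamp);
    if (ret != 0) {
        return ret;
    }
    /* Convenience conversion of the kernel timestamp to microseconds. */
    info->timestamp_usec = (uint64_t)info->timestamp.tv_sec * 1000000ULL +
                           (uint64_t)info->timestamp.tv_nsec / 1000ULL;
    return 0;
}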
diff --git a/audio/fifo_wrapper.cpp b/audio/fifo_wrapper.cpp
new file mode 100644
index 0000000..7bc9079
--- /dev/null
+++ b/audio/fifo_wrapper.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audio_utils_fifo_wrapper"
+// #define LOG_NDEBUG 0
+
+#include <stdint.h>
+#include <errno.h>
+#include <new>
+#include <log/log.h>
+#include <audio_utils/fifo.h>
+#include "fifo_wrapper.h"
+
+struct audio_fifo_itfe {
+ audio_utils_fifo *p_fifo;
+ audio_utils_fifo_reader *p_fifo_reader;
+ audio_utils_fifo_writer *p_fifo_writer;
+ int8_t *p_buffer;
+};
+
+void *fifo_init(uint32_t bytes, bool reader_throttles_writer) {
+ struct audio_fifo_itfe *interface = new struct audio_fifo_itfe;
+ interface->p_buffer = new (std::nothrow) int8_t[bytes];
+ if (interface->p_buffer == NULL) {
+ ALOGE("Failed to allocate fifo buffer!");
+ delete interface;
+ return NULL;
+ }
+ interface->p_fifo = new audio_utils_fifo(bytes, 1, interface->p_buffer, reader_throttles_writer);
+ interface->p_fifo_writer = new audio_utils_fifo_writer(*interface->p_fifo);
+ interface->p_fifo_reader = new audio_utils_fifo_reader(*interface->p_fifo);
+
+ return (void *)interface;
+}
+
+void fifo_release(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ delete interface->p_fifo_writer;
+ delete interface->p_fifo_reader;
+ delete interface->p_fifo;
+ delete[] interface->p_buffer;
+ delete interface;
+}
+
+ssize_t fifo_read(void *fifo_itfe, void *buffer, size_t bytes) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_reader->read(buffer, bytes);
+}
+
+ssize_t fifo_write(void *fifo_itfe, void *buffer, size_t bytes) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_writer->write(buffer, bytes);
+}
+
+ssize_t fifo_available_to_read(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_reader->available();
+}
+
+ssize_t fifo_available_to_write(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_writer->available();
+}
+
+ssize_t fifo_flush(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_reader->flush();
+}
diff --git a/audio/fifo_wrapper.h b/audio/fifo_wrapper.h
new file mode 100644
index 0000000..e9469ef
--- /dev/null
+++ b/audio/fifo_wrapper.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _AUDIO_FIFO_WRAPPER_H_
+#define _AUDIO_FIFO_WRAPPER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *fifo_init(uint32_t bytes, bool reader_throttles_writer);
+void fifo_release(void *fifo_itfe);
+ssize_t fifo_read(void *fifo_itfe, void *buffer, size_t bytes);
+ssize_t fifo_write(void *fifo_itfe, void *buffer, size_t bytes);
+ssize_t fifo_available_to_read(void *fifo_itfe);
+ssize_t fifo_available_to_write(void *fifo_itfe);
+ssize_t fifo_flush(void *fifo_itfe);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* #ifndef _AUDIO_FIFO_WRAPPER_H_ */
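
A minimal C usage sketch of the interface above; sizes and payload are arbitrary, and only functions declared in fifo_wrapper.h are used (the extra includes supply the types the header relies on). With a frame size of 1, the byte counts passed to fifo_read()/fifo_write() are also frame counts.

#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>
#include "fifo_wrapper.h"

/* Round-trip a small buffer through the byte FIFO. */
static int example_fifo_roundtrip(void) {
    void *fifo = fifo_init(4096 /* bytes */, false /* reader does not throttle writer */);
    if (fifo == NULL) {
        return -1;
    }
    char in[128] = "reference samples";
    char out[128];
    ssize_t written = fifo_write(fifo, in, sizeof(in));
    ssize_t avail = fifo_available_to_read(fifo); /* equals 'written' here */
    ssize_t got = fifo_read(fifo, out, (size_t)avail);
    fifo_release(fifo);
    return (written == (ssize_t)sizeof(in) && got == written) ? 0 : -1;
}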
diff --git a/audio/fir_filter.c b/audio/fir_filter.c
new file mode 100644
index 0000000..c648fc0
--- /dev/null
+++ b/audio/fir_filter.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audio_hw_fir_filter"
+//#define LOG_NDEBUG 0
+
+#include <assert.h>
+#include <audio_utils/primitives.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <log/log.h>
+#include <malloc.h>
+#include <string.h>
+
+#include "fir_filter.h"
+
+#ifdef __ARM_NEON
+#include "arm_neon.h"
+#endif /* #ifdef __ARM_NEON */
+
+fir_filter_t* fir_init(uint32_t channels, fir_filter_mode_t mode, uint32_t filter_length,
+ uint32_t input_length, int16_t* coeffs) {
+ if ((channels == 0) || (filter_length == 0) || (coeffs == NULL)) {
+ ALOGE("%s: Invalid channel count, filter length or coefficient array.", __func__);
+ return NULL;
+ }
+
+ fir_filter_t* fir = (fir_filter_t*)calloc(1, sizeof(fir_filter_t));
+ if (fir == NULL) {
+ ALOGE("%s: Unable to allocate memory for fir_filter.", __func__);
+ return NULL;
+ }
+
+ fir->channels = channels;
+ fir->filter_length = filter_length;
+ /* Default: same filter coeffs for all channels */
+ fir->mode = FIR_SINGLE_FILTER;
+ uint32_t coeff_bytes = fir->filter_length * sizeof(int16_t);
+ if (mode == FIR_PER_CHANNEL_FILTER) {
+ fir->mode = FIR_PER_CHANNEL_FILTER;
+ coeff_bytes = fir->filter_length * fir->channels * sizeof(int16_t);
+ }
+
+ fir->coeffs = (int16_t*)malloc(coeff_bytes);
+ if (fir->coeffs == NULL) {
+ ALOGE("%s: Unable to allocate memory for FIR coeffs", __func__);
+ goto exit_1;
+ }
+ memcpy(fir->coeffs, coeffs, coeff_bytes);
+
+ fir->buffer_size = (input_length + fir->filter_length) * fir->channels;
+ fir->state = (int16_t*)malloc(fir->buffer_size * sizeof(int16_t));
+ if (fir->state == NULL) {
+ ALOGE("%s: Unable to allocate memory for FIR state", __func__);
+ goto exit_2;
+ }
+
+#ifdef __ARM_NEON
+ ALOGI("%s: Using ARM Neon", __func__);
+#endif /* #ifdef __ARM_NEON */
+
+ fir_reset(fir);
+ return fir;
+
+exit_2:
+ free(fir->coeffs);
+exit_1:
+ free(fir);
+ return NULL;
+}
+
+void fir_release(fir_filter_t* fir) {
+ if (fir == NULL) {
+ return;
+ }
+ free(fir->state);
+ free(fir->coeffs);
+ free(fir);
+}
+
+void fir_reset(fir_filter_t* fir) {
+ if (fir == NULL) {
+ return;
+ }
+ memset(fir->state, 0, fir->buffer_size * sizeof(int16_t));
+}
+
+void fir_process_interleaved(fir_filter_t* fir, int16_t* input, int16_t* output, uint32_t samples) {
+ assert(fir != NULL);
+
+ int start_offset = (fir->filter_length - 1) * fir->channels;
+ memcpy(&fir->state[start_offset], input, samples * fir->channels * sizeof(int16_t));
+ bool use_2nd_set_coeffs = (fir->channels > 1) && (fir->mode == FIR_PER_CHANNEL_FILTER);
+ int16_t* p_coeff_A = &fir->coeffs[0];
+ int16_t* p_coeff_B = use_2nd_set_coeffs ? &fir->coeffs[fir->filter_length] : &fir->coeffs[0];
+ int16_t* p_output;
+ for (int ch = 0; ch < fir->channels; ch += 2) {
+ p_output = &output[ch];
+ int offset = start_offset + ch;
+ for (int s = 0; s < samples; s++) {
+ int32_t acc_A = 0;
+ int32_t acc_B = 0;
+
+#ifdef __ARM_NEON
+ int32x4_t acc_vec = vdupq_n_s32(0);
+ for (int k = 0; k < fir->filter_length; k++, offset -= fir->channels) {
+ int16x4_t coeff_vec = vdup_n_s16(p_coeff_A[k]);
+ coeff_vec = vset_lane_s16(p_coeff_B[k], coeff_vec, 1);
+ int16x4_t input_vec = vld1_s16(&fir->state[offset]);
+ acc_vec = vmlal_s16(acc_vec, coeff_vec, input_vec);
+ }
+ acc_A = vgetq_lane_s32(acc_vec, 0);
+ acc_B = vgetq_lane_s32(acc_vec, 1);
+#else
+ for (int k = 0; k < fir->filter_length; k++, offset -= fir->channels) {
+ int32_t input_A = (int32_t)(fir->state[offset]);
+ int32_t coeff_A = (int32_t)(p_coeff_A[k]);
+ int32_t input_B = (int32_t)(fir->state[offset + 1]);
+ int32_t coeff_B = (int32_t)(p_coeff_B[k]);
+ acc_A += (input_A * coeff_A);
+ acc_B += (input_B * coeff_B);
+ }
+#endif /* #ifdef __ARM_NEON */
+
+ *p_output = clamp16(acc_A >> 15);
+ if (ch < fir->channels - 1) {
+ *(p_output + 1) = clamp16(acc_B >> 15);
+ }
+ /* Move to next sample */
+ p_output += fir->channels;
+ offset += (fir->filter_length + 1) * fir->channels;
+ }
+ if (use_2nd_set_coeffs) {
+ p_coeff_A += (fir->filter_length << 1);
+ p_coeff_B += (fir->filter_length << 1);
+ }
+ }
+ memmove(fir->state, &fir->state[samples * fir->channels],
+ (fir->filter_length - 1) * fir->channels * sizeof(int16_t));
+}
diff --git a/audio/fir_filter.h b/audio/fir_filter.h
new file mode 100644
index 0000000..d8c6e91
--- /dev/null
+++ b/audio/fir_filter.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIR_FILTER_H
+#define FIR_FILTER_H
+
+#include <stdint.h>
+
+typedef enum fir_filter_mode { FIR_SINGLE_FILTER = 0, FIR_PER_CHANNEL_FILTER } fir_filter_mode_t;
+
+typedef struct fir_filter {
+ fir_filter_mode_t mode;
+ uint32_t channels;
+ uint32_t filter_length;
+ uint32_t buffer_size;
+ int16_t* coeffs;
+ int16_t* state;
+} fir_filter_t;
+
+fir_filter_t* fir_init(uint32_t channels, fir_filter_mode_t mode, uint32_t filter_length,
+ uint32_t input_length, int16_t* coeffs);
+void fir_release(fir_filter_t* fir);
+void fir_reset(fir_filter_t* fir);
+void fir_process_interleaved(fir_filter_t* fir, int16_t* input, int16_t* output, uint32_t samples);
+
+#endif /* #ifndef FIR_FILTER_H */
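
To close, a minimal usage sketch of the FIR API declared above. A unit impulse in Q15 (0x7FFF, roughly 1.0) makes the filter an effective pass-through, since fir_process_interleaved() accumulates input*coefficient products into a 32-bit sum and shifts it right by 15 before clamping to 16 bits, so each output sample differs from its input by at most one LSB. All identifiers except the EXAMPLE_* constants and the function name come from fir_filter.h.

#include <stdint.h>
#include "fir_filter.h"

#define EXAMPLE_TAPS   16
#define EXAMPLE_FRAMES 32
#define EXAMPLE_CH     2

static int example_fir_passthrough(void) {
    /* Unit impulse in Q15: only the first tap is non-zero. */
    int16_t coeffs[EXAMPLE_TAPS] = { 0 };
    coeffs[0] = 0x7FFF;

    fir_filter_t *fir = fir_init(EXAMPLE_CH, FIR_SINGLE_FILTER, EXAMPLE_TAPS,
                                 EXAMPLE_FRAMES, coeffs);
    if (fir == NULL) {
        return -1;
    }

    int16_t in[EXAMPLE_FRAMES * EXAMPLE_CH];
    int16_t out[EXAMPLE_FRAMES * EXAMPLE_CH];
    for (int i = 0; i < EXAMPLE_FRAMES * EXAMPLE_CH; i++) {
        in[i] = (int16_t)(i * 100);
    }

    /* 'samples' is the number of interleaved frames per channel in this block. */
    fir_process_interleaved(fir, in, out, EXAMPLE_FRAMES);
    fir_release(fir);
    return 0;
}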