audio: Kang audio HAL from amlogic yukawa device
Copied from device/amlogic/generic/hal/audio/ minus the
speaker equalizer filter file.
It is a generic and more feature-rich HAL, and it also fixes
the occasional static noise we hear on db845c and Poco F1.
Removed the mixer control settings from qcom/init.qcom.rc
and added an HDMI out mixer control in etc/mixer_paths.xml
to start with. ** Analog audio out is not tested yet. **
ToDo:
1. Fall back to the analog audio port if the monitor doesn't
   support HDMI audio out.
2. Mic AEC support is integrated deeply into this HAL, so I
   didn't remove it. We might as well test and use that
   feature on Pixel 3 and Poco F1. The same goes for speaker
   EQ, though we may never end up using the equalizer
   feature at all.
Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
Change-Id: I85614abdd684ab67f405f4c0e48380668ade4e06
diff --git a/audio/Android.mk b/audio/Android.mk
index 228ccf2..90c18fa 100644
--- a/audio/Android.mk
+++ b/audio/Android.mk
@@ -27,12 +27,16 @@
LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := audio_hw.c
-LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa
+LOCAL_SRC_FILES := audio_hw.c \
+ audio_aec.c \
+ fifo_wrapper.cpp \
+ fir_filter.c
+LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioroute libaudioutils
LOCAL_CFLAGS := -Wno-unused-parameter
LOCAL_C_INCLUDES += \
external/tinyalsa/include \
external/expat/lib \
+ $(call include-path-for, audio-route) \
system/media/audio_utils/include \
system/media/audio_effects/include
diff --git a/audio/audio_aec.c b/audio/audio_aec.c
new file mode 100644
index 0000000..ab99c93
--- /dev/null
+++ b/audio/audio_aec.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// clang-format off
+/*
+ * Typical AEC signal flow:
+ *
+ * Microphone Audio
+ * Timestamps
+ * +--------------------------------------+
+ * | | +---------------+
+ * | Microphone +---------------+ | | |
+ * O|====== | Audio | Sample Rate | +-------> |
+ * (from . +--+ Samples | + | | |
+ * mic . +==================> Format |==============> |
+ * codec) . | Conversion | | | Cleaned
+ * O|====== | (if required) | | Acoustic | Audio
+ * +---------------+ | Echo | Samples
+ * | Canceller |===================>
+ * | (AEC) |
+ * Reference +---------------+ | |
+ * Audio | Sample Rate | | |
+ * Samples | + | | |
+ * +=============> Format |==============> |
+ * | | Conversion | | |
+ * | | (if required) | +-------> |
+ * | +---------------+ | | |
+ * | | +---------------+
+ * | +-------------------------------+
+ * | | Reference Audio
+ * | | Timestamps
+ * | |
+ * +--+----+---------+ AUDIO CAPTURE
+ * | Speaker |
+ * +------------+ Audio/Timestamp +---------------------------------------------------------------------------+
+ * | Buffer |
+ * +--^----^---------+ AUDIO PLAYBACK
+ * | |
+ * | |
+ * | |
+ * | |
+ * |\ | |
+ * | +-+ | |
+ * (to | | +-----C----+
+ * speaker | | | | Playback
+ * codec) | | <=====+================================================================+ Audio
+ * | +-+ Samples
+ * |/
+ *
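+ * In short: every playback write is copied, along with its timestamp, into a
+ * speaker loopback buffer; on the capture path those samples are sample-rate
+ * and format converted and fed to the AEC as the echo reference, and the AEC
+ * removes the corresponding echo from the (also converted) microphone
+ * samples to produce the cleaned capture stream.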
+ */
+// clang-format on
+
+#define LOG_TAG "audio_hw_aec"
+// #define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <malloc.h>
+#include <sys/time.h>
+#include <tinyalsa/asoundlib.h>
+#include <unistd.h>
+#include <log/log.h>
+#include "audio_aec.h"
+
+#ifdef AEC_HAL
+#include "audio_aec_process.h"
+#else
+#define aec_spk_mic_init(...) ((int)0)
+#define aec_spk_mic_reset(...) ((void)0)
+#define aec_spk_mic_process(...) ((int32_t)0)
+#define aec_spk_mic_release(...) ((void)0)
+#endif
+
+#define MAX_TIMESTAMP_DIFF_USEC 200000
+
+#define MAX_READ_WAIT_TIME_MSEC 80
+
+uint64_t timespec_to_usec(struct timespec ts) {
+ return (ts.tv_sec * 1e6L + ts.tv_nsec/1000);
+}
+
+void get_reference_audio_in_place(struct aec_t *aec, size_t frames) {
+ if (aec->num_reference_channels == aec->spk_num_channels) {
+ /* Reference count equals speaker channels, nothing to do here. */
+ return;
+ } else if (aec->num_reference_channels != 1) {
+ /* We don't have a rule for non-mono references, show error on log */
+ ALOGE("Invalid reference count - must be 1 or match number of playback channels!");
+ return;
+ }
+ int16_t *src_Nch = &aec->spk_buf_playback_format[0];
+ int16_t *dst_1ch = &aec->spk_buf_playback_format[0];
+ int32_t num_channels = (int32_t)aec->spk_num_channels;
+ size_t frame, ch;
+ for (frame = 0; frame < frames; frame++) {
+ int32_t acc = 0;
+ for (ch = 0; ch < aec->spk_num_channels; ch++) {
+ acc += src_Nch[ch];
+ }
+ *dst_1ch++ = clamp16(acc/num_channels);
+ src_Nch += aec->spk_num_channels;
+ }
+}
+
+void print_queue_status_to_log(struct aec_t *aec, bool write_side) {
+ ssize_t q1 = fifo_available_to_read(aec->spk_fifo);
+ ssize_t q2 = fifo_available_to_read(aec->ts_fifo);
+
+ ALOGV("Queue available %s: Spk %zd (count %zd) TS %zd (count %zd)",
+ (write_side) ? "(POST-WRITE)" : "(PRE-READ)",
+ q1, q1/aec->spk_frame_size_bytes/PLAYBACK_PERIOD_SIZE,
+ q2, q2/sizeof(struct aec_info));
+}
+
+void flush_aec_fifos(struct aec_t *aec) {
+ if (aec == NULL) {
+ return;
+ }
+ if (aec->spk_fifo != NULL) {
+ ALOGV("Flushing AEC Spk FIFO...");
+ fifo_flush(aec->spk_fifo);
+ }
+ if (aec->ts_fifo != NULL) {
+ ALOGV("Flushing AEC Timestamp FIFO...");
+ fifo_flush(aec->ts_fifo);
+ }
+ /* Reset FIFO read-write offset tracker */
+ aec->read_write_diff_bytes = 0;
+}
+
+void aec_set_spk_running_no_lock(struct aec_t* aec, bool state) {
+ aec->spk_running = state;
+}
+
+bool aec_get_spk_running_no_lock(struct aec_t* aec) {
+ return aec->spk_running;
+}
+
+void destroy_aec_reference_config_no_lock(struct aec_t* aec) {
+ if (!aec->spk_initialized) {
+ return;
+ }
+ aec_set_spk_running_no_lock(aec, false);
+ fifo_release(aec->spk_fifo);
+ fifo_release(aec->ts_fifo);
+ memset(&aec->last_spk_info, 0, sizeof(struct aec_info));
+ aec->spk_initialized = false;
+}
+
+void destroy_aec_mic_config_no_lock(struct aec_t* aec) {
+ if (!aec->mic_initialized) {
+ return;
+ }
+ release_resampler(aec->spk_resampler);
+ free(aec->mic_buf);
+ free(aec->spk_buf);
+ free(aec->spk_buf_playback_format);
+ free(aec->spk_buf_resampler_out);
+ memset(&aec->last_mic_info, 0, sizeof(struct aec_info));
+ aec->mic_initialized = false;
+}
+
+struct aec_t *init_aec_interface() {
+ ALOGV("%s enter", __func__);
+ struct aec_t *aec = (struct aec_t *)calloc(1, sizeof(struct aec_t));
+ if (aec == NULL) {
+ ALOGE("Failed to allocate memory for AEC interface!");
+ } else {
+ pthread_mutex_init(&aec->lock, NULL);
+ }
+
+ ALOGV("%s exit", __func__);
+ return aec;
+}
+
+void release_aec_interface(struct aec_t *aec) {
+ ALOGV("%s enter", __func__);
+ pthread_mutex_lock(&aec->lock);
+ destroy_aec_mic_config_no_lock(aec);
+ destroy_aec_reference_config_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ free(aec);
+ ALOGV("%s exit", __func__);
+}
+
+int init_aec(int sampling_rate, int num_reference_channels,
+ int num_microphone_channels, struct aec_t **aec_ptr) {
+ ALOGV("%s enter", __func__);
+ int ret = 0;
+ int aec_ret = aec_spk_mic_init(
+ sampling_rate,
+ num_reference_channels,
+ num_microphone_channels);
+ if (aec_ret) {
+ ALOGE("AEC object failed to initialize!");
+ ret = -EINVAL;
+ }
+ struct aec_t *aec = init_aec_interface();
+ if (!ret) {
+ aec->num_reference_channels = num_reference_channels;
+ /* Set defaults, will be overridden by settings in init_aec_(mic|reference)_config */
+ /* Capture uses 2-ch, 32-bit frames */
+ aec->mic_sampling_rate = CAPTURE_CODEC_SAMPLING_RATE;
+ aec->mic_frame_size_bytes = CHANNEL_STEREO * sizeof(int32_t);
+ aec->mic_num_channels = CHANNEL_STEREO;
+
+ /* Playback uses 2-ch, 16-bit frames */
+ aec->spk_sampling_rate = PLAYBACK_CODEC_SAMPLING_RATE;
+ aec->spk_frame_size_bytes = CHANNEL_STEREO * sizeof(int16_t);
+ aec->spk_num_channels = CHANNEL_STEREO;
+ }
+
+ (*aec_ptr) = aec;
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void release_aec(struct aec_t *aec) {
+ ALOGV("%s enter", __func__);
+ if (aec == NULL) {
+ return;
+ }
+ release_aec_interface(aec);
+ aec_spk_mic_release();
+ ALOGV("%s exit", __func__);
+}
+
+int init_aec_reference_config(struct aec_t *aec, struct alsa_stream_out *out) {
+ ALOGV("%s enter", __func__);
+ if (!aec) {
+ ALOGE("AEC: No valid interface found!");
+ return -EINVAL;
+ }
+
+ int ret = 0;
+ pthread_mutex_lock(&aec->lock);
+ if (aec->spk_initialized) {
+ destroy_aec_reference_config_no_lock(aec);
+ }
+
+ aec->spk_fifo = fifo_init(
+ out->config.period_count * out->config.period_size *
+ audio_stream_out_frame_size(&out->stream),
+ false /* reader_throttles_writer */);
+ if (aec->spk_fifo == NULL) {
+ ALOGE("AEC: Speaker loopback FIFO Init failed!");
+ ret = -EINVAL;
+ goto exit;
+ }
+ aec->ts_fifo = fifo_init(
+ out->config.period_count * sizeof(struct aec_info),
+ false /* reader_throttles_writer */);
+ if (aec->ts_fifo == NULL) {
+ ALOGE("AEC: Speaker timestamp FIFO Init failed!");
+ ret = -EINVAL;
+ fifo_release(aec->spk_fifo);
+ goto exit;
+ }
+
+ aec->spk_sampling_rate = out->config.rate;
+ aec->spk_frame_size_bytes = audio_stream_out_frame_size(&out->stream);
+ aec->spk_num_channels = out->config.channels;
+ aec->spk_initialized = true;
+exit:
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void destroy_aec_reference_config(struct aec_t* aec) {
+ ALOGV("%s enter", __func__);
+ if (aec == NULL) {
+ ALOGV("%s exit", __func__);
+ return;
+ }
+ pthread_mutex_lock(&aec->lock);
+ destroy_aec_reference_config_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+}
+
+int write_to_reference_fifo(struct aec_t* aec, void* buffer, struct aec_info* info) {
+ ALOGV("%s enter", __func__);
+ int ret = 0;
+ size_t bytes = info->bytes;
+
+ /* Write audio samples to FIFO */
+ ssize_t written_bytes = fifo_write(aec->spk_fifo, buffer, bytes);
+ if (written_bytes != bytes) {
+ ALOGE("Could only write %zu of %zu bytes", written_bytes, bytes);
+ ret = -ENOMEM;
+ }
+
+ /* Write timestamp to FIFO */
+ info->bytes = written_bytes;
+ ALOGV("Speaker timestamp: %ld s, %ld nsec", info->timestamp.tv_sec, info->timestamp.tv_nsec);
+ ssize_t ts_bytes = fifo_write(aec->ts_fifo, info, sizeof(struct aec_info));
+ ALOGV("Wrote TS bytes: %zu", ts_bytes);
+ print_queue_status_to_log(aec, true);
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
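+/* Derive the playback timestamp for the reference samples just read from the
+ * speaker FIFO. read_write_diff_bytes tracks the reader's position relative
+ * to the writer's packets: negative means we are still inside the last
+ * timestamped write packet (reuse its timestamp plus a byte-based offset),
+ * positive means the read went past it, so fast-forward through the
+ * timestamp FIFO to the most recent entry. */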
+void get_spk_timestamp(struct aec_t* aec, ssize_t read_bytes, uint64_t* spk_time) {
+ *spk_time = 0;
+ uint64_t spk_time_offset = 0;
+ float usec_per_byte = 1E6 / ((float)(aec->spk_frame_size_bytes * aec->spk_sampling_rate));
+ if (aec->read_write_diff_bytes < 0) {
+ /* We're still reading a previous write packet. (We only need the first sample's timestamp,
+ * so even if we straddle packets we only care about the first one)
+ * So we just use the previous timestamp, with an appropriate offset
+ * based on the number of bytes remaining to be read from that write packet. */
+ spk_time_offset = (aec->last_spk_info.bytes + aec->read_write_diff_bytes) * usec_per_byte;
+ ALOGV("Reusing previous timestamp, calculated offset (usec) %" PRIu64, spk_time_offset);
+ } else {
+ /* If read_write_diff_bytes > 0, there are no new writes, so there won't be timestamps in
+ * the FIFO, and the check below will fail. */
+ if (!fifo_available_to_read(aec->ts_fifo)) {
+ ALOGE("Timestamp error: no new timestamps!");
+ return;
+ }
+ /* We just read valid data, so if we're here, we should have a valid timestamp to use. */
+ ssize_t ts_bytes = fifo_read(aec->ts_fifo, &aec->last_spk_info, sizeof(struct aec_info));
+ ALOGV("Read TS bytes: %zd, expected %zu", ts_bytes, sizeof(struct aec_info));
+ aec->read_write_diff_bytes -= aec->last_spk_info.bytes;
+ }
+
+ *spk_time = timespec_to_usec(aec->last_spk_info.timestamp) + spk_time_offset;
+
+ aec->read_write_diff_bytes += read_bytes;
+ struct aec_info spk_info = aec->last_spk_info;
+ while (aec->read_write_diff_bytes > 0) {
+ /* If read_write_diff_bytes > 0, it means that there are more write packet timestamps
+ * in FIFO (since we read more valid data than the size of the current timestamp's
+ * packet). Keep reading timestamps from FIFO to get to the most recent one. */
+ if (!fifo_available_to_read(aec->ts_fifo)) {
+ /* There are no more timestamps, we have the most recent one. */
+ ALOGV("At the end of timestamp FIFO, breaking...");
+ break;
+ }
+ fifo_read(aec->ts_fifo, &spk_info, sizeof(struct aec_info));
+ ALOGV("Fast-forwarded timestamp by %zd bytes, remaining bytes: %zd,"
+ " new timestamp (usec) %" PRIu64,
+ spk_info.bytes, aec->read_write_diff_bytes, timespec_to_usec(spk_info.timestamp));
+ aec->read_write_diff_bytes -= spk_info.bytes;
+ }
+ aec->last_spk_info = spk_info;
+}
+
+int get_reference_samples(struct aec_t* aec, void* buffer, struct aec_info* info) {
+ ALOGV("%s enter", __func__);
+
+ if (!aec->spk_initialized) {
+ ALOGE("%s called with no reference initialized", __func__);
+ return -EINVAL;
+ }
+
+ size_t bytes = info->bytes;
+ const size_t frames = bytes / aec->mic_frame_size_bytes;
+ const size_t sample_rate_ratio = aec->spk_sampling_rate / aec->mic_sampling_rate;
+
+ /* Read audio samples from FIFO */
+ const size_t req_bytes = frames * sample_rate_ratio * aec->spk_frame_size_bytes;
+ ssize_t available_bytes = 0;
+ unsigned int wait_count = MAX_READ_WAIT_TIME_MSEC;
+ while (true) {
+ available_bytes = fifo_available_to_read(aec->spk_fifo);
+ if (available_bytes >= req_bytes) {
+ break;
+ } else if (available_bytes < 0) {
+ ALOGE("fifo_available_to_read returned error %zd", available_bytes);
+ return -ENOMEM;
+ }
+
+ ALOGV("Sleeping, required bytes: %zu, available bytes: %zd", req_bytes, available_bytes);
+ usleep(1000);
+ if ((wait_count--) == 0) {
+ ALOGE("Timed out waiting for read from reference FIFO");
+ return -ETIMEDOUT;
+ }
+ }
+
+ const size_t read_bytes = fifo_read(aec->spk_fifo, aec->spk_buf_playback_format, req_bytes);
+
+ /* Get timestamp*/
+ get_spk_timestamp(aec, read_bytes, &info->timestamp_usec);
+
+ /* Get reference - could be mono, downmixed from multichannel.
+ * Reference stored at spk_buf_playback_format */
+ const size_t resampler_in_frames = frames * sample_rate_ratio;
+ get_reference_audio_in_place(aec, resampler_in_frames);
+
+ int16_t* resampler_out_buf;
+ /* Resample to mic sampling rate (16-bit resampler) */
+ if (aec->spk_resampler != NULL) {
+ size_t in_frame_count = resampler_in_frames;
+ size_t out_frame_count = frames;
+ aec->spk_resampler->resample_from_input(aec->spk_resampler, aec->spk_buf_playback_format,
+ &in_frame_count, aec->spk_buf_resampler_out,
+ &out_frame_count);
+ resampler_out_buf = aec->spk_buf_resampler_out;
+ } else {
+ if (sample_rate_ratio != 1) {
+ ALOGE("Speaker sample rate %d, mic sample rate %d but no resampler defined!",
+ aec->spk_sampling_rate, aec->mic_sampling_rate);
+ }
+ resampler_out_buf = aec->spk_buf_playback_format;
+ }
+
+ /* Convert to 32 bit */
+ int16_t* src16 = resampler_out_buf;
+ int32_t* dst32 = buffer;
+ size_t frame, ch;
+ for (frame = 0; frame < frames; frame++) {
+ for (ch = 0; ch < aec->num_reference_channels; ch++) {
+ *dst32++ = ((int32_t)*src16++) << 16;
+ }
+ }
+
+ info->bytes = bytes;
+
+ ALOGV("%s exit", __func__);
+ return 0;
+}
+
+int init_aec_mic_config(struct aec_t *aec, struct alsa_stream_in *in) {
+ ALOGV("%s enter", __func__);
+#if DEBUG_AEC
+ remove("/data/local/traces/aec_in.pcm");
+ remove("/data/local/traces/aec_out.pcm");
+ remove("/data/local/traces/aec_ref.pcm");
+ remove("/data/local/traces/aec_timestamps.txt");
+#endif /* #if DEBUG_AEC */
+
+ if (!aec) {
+ ALOGE("AEC: No valid interface found!");
+ return -EINVAL;
+ }
+
+ int ret = 0;
+ pthread_mutex_lock(&aec->lock);
+ if (aec->mic_initialized) {
+ destroy_aec_mic_config_no_lock(aec);
+ }
+ aec->mic_sampling_rate = in->config.rate;
+ aec->mic_frame_size_bytes = audio_stream_in_frame_size(&in->stream);
+ aec->mic_num_channels = in->config.channels;
+
+ aec->mic_buf_size_bytes = in->config.period_size * audio_stream_in_frame_size(&in->stream);
+ aec->mic_buf = (int32_t *)malloc(aec->mic_buf_size_bytes);
+ if (aec->mic_buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ memset(aec->mic_buf, 0, aec->mic_buf_size_bytes);
+ /* Reference buffer is the same number of frames as mic,
+ * only with a different number of channels in the frame. */
+ aec->spk_buf_size_bytes = in->config.period_size * aec->spk_frame_size_bytes;
+ aec->spk_buf = (int32_t *)malloc(aec->spk_buf_size_bytes);
+ if (aec->spk_buf == NULL) {
+ ret = -ENOMEM;
+ goto exit_1;
+ }
+ memset(aec->spk_buf, 0, aec->spk_buf_size_bytes);
+
+ /* Pre-resampler buffer */
+ size_t spk_frame_out_format_bytes = aec->spk_sampling_rate / aec->mic_sampling_rate *
+ aec->spk_buf_size_bytes;
+ aec->spk_buf_playback_format = (int16_t *)malloc(spk_frame_out_format_bytes);
+ if (aec->spk_buf_playback_format == NULL) {
+ ret = -ENOMEM;
+ goto exit_2;
+ }
+ /* Resampler is 16-bit */
+ aec->spk_buf_resampler_out = (int16_t *)malloc(aec->spk_buf_size_bytes);
+ if (aec->spk_buf_resampler_out == NULL) {
+ ret = -ENOMEM;
+ goto exit_3;
+ }
+
+ /* Don't use resampler if it's not required */
+ if (in->config.rate == aec->spk_sampling_rate) {
+ aec->spk_resampler = NULL;
+ } else {
+ int resampler_ret = create_resampler(
+ aec->spk_sampling_rate, in->config.rate, aec->num_reference_channels,
+ RESAMPLER_QUALITY_MAX - 1, /* MAX - 1 is the real max */
+ NULL, /* resampler_buffer_provider */
+ &aec->spk_resampler);
+ if (resampler_ret) {
+ ALOGE("AEC: Resampler initialization failed! Error code %d", resampler_ret);
+ ret = resampler_ret;
+ goto exit_4;
+ }
+ }
+
+ flush_aec_fifos(aec);
+ aec_spk_mic_reset();
+ aec->mic_initialized = true;
+
+exit:
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return ret;
+
+exit_4:
+ free(aec->spk_buf_resampler_out);
+exit_3:
+ free(aec->spk_buf_playback_format);
+exit_2:
+ free(aec->spk_buf);
+exit_1:
+ free(aec->mic_buf);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+void aec_set_spk_running(struct aec_t *aec, bool state) {
+ ALOGV("%s enter", __func__);
+ pthread_mutex_lock(&aec->lock);
+ aec_set_spk_running_no_lock(aec, state);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+}
+
+bool aec_get_spk_running(struct aec_t *aec) {
+ ALOGV("%s enter", __func__);
+ pthread_mutex_lock(&aec->lock);
+ bool state = aec_get_spk_running_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+ return state;
+}
+
+void destroy_aec_mic_config(struct aec_t* aec) {
+ ALOGV("%s enter", __func__);
+ if (aec == NULL) {
+ ALOGV("%s exit", __func__);
+ return;
+ }
+
+ pthread_mutex_lock(&aec->lock);
+ destroy_aec_mic_config_no_lock(aec);
+ pthread_mutex_unlock(&aec->lock);
+ ALOGV("%s exit", __func__);
+}
+
+#ifdef AEC_HAL
+int process_aec(struct aec_t *aec, void* buffer, struct aec_info *info) {
+ ALOGV("%s enter", __func__);
+ int ret = 0;
+
+ if (aec == NULL) {
+ ALOGE("AEC: Interface uninitialized! Cannot process.");
+ return -EINVAL;
+ }
+
+ if ((!aec->mic_initialized) || (!aec->spk_initialized)) {
+ ALOGE("%s called with initialization: mic: %d, spk: %d", __func__, aec->mic_initialized,
+ aec->spk_initialized);
+ return -EINVAL;
+ }
+
+ size_t bytes = info->bytes;
+
+ size_t frame_size = aec->mic_frame_size_bytes;
+ size_t in_frames = bytes / frame_size;
+
+ /* Copy raw mic samples to AEC input buffer */
+ memcpy(aec->mic_buf, buffer, bytes);
+
+ uint64_t mic_time = timespec_to_usec(info->timestamp);
+ uint64_t spk_time = 0;
+
+ /*
+ * Only run AEC if there is speaker playback.
+ * The first time speaker state changes to running, flush FIFOs, so we're not stuck
+ * processing stale reference input.
+ */
+ bool spk_running = aec_get_spk_running(aec);
+
+ if (!spk_running) {
+ /* No new playback samples, so don't run AEC.
+ * 'buffer' already contains input samples. */
+ ALOGV("Speaker not running, skipping AEC..");
+ goto exit;
+ }
+
+ if (!aec->prev_spk_running) {
+ flush_aec_fifos(aec);
+ }
+
+ /* If there's no data in FIFO, exit */
+ if (fifo_available_to_read(aec->spk_fifo) <= 0) {
+ ALOGV("Echo reference buffer empty, zeroing reference....");
+ goto exit;
+ }
+
+ print_queue_status_to_log(aec, false);
+
+ /* Get reference, with format and sample rate required by AEC */
+ struct aec_info spk_info;
+ spk_info.bytes = bytes;
+ int ref_ret = get_reference_samples(aec, aec->spk_buf, &spk_info);
+ spk_time = spk_info.timestamp_usec;
+
+ if (ref_ret) {
+ ALOGE("get_reference_samples returned code %d", ref_ret);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ int64_t time_diff = (mic_time > spk_time) ? (mic_time - spk_time) : (spk_time - mic_time);
+ if ((spk_time == 0) || (mic_time == 0) || (time_diff > MAX_TIMESTAMP_DIFF_USEC)) {
+ ALOGV("Speaker-mic timestamps diverged, skipping AEC");
+ flush_aec_fifos(aec);
+ aec_spk_mic_reset();
+ goto exit;
+ }
+
+ ALOGV("Mic time: %"PRIu64", spk time: %"PRIu64, mic_time, spk_time);
+
+ /*
+ * AEC processing call - output stored at 'buffer'
+ */
+ int32_t aec_status = aec_spk_mic_process(
+ aec->spk_buf, spk_time,
+ aec->mic_buf, mic_time,
+ in_frames,
+ buffer);
+
+ if (!aec_status) {
+ ALOGE("AEC processing failed!");
+ ret = -EINVAL;
+ }
+
+exit:
+ aec->prev_spk_running = spk_running;
+ ALOGV("Mic time: %"PRIu64", spk time: %"PRIu64, mic_time, spk_time);
+ if (ret) {
+ /* Best we can do is copy over the raw mic signal */
+ memcpy(buffer, aec->mic_buf, bytes);
+ flush_aec_fifos(aec);
+ aec_spk_mic_reset();
+ }
+
+#if DEBUG_AEC
+ /* ref data is 32-bit at this point */
+ size_t ref_bytes = in_frames*aec->num_reference_channels*sizeof(int32_t);
+
+ FILE *fp_in = fopen("/data/local/traces/aec_in.pcm", "a+");
+ if (fp_in) {
+ fwrite((char *)aec->mic_buf, 1, bytes, fp_in);
+ fclose(fp_in);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_in.pcm!");
+ }
+ FILE *fp_out = fopen("/data/local/traces/aec_out.pcm", "a+");
+ if (fp_out) {
+ fwrite((char *)buffer, 1, bytes, fp_out);
+ fclose(fp_out);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_out.pcm!");
+ }
+ FILE *fp_ref = fopen("/data/local/traces/aec_ref.pcm", "a+");
+ if (fp_ref) {
+ fwrite((char *)aec->spk_buf, 1, ref_bytes, fp_ref);
+ fclose(fp_ref);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_ref.pcm!");
+ }
+ FILE *fp_ts = fopen("/data/local/traces/aec_timestamps.txt", "a+");
+ if (fp_ts) {
+ fprintf(fp_ts, "%"PRIu64",%"PRIu64"\n", mic_time, spk_time);
+ fclose(fp_ts);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_timestamps.txt!");
+ }
+#endif /* #if DEBUG_AEC */
+ ALOGV("%s exit", __func__);
+ return ret;
+}
+
+#endif /*#ifdef AEC_HAL*/
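
For illustration only (not part of this patch): a minimal standalone sketch of
the in-place averaging downmix that get_reference_audio_in_place() applies when
a mono reference is requested from a multi-channel playback stream. clamp16()
is re-implemented locally because audio_utils is not linked in a standalone
build, and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for audio_utils' clamp16(); defensive here, since the
 * average of int16_t samples cannot actually exceed the int16_t range. */
static int16_t clamp16_local(int32_t sample) {
    if (sample > INT16_MAX) return INT16_MAX;
    if (sample < INT16_MIN) return INT16_MIN;
    return (int16_t)sample;
}

int main(void) {
    /* Interleaved stereo playback frames: L0 R0 L1 R1 L2 R2 (made-up values). */
    int16_t buf[] = { 1000, 3000, -2000, 2000, 32000, 32000 };
    const size_t frames = 3;
    const int32_t channels = 2;

    /* Downmix in place: the mono result occupies the first 'frames' samples. */
    int16_t *src = buf;
    int16_t *dst = buf;
    for (size_t f = 0; f < frames; f++) {
        int32_t acc = 0;
        for (int32_t ch = 0; ch < channels; ch++) {
            acc += src[ch];
        }
        *dst++ = clamp16_local(acc / channels);
        src += channels;
    }

    for (size_t f = 0; f < frames; f++) {
        printf("mono[%zu] = %d\n", f, buf[f]); /* prints 2000, 0, 32000 */
    }
    return 0;
}
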
diff --git a/audio/audio_aec.h b/audio/audio_aec.h
new file mode 100644
index 0000000..ac7a1dd
--- /dev/null
+++ b/audio/audio_aec.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Definitions and interface related to HAL implementations of Acoustic Echo Canceller (AEC).
+ *
+ * AEC cleans the microphone signal by removing from it audio data corresponding to loudspeaker
+ * playback. Note that this process can be nonlinear.
+ *
+ */
+
+#ifndef _AUDIO_AEC_H_
+#define _AUDIO_AEC_H_
+
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <hardware/audio.h>
+#include <audio_utils/resampler.h>
+#include "audio_hw.h"
+#include "fifo_wrapper.h"
+
+struct aec_t {
+ pthread_mutex_t lock;
+ size_t num_reference_channels;
+ bool mic_initialized;
+ int32_t *mic_buf;
+ size_t mic_num_channels;
+ size_t mic_buf_size_bytes;
+ size_t mic_frame_size_bytes;
+ uint32_t mic_sampling_rate;
+ struct aec_info last_mic_info;
+ bool spk_initialized;
+ int32_t *spk_buf;
+ size_t spk_num_channels;
+ size_t spk_buf_size_bytes;
+ size_t spk_frame_size_bytes;
+ uint32_t spk_sampling_rate;
+ struct aec_info last_spk_info;
+ int16_t *spk_buf_playback_format;
+ int16_t *spk_buf_resampler_out;
+ void *spk_fifo;
+ void *ts_fifo;
+ ssize_t read_write_diff_bytes;
+ struct resampler_itfe *spk_resampler;
+ bool spk_running;
+ bool prev_spk_running;
+};
+
+/* Initialize AEC object.
+ * This must be called when the audio device is opened.
+ * ALSA device mutex must be held before calling this API.
+ * Returns -EINVAL if AEC object fails to initialize, else returns 0. */
+int init_aec (int sampling_rate, int num_reference_channels,
+ int num_microphone_channels, struct aec_t **);
+
+/* Release AEC object.
+ * This must be called when the audio device is closed. */
+void release_aec(struct aec_t* aec);
+
+/* Initialize reference configuration for AEC.
+ * Must be called when a new output stream is opened.
+ * Returns -EINVAL if any processing block fails to initialize,
+ * else returns 0. */
+int init_aec_reference_config (struct aec_t *aec, struct alsa_stream_out *out);
+
+/* Clear reference configuration for AEC.
+ * Must be called when the output stream is closed. */
+void destroy_aec_reference_config (struct aec_t *aec);
+
+/* Initialize microphone configuration for AEC.
+ * Must be called when a new input stream is opened.
+ * Returns -EINVAL if any processing block fails to initialize,
+ * else returns 0. */
+int init_aec_mic_config(struct aec_t* aec, struct alsa_stream_in* in);
+
+/* Clear microphone configuration for AEC.
+ * Must be called when the input stream is closed. */
+void destroy_aec_mic_config (struct aec_t *aec);
+
+/* Used to communicate playback state (running or not) to AEC interface.
+ * This is used by process_aec() to determine if AEC processing is to be run. */
+void aec_set_spk_running (struct aec_t *aec, bool state);
+
+/* Used to communicate playback state (running or not) to the caller. */
+bool aec_get_spk_running(struct aec_t* aec);
+
+/* Write audio samples to AEC reference FIFO for use in AEC.
+ * Both audio samples and timestamps are added in FIFO fashion.
+ * Must be called after every write to PCM.
+ * Returns -ENOMEM if the write fails, else returns 0. */
+int write_to_reference_fifo(struct aec_t* aec, void* buffer, struct aec_info* info);
+
+/* Get reference audio samples + timestamp, in the format expected by AEC,
+ * i.e. same sample rate and bit rate as microphone audio.
+ * Timestamp is updated in field 'timestamp_usec', and not in 'timestamp'.
+ * Returns:
+ * -EINVAL if the AEC object is invalid.
+ * -ENOMEM if the reference FIFO overflows or is corrupted.
+ * -ETIMEDOUT if we timed out waiting for the requested number of bytes
+ * 0 otherwise */
+int get_reference_samples(struct aec_t* aec, void* buffer, struct aec_info* info);
+
+#ifdef AEC_HAL
+
+/* Processing function call for AEC.
+ * AEC output is updated at location pointed to by 'buffer'.
+ * This function does not run AEC when there is no playback -
+ * as communicated to this AEC interface using aec_set_spk_running().
+ * Returns -EINVAL if processing fails, else returns 0. */
+int process_aec(struct aec_t* aec, void* buffer, struct aec_info* info);
+
+#else /* #ifdef AEC_HAL */
+
+#define process_aec(...) ((int)0)
+
+#endif /* #ifdef AEC_HAL */
+
+#endif /* _AUDIO_AEC_H_ */
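
For illustration only (not part of this patch): a hedged sketch of the call
order this header describes, pieced together from the doc comments above and
from out_write() in the audio_hw.c hunk below. The sample rate, channel counts,
and the helper name aec_lifecycle_sketch() are placeholders; the capture-side
hookup (calling process_aec() from the input read path) is inferred from the
header comments, since that hunk is not shown in this excerpt.

#include <stdbool.h>
#include "audio_aec.h"

/* Hypothetical driver; 'out' and 'in' stand for streams the HAL has already
 * opened and configured. */
static int aec_lifecycle_sketch(struct alsa_stream_out *out, struct alsa_stream_in *in,
                                void *playback_buf, void *capture_buf,
                                struct aec_info *info) {
    struct aec_t *aec = NULL;

    /* adev_open(): create the AEC object once per audio device
     * (16000/1/2 are placeholder values). */
    int ret = init_aec(16000, 1 /* reference channels */, 2 /* mic channels */, &aec);
    if (ret) {
        return ret;
    }

    /* adev_open_output_stream() / adev_open_input_stream(). */
    init_aec_reference_config(aec, out);
    init_aec_mic_config(aec, in);

    /* out_write(): mark playback as running and, after each successful
     * pcm_write(), push the samples plus their timestamp (info->bytes and
     * info->timestamp assumed already filled from pcm_get_htimestamp())
     * into the speaker loopback FIFO. */
    aec_set_spk_running(aec, true);
    write_to_reference_fifo(aec, playback_buf, info);

    /* Input read path: clean the captured buffer. This is a no-op stub when
     * AEC_HAL is not defined. */
    if (process_aec(aec, capture_buf, info) != 0) {
        /* Nothing else to do; capture_buf still holds the raw mic samples. */
    }

    /* Stream close / adev_close(). */
    destroy_aec_reference_config(aec);
    destroy_aec_mic_config(aec);
    release_aec(aec);
    return 0;
}
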
diff --git a/audio/audio_hw.c b/audio/audio_hw.c
index 805e2cd..4a16ac1 100644
--- a/audio/audio_hw.c
+++ b/audio/audio_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,811 +13,494 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
- * Copied as it is from device/google/cuttlefish/guest/hals/audio/audio_hw.c
- * and fixed couple of typos pointed out by Lint during review.
+ * Copied as-is from device/amlogic/generic/hal/audio/
*/
-#define LOG_TAG "audio_hw_generic"
+#define LOG_TAG "audio_hw_yukawa"
+//#define LOG_NDEBUG 0
-#include <assert.h>
#include <errno.h>
#include <inttypes.h>
+#include <malloc.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/time.h>
-#include <dlfcn.h>
-#include <fcntl.h>
#include <unistd.h>
#include <log/log.h>
-#include <cutils/list.h>
#include <cutils/str_parms.h>
+#include <cutils/properties.h>
#include <hardware/hardware.h>
#include <system/audio.h>
#include <hardware/audio.h>
+
+#include <audio_effects/effect_aec.h>
+#include <audio_route/audio_route.h>
+#include <audio_utils/clock.h>
+#include <audio_utils/echo_reference.h>
+#include <audio_utils/resampler.h>
+#include <hardware/audio_alsaops.h>
+#include <hardware/audio_effect.h>
+#include <sound/asound.h>
#include <tinyalsa/asoundlib.h>
-#define PCM_CARD 0
-#define PCM_DEVICE 0
+#include <sys/ioctl.h>
+#include "audio_aec.h"
+#include "audio_hw.h"
-#define OUT_PERIOD_MS 15
-#define OUT_PERIOD_COUNT 4
+static int adev_get_mic_mute(const struct audio_hw_device* dev, bool* state);
+static int adev_get_microphones(const struct audio_hw_device* dev,
+ struct audio_microphone_characteristic_t* mic_array,
+ size_t* mic_count);
+static size_t out_get_buffer_size(const struct audio_stream* stream);
-#define IN_PERIOD_MS 15
-#define IN_PERIOD_COUNT 4
-
-struct generic_audio_device {
- struct audio_hw_device device; // Constant after init
- pthread_mutex_t lock;
- bool mic_mute; // Protected by this->lock
- struct mixer* mixer; // Protected by this->lock
- struct listnode out_streams; // Record for output streams, protected by this->lock
- struct listnode in_streams; // Record for input streams, protected by this->lock
- audio_patch_handle_t next_patch_handle; // Protected by this->lock
-};
-
-/* If not NULL, this is a pointer to the fallback module.
- * This really is the original goldfish audio device /dev/eac which we will use
- * if no alsa devices are detected.
- */
-static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state);
-static int adev_get_microphones(const audio_hw_device_t *dev,
- struct audio_microphone_characteristic_t *mic_array,
- size_t *mic_count);
-
-
-typedef struct audio_vbuffer {
- pthread_mutex_t lock;
- uint8_t * data;
- size_t frame_size;
- size_t frame_count;
- size_t head;
- size_t tail;
- size_t live;
-} audio_vbuffer_t;
-
-static int audio_vbuffer_init (audio_vbuffer_t * audio_vbuffer, size_t frame_count,
- size_t frame_size) {
- if (!audio_vbuffer) {
- return -EINVAL;
- }
- audio_vbuffer->frame_size = frame_size;
- audio_vbuffer->frame_count = frame_count;
- size_t bytes = frame_count * frame_size;
- audio_vbuffer->data = calloc(bytes, 1);
- if (!audio_vbuffer->data) {
- return -ENOMEM;
- }
- audio_vbuffer->head = 0;
- audio_vbuffer->tail = 0;
- audio_vbuffer->live = 0;
- pthread_mutex_init (&audio_vbuffer->lock, (const pthread_mutexattr_t *) NULL);
- return 0;
+static int get_audio_output_port(audio_devices_t devices) {
+ /* Only HDMI out for now #FIXME */
+ return PORT_HDMI;
}
-static int audio_vbuffer_destroy (audio_vbuffer_t * audio_vbuffer) {
- if (!audio_vbuffer) {
- return -EINVAL;
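+/* Shift 'ts' by 'frames' worth of time at 'sampling_rate': forward by the
+ * frames still queued for playback, or backward (negative 'frames') to rewind
+ * a capture timestamp to the first sample available in the buffer. */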
+static void timestamp_adjust(struct timespec* ts, ssize_t frames, uint32_t sampling_rate) {
+ /* This function assumes the adjustment (in nsec) is less than the max value of long,
+ * which for a 32-bit long is 2^31 * 1e-9 seconds, slightly over 2 seconds.
+ * For 64-bit long it is 9e+9 seconds. */
+ long adj_nsec = (frames / (float) sampling_rate) * 1E9L;
+ ts->tv_nsec += adj_nsec;
+ while (ts->tv_nsec > 1E9L) {
+ ts->tv_sec++;
+ ts->tv_nsec -= 1E9L;
}
- free(audio_vbuffer->data);
- pthread_mutex_destroy(&audio_vbuffer->lock);
- return 0;
+ if (ts->tv_nsec < 0) {
+ ts->tv_sec--;
+ ts->tv_nsec += 1E9L;
+ }
}
-static int audio_vbuffer_live (audio_vbuffer_t * audio_vbuffer) {
- if (!audio_vbuffer) {
+/* Helper function to get PCM hardware timestamp.
+ * Only the field 'timestamp' of argument 'ts' is updated. */
+static int get_pcm_timestamp(struct pcm* pcm, uint32_t sample_rate, struct aec_info* info,
+ bool isOutput) {
+ int ret = 0;
+ if (pcm_get_htimestamp(pcm, &info->available, &info->timestamp) < 0) {
+ ALOGE("Error getting PCM timestamp!");
+ info->timestamp.tv_sec = 0;
+ info->timestamp.tv_nsec = 0;
return -EINVAL;
}
- pthread_mutex_lock (&audio_vbuffer->lock);
- int live = audio_vbuffer->live;
- pthread_mutex_unlock (&audio_vbuffer->lock);
- return live;
+ ssize_t frames;
+ if (isOutput) {
+ frames = pcm_get_buffer_size(pcm) - info->available;
+ } else {
+ frames = -info->available; /* rewind timestamp */
+ }
+ timestamp_adjust(&info->timestamp, frames, sample_rate);
+ return ret;
}
-#define MIN(a,b) (((a)<(b))?(a):(b))
-static size_t audio_vbuffer_write (audio_vbuffer_t * audio_vbuffer, const void * buffer, size_t frame_count) {
- size_t frames_written = 0;
- pthread_mutex_lock (&audio_vbuffer->lock);
-
- while (frame_count != 0) {
- int frames = 0;
- if (audio_vbuffer->live == 0 || audio_vbuffer->head > audio_vbuffer->tail) {
- frames = MIN(frame_count, audio_vbuffer->frame_count - audio_vbuffer->head);
- } else if (audio_vbuffer->head < audio_vbuffer->tail) {
- frames = MIN(frame_count, audio_vbuffer->tail - (audio_vbuffer->head));
- } else {
- // Full
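+/* Read up to max_length 16-bit FIR taps from 'filename', one coefficient per
+ * line; lines starting with '#' are treated as comments. Returns the number
+ * of taps read, or 0 if the file is missing or malformed. */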
+static int read_filter_from_file(const char* filename, int16_t* filter, int max_length) {
+ FILE* fp = fopen(filename, "r");
+ if (fp == NULL) {
+ ALOGI("%s: File %s not found.", __func__, filename);
+ return 0;
+ }
+ int num_taps = 0;
+ char* line = NULL;
+ size_t len = 0;
+ while (!feof(fp)) {
+ size_t size = getline(&line, &len, fp);
+ if ((line[0] == '#') || (size < 2)) {
+ continue;
+ }
+ int n = sscanf(line, "%" SCNd16 "\n", &filter[num_taps++]);
+ if (n < 1) {
+ ALOGE("Could not find coefficient %d! Exiting...", num_taps - 1);
+ return 0;
+ }
+ ALOGV("Coeff %d : %" PRId16, num_taps, filter[num_taps - 1]);
+ if (num_taps == max_length) {
+ ALOGI("%s: max tap length %d reached.", __func__, max_length);
break;
}
- memcpy(&audio_vbuffer->data[audio_vbuffer->head*audio_vbuffer->frame_size],
- &((uint8_t*)buffer)[frames_written*audio_vbuffer->frame_size],
- frames*audio_vbuffer->frame_size);
- audio_vbuffer->live += frames;
- frames_written += frames;
- frame_count -= frames;
- audio_vbuffer->head = (audio_vbuffer->head + frames) % audio_vbuffer->frame_count;
}
-
- pthread_mutex_unlock (&audio_vbuffer->lock);
- return frames_written;
+ free(line);
+ fclose(fp);
+ return num_taps;
}
-static size_t audio_vbuffer_read (audio_vbuffer_t * audio_vbuffer, void * buffer, size_t frame_count) {
- size_t frames_read = 0;
- pthread_mutex_lock (&audio_vbuffer->lock);
+static void out_set_eq(struct alsa_stream_out* out) {
+ out->speaker_eq = NULL;
+ int16_t* speaker_eq_coeffs = (int16_t*)calloc(SPEAKER_MAX_EQ_LENGTH, sizeof(int16_t));
+ if (speaker_eq_coeffs == NULL) {
+ ALOGE("%s: Failed to allocate speaker EQ", __func__);
+ return;
+ }
+ int num_taps = read_filter_from_file(SPEAKER_EQ_FILE, speaker_eq_coeffs, SPEAKER_MAX_EQ_LENGTH);
+ if (num_taps == 0) {
+ ALOGI("%s: Empty filter file or 0 taps set.", __func__);
+ free(speaker_eq_coeffs);
+ return;
+ }
+ out->speaker_eq = fir_init(
+ out->config.channels, FIR_SINGLE_FILTER, num_taps,
+ out_get_buffer_size(&out->stream.common) / out->config.channels / sizeof(int16_t),
+ speaker_eq_coeffs);
+ free(speaker_eq_coeffs);
+}
- while (frame_count != 0) {
- int frames = 0;
- if (audio_vbuffer->live == audio_vbuffer->frame_count ||
- audio_vbuffer->tail > audio_vbuffer->head) {
- frames = MIN(frame_count, audio_vbuffer->frame_count - audio_vbuffer->tail);
- } else if (audio_vbuffer->tail < audio_vbuffer->head) {
- frames = MIN(frame_count, audio_vbuffer->head - audio_vbuffer->tail);
- } else {
+/* must be called with hw device and output stream mutexes locked */
+static int start_output_stream(struct alsa_stream_out *out)
+{
+ struct alsa_audio_device *adev = out->dev;
+
+ /* default to low power: will be corrected in out_write if necessary before first write to
+ * tinyalsa.
+ */
+ out->write_threshold = PLAYBACK_PERIOD_COUNT * PLAYBACK_PERIOD_SIZE;
+ out->config.start_threshold = PLAYBACK_PERIOD_START_THRESHOLD * PLAYBACK_PERIOD_SIZE;
+ out->config.avail_min = PLAYBACK_PERIOD_SIZE;
+ out->unavailable = true;
+ unsigned int pcm_retry_count = PCM_OPEN_RETRIES;
+ int out_port = get_audio_output_port(out->devices);
+
+ while (1) {
+ out->pcm = pcm_open(CARD_OUT, out_port, PCM_OUT | PCM_MONOTONIC, &out->config);
+ if ((out->pcm != NULL) && pcm_is_ready(out->pcm)) {
break;
+ } else {
+ ALOGE("cannot open pcm_out driver: %s", pcm_get_error(out->pcm));
+ if (out->pcm != NULL) {
+ pcm_close(out->pcm);
+ out->pcm = NULL;
+ }
+ if (--pcm_retry_count == 0) {
+ ALOGE("Failed to open pcm_out after %d tries", PCM_OPEN_RETRIES);
+ return -ENODEV;
+ }
+ usleep(PCM_OPEN_WAIT_TIME_MS * 1000);
}
- memcpy(&((uint8_t*)buffer)[frames_read*audio_vbuffer->frame_size],
- &audio_vbuffer->data[audio_vbuffer->tail*audio_vbuffer->frame_size],
- frames*audio_vbuffer->frame_size);
- audio_vbuffer->live -= frames;
- frames_read += frames;
- frame_count -= frames;
- audio_vbuffer->tail = (audio_vbuffer->tail + frames) % audio_vbuffer->frame_count;
}
-
- pthread_mutex_unlock (&audio_vbuffer->lock);
- return frames_read;
+ out->unavailable = false;
+ adev->active_output = out;
+ return 0;
}
-struct generic_stream_out {
- struct audio_stream_out stream; // Constant after init
- pthread_mutex_t lock;
- struct generic_audio_device *dev; // Constant after init
- uint32_t num_devices; // Protected by this->lock
- audio_devices_t devices[AUDIO_PATCH_PORTS_MAX]; // Protected by this->lock
- struct audio_config req_config; // Constant after init
- struct pcm_config pcm_config; // Constant after init
- audio_vbuffer_t buffer; // Constant after init
-
- // Time & Position Keeping
- bool standby; // Protected by this->lock
- uint64_t underrun_position; // Protected by this->lock
- struct timespec underrun_time; // Protected by this->lock
- uint64_t last_write_time_us; // Protected by this->lock
- uint64_t frames_total_buffered; // Protected by this->lock
- uint64_t frames_written; // Protected by this->lock
- uint64_t frames_rendered; // Protected by this->lock
-
- // Worker
- pthread_t worker_thread; // Constant after init
- pthread_cond_t worker_wake; // Protected by this->lock
- bool worker_standby; // Protected by this->lock
- bool worker_exit; // Protected by this->lock
-
- audio_io_handle_t handle; // Constant after init
- audio_patch_handle_t patch_handle; // Protected by this->dev->lock
-
- struct listnode stream_node; // Protected by this->dev->lock
-};
-
-struct generic_stream_in {
- struct audio_stream_in stream; // Constant after init
- pthread_mutex_t lock;
- struct generic_audio_device *dev; // Constant after init
- audio_devices_t device; // Protected by this->lock
- struct audio_config req_config; // Constant after init
- struct pcm *pcm; // Protected by this->lock
- struct pcm_config pcm_config; // Constant after init
- int16_t *stereo_to_mono_buf; // Protected by this->lock
- size_t stereo_to_mono_buf_size; // Protected by this->lock
- audio_vbuffer_t buffer; // Protected by this->lock
-
- // Time & Position Keeping
- bool standby; // Protected by this->lock
- int64_t standby_position; // Protected by this->lock
- struct timespec standby_exit_time;// Protected by this->lock
- int64_t standby_frames_read; // Protected by this->lock
-
- // Worker
- pthread_t worker_thread; // Constant after init
- pthread_cond_t worker_wake; // Protected by this->lock
- bool worker_standby; // Protected by this->lock
- bool worker_exit; // Protected by this->lock
-
- audio_io_handle_t handle; // Constant after init
- audio_patch_handle_t patch_handle; // Protected by this->dev->lock
-
- struct listnode stream_node; // Protected by this->dev->lock
-};
-
-static struct pcm_config pcm_config_out = {
- .channels = 2,
- .rate = 0,
- .period_size = 0,
- .period_count = OUT_PERIOD_COUNT,
- .format = PCM_FORMAT_S16_LE,
- .start_threshold = 0,
-};
-
-static struct pcm_config pcm_config_in = {
- .channels = 2,
- .rate = 0,
- .period_size = 0,
- .period_count = IN_PERIOD_COUNT,
- .format = PCM_FORMAT_S16_LE,
- .start_threshold = 0,
- .stop_threshold = INT_MAX,
-};
-
-static pthread_mutex_t adev_init_lock = PTHREAD_MUTEX_INITIALIZER;
-static unsigned int audio_device_ref_count = 0;
-
static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- return out->req_config.sample_rate;
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ return out->config.rate;
}
static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
+ ALOGV("out_set_sample_rate: %d", 0);
return -ENOSYS;
}
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- int size = out->pcm_config.period_size *
- audio_stream_out_frame_size(&out->stream);
+ ALOGV("out_get_buffer_size: %d", 4096);
- return size;
+ /* return the closest majoring multiple of 16 frames, as
+ * audioflinger expects audio buffers to be a multiple of 16 frames */
+ size_t size = PLAYBACK_PERIOD_SIZE;
+ size = ((size + 15) / 16) * 16;
+ return size * audio_stream_out_frame_size((struct audio_stream_out *)stream);
}
static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- return out->req_config.channel_mask;
+ ALOGV("out_get_channels");
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ return audio_channel_out_mask_from_count(out->config.channels);
}
static audio_format_t out_get_format(const struct audio_stream *stream)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
-
- return out->req_config.format;
+ ALOGV("out_get_format");
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ return audio_format_from_pcm_format(out->config.format);
}
static int out_set_format(struct audio_stream *stream, audio_format_t format)
{
+ ALOGV("out_set_format: %d",format);
return -ENOSYS;
}
+static int do_output_standby(struct alsa_stream_out *out)
+{
+ struct alsa_audio_device *adev = out->dev;
+
+ fir_reset(out->speaker_eq);
+
+ if (!out->standby) {
+ pcm_close(out->pcm);
+ out->pcm = NULL;
+ adev->active_output = NULL;
+ out->standby = 1;
+ }
+ aec_set_spk_running(adev->aec, false);
+ return 0;
+}
+
+static int out_standby(struct audio_stream *stream)
+{
+ ALOGV("out_standby");
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ int status;
+
+ pthread_mutex_lock(&out->dev->lock);
+ pthread_mutex_lock(&out->lock);
+ status = do_output_standby(out);
+ pthread_mutex_unlock(&out->lock);
+ pthread_mutex_unlock(&out->dev->lock);
+ return status;
+}
+
static int out_dump(const struct audio_stream *stream, int fd)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- pthread_mutex_lock(&out->lock);
- dprintf(fd, "\tout_dump:\n"
- "\t\tsample rate: %u\n"
- "\t\tbuffer size: %zu\n"
- "\t\tchannel mask: %08x\n"
- "\t\tformat: %d\n"
- "\t\tdevice(s): ",
- out_get_sample_rate(stream),
- out_get_buffer_size(stream),
- out_get_channels(stream),
- out_get_format(stream));
- if (out->num_devices == 0) {
- dprintf(fd, "%08x\n", AUDIO_DEVICE_NONE);
- } else {
- for (uint32_t i = 0; i < out->num_devices; i++) {
- if (i != 0) {
- dprintf(fd, ", ");
- }
- dprintf(fd, "%08x", out->devices[i]);
- }
- dprintf(fd, "\n");
- }
- dprintf(fd, "\t\taudio dev: %p\n\n", out->dev);
- pthread_mutex_unlock(&out->lock);
+ ALOGV("out_dump");
return 0;
}
static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
+ ALOGV("out_set_parameters");
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ struct alsa_audio_device *adev = out->dev;
struct str_parms *parms;
char value[32];
- int success;
- int ret = -EINVAL;
+ int ret, val = 0;
- if (kvpairs == NULL || kvpairs[0] == 0) {
- return 0;
- }
parms = str_parms_create_str(kvpairs);
- success = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
- value, sizeof(value));
- // As the hal version is 3.0, it must not use set parameters API to set audio devices.
- // Instead, it should use create_audio_patch API.
- assert(("Must not use set parameters API to set audio devices", success < 0));
- if (str_parms_has_key(parms, AUDIO_PARAMETER_STREAM_FORMAT)) {
- // match the return value of out_set_format
- ret = -ENOSYS;
+ ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
+ if (ret >= 0) {
+ val = atoi(value);
+ pthread_mutex_lock(&adev->lock);
+ pthread_mutex_lock(&out->lock);
+ if (((out->devices & AUDIO_DEVICE_OUT_ALL) != val) && (val != 0)) {
+ out->devices &= ~AUDIO_DEVICE_OUT_ALL;
+ out->devices |= val;
+ }
+ pthread_mutex_unlock(&out->lock);
+ pthread_mutex_unlock(&adev->lock);
}
str_parms_destroy(parms);
-
- if (ret == -EINVAL) {
- ALOGW("%s(), unsupported parameter %s", __func__, kvpairs);
- // There is not any key supported for set_parameters API.
- // Return error when there is non-null value passed in.
- }
- return ret;
+ return 0;
}
static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- struct str_parms *query = str_parms_create_str(keys);
- char *str = NULL;
- char value[256];
- struct str_parms *reply = str_parms_create();
- int ret;
- bool get = false;
-
- ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
- if (ret >= 0) {
- pthread_mutex_lock(&out->lock);
- audio_devices_t device = AUDIO_DEVICE_NONE;
- for (uint32_t i = 0; i < out->num_devices; i++) {
- device |= out->devices[i];
- }
- str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, device);
- pthread_mutex_unlock(&out->lock);
- get = true;
- }
-
- if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_FORMATS)) {
- value[0] = 0;
- strcat(value, "AUDIO_FORMAT_PCM_16_BIT");
- str_parms_add_str(reply, AUDIO_PARAMETER_STREAM_SUP_FORMATS, value);
- get = true;
- }
-
- if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_FORMAT)) {
- value[0] = 0;
- strcat(value, "AUDIO_FORMAT_PCM_16_BIT");
- str_parms_add_str(reply, AUDIO_PARAMETER_STREAM_FORMAT, value);
- get = true;
- }
-
- if (get) {
- str = str_parms_to_str(reply);
- }
- else {
- ALOGD("%s Unsupported parameter: %s", __FUNCTION__, keys);
- }
-
- str_parms_destroy(query);
- str_parms_destroy(reply);
- return str;
+ ALOGV("out_get_parameters");
+ return strdup("");
}
static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- return (out->pcm_config.period_size * 1000) / out->pcm_config.rate;
+ ALOGV("out_get_latency");
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ return (PLAYBACK_PERIOD_SIZE * PLAYBACK_PERIOD_COUNT * 1000) / out->config.rate;
}
static int out_set_volume(struct audio_stream_out *stream, float left,
- float right)
+ float right)
{
+ ALOGV("out_set_volume: Left:%f Right:%f", left, right);
return -ENOSYS;
}
-static void *out_write_worker(void * args)
+static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
+ size_t bytes)
{
- struct generic_stream_out *out = (struct generic_stream_out *)args;
- struct pcm *pcm = NULL;
- uint8_t *buffer = NULL;
- int buffer_frames;
- int buffer_size;
- bool restart = false;
- bool shutdown = false;
- while (true) {
- pthread_mutex_lock(&out->lock);
- while (out->worker_standby || restart) {
- restart = false;
- if (pcm) {
- pcm_close(pcm); // Frees pcm
- pcm = NULL;
- free(buffer);
- buffer=NULL;
- }
- if (out->worker_exit) {
- break;
- }
- pthread_cond_wait(&out->worker_wake, &out->lock);
- }
+ int ret;
+ struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
+ struct alsa_audio_device *adev = out->dev;
+ size_t frame_size = audio_stream_out_frame_size(stream);
+ size_t out_frames = bytes / frame_size;
- if (out->worker_exit) {
- if (!out->worker_standby) {
- ALOGE("Out worker not in standby before exiting");
- }
- shutdown = true;
- }
+ ALOGV("%s: devices: %d, bytes %zu", __func__, out->devices, bytes);
- while (!shutdown && audio_vbuffer_live(&out->buffer) == 0) {
- pthread_cond_wait(&out->worker_wake, &out->lock);
- }
-
- if (shutdown) {
- pthread_mutex_unlock(&out->lock);
- break;
- }
-
- if (!pcm) {
- pcm = pcm_open(PCM_CARD, PCM_DEVICE,
- PCM_OUT | PCM_MONOTONIC, &out->pcm_config);
- if (!pcm_is_ready(pcm)) {
- ALOGE("pcm_open(out) failed: %s: channels %d format %d rate %d",
- pcm_get_error(pcm),
- out->pcm_config.channels,
- out->pcm_config.format,
- out->pcm_config.rate
- );
- pthread_mutex_unlock(&out->lock);
- break;
- }
- buffer_frames = out->pcm_config.period_size;
- buffer_size = pcm_frames_to_bytes(pcm, buffer_frames);
- buffer = malloc(buffer_size);
- if (!buffer) {
- ALOGE("could not allocate write buffer");
- pthread_mutex_unlock(&out->lock);
- break;
- }
- }
- int frames = audio_vbuffer_read(&out->buffer, buffer, buffer_frames);
- pthread_mutex_unlock(&out->lock);
- int ret = pcm_write(pcm, buffer, pcm_frames_to_bytes(pcm, frames));
- if (ret != 0) {
- ALOGE("pcm_write failed %s", pcm_get_error(pcm));
- restart = true;
- }
- }
- if (buffer) {
- free(buffer);
- }
-
- return NULL;
-}
-
-// Call with in->lock held
-static void get_current_output_position(struct generic_stream_out *out,
- uint64_t * position,
- struct timespec * timestamp) {
- struct timespec curtime = { .tv_sec = 0, .tv_nsec = 0 };
- clock_gettime(CLOCK_MONOTONIC, &curtime);
- const int64_t now_us = (curtime.tv_sec * 1000000000LL + curtime.tv_nsec) / 1000;
- if (timestamp) {
- *timestamp = curtime;
- }
- int64_t position_since_underrun;
- if (out->standby) {
- position_since_underrun = 0;
- } else {
- const int64_t first_us = (out->underrun_time.tv_sec * 1000000000LL +
- out->underrun_time.tv_nsec) / 1000;
- position_since_underrun = (now_us - first_us) *
- out_get_sample_rate(&out->stream.common) /
- 1000000;
- if (position_since_underrun < 0) {
- position_since_underrun = 0;
- }
- }
- *position = out->underrun_position + position_since_underrun;
-
- // The device will reuse the same output stream leading to periods of
- // underrun.
- if (*position > out->frames_written) {
- ALOGW("Not supplying enough data to HAL, expected position %" PRIu64 " , only wrote "
- "%" PRIu64,
- *position, out->frames_written);
-
- *position = out->frames_written;
- out->underrun_position = *position;
- out->underrun_time = curtime;
- out->frames_total_buffered = 0;
- }
-}
-
-
-static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
- size_t bytes)
-{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- const size_t frames = bytes / audio_stream_out_frame_size(stream);
-
+ /* acquiring hw device mutex systematically is useful if a low priority thread is waiting
+ * on the output stream mutex - e.g. executing select_mode() while holding the hw device
+ * mutex
+ */
+ pthread_mutex_lock(&adev->lock);
pthread_mutex_lock(&out->lock);
-
- if (out->worker_standby) {
- out->worker_standby = false;
- }
-
- uint64_t current_position;
- struct timespec current_time;
-
- get_current_output_position(out, ¤t_position, ¤t_time);
- const uint64_t now_us = (current_time.tv_sec * 1000000000LL +
- current_time.tv_nsec) / 1000;
if (out->standby) {
- out->standby = false;
- out->underrun_time = current_time;
- out->frames_rendered = 0;
- out->frames_total_buffered = 0;
+ ret = start_output_stream(out);
+ if (ret != 0) {
+ pthread_mutex_unlock(&adev->lock);
+ goto exit;
+ }
+ out->standby = 0;
+ aec_set_spk_running(adev->aec, true);
}
- size_t frames_written = audio_vbuffer_write(&out->buffer, buffer, frames);
- pthread_cond_signal(&out->worker_wake);
+ pthread_mutex_unlock(&adev->lock);
- /* Implementation just consumes bytes if we start getting backed up */
- out->frames_written += frames;
- out->frames_rendered += frames;
- out->frames_total_buffered += frames;
-
- // We simulate the audio device blocking when it's write buffers become
- // full.
-
- // At the beginning or after an underrun, try to fill up the vbuffer.
- // This will be throttled by the PlaybackThread
- int frames_sleep = out->frames_total_buffered < out->buffer.frame_count ? 0 : frames;
-
- uint64_t sleep_time_us = frames_sleep * 1000000LL /
- out_get_sample_rate(&stream->common);
-
- // If the write calls are delayed, subtract time off of the sleep to
- // compensate
- uint64_t time_since_last_write_us = now_us - out->last_write_time_us;
- if (time_since_last_write_us < sleep_time_us) {
- sleep_time_us -= time_since_last_write_us;
- } else {
- sleep_time_us = 0;
+ if (out->speaker_eq != NULL) {
+ fir_process_interleaved(out->speaker_eq, (int16_t*)buffer, (int16_t*)buffer, out_frames);
}
- out->last_write_time_us = now_us + sleep_time_us;
+ ret = pcm_write(out->pcm, buffer, out_frames * frame_size);
+ if (ret == 0) {
+ out->frames_written += out_frames;
+
+ struct aec_info info;
+ get_pcm_timestamp(out->pcm, out->config.rate, &info, true /*isOutput*/);
+ out->timestamp = info.timestamp;
+ info.bytes = out_frames * frame_size;
+ int aec_ret = write_to_reference_fifo(adev->aec, (void *)buffer, &info);
+ if (aec_ret) {
+ ALOGE("AEC: Write to speaker loopback FIFO failed!");
+ }
+ }
+
+exit:
pthread_mutex_unlock(&out->lock);
- if (sleep_time_us > 0) {
- usleep(sleep_time_us);
+ if (ret != 0) {
+ usleep((int64_t)bytes * 1000000 / audio_stream_out_frame_size(stream) /
+ out_get_sample_rate(&stream->common));
}
- if (frames_written < frames) {
- ALOGW("Hardware backing HAL too slow, could only write %zu of %zu frames", frames_written, frames);
- }
-
- /* Always consume all bytes */
return bytes;
}
+static int out_get_render_position(const struct audio_stream_out *stream,
+ uint32_t *dsp_frames)
+{
+ ALOGV("out_get_render_position: dsp_frames: %p", dsp_frames);
+ return -ENOSYS;
+}
+
static int out_get_presentation_position(const struct audio_stream_out *stream,
uint64_t *frames, struct timespec *timestamp)
-
{
if (stream == NULL || frames == NULL || timestamp == NULL) {
return -EINVAL;
}
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
+ struct alsa_stream_out* out = (struct alsa_stream_out*)stream;
- pthread_mutex_lock(&out->lock);
- get_current_output_position(out, frames, timestamp);
- pthread_mutex_unlock(&out->lock);
+ *frames = out->frames_written;
+ *timestamp = out->timestamp;
+ ALOGV("%s: frames: %" PRIu64 ", timestamp (nsec): %" PRIu64, __func__, *frames,
+ audio_utils_ns_from_timespec(timestamp));
return 0;
}
-static int out_get_render_position(const struct audio_stream_out *stream,
- uint32_t *dsp_frames)
-{
- if (stream == NULL || dsp_frames == NULL) {
- return -EINVAL;
- }
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- pthread_mutex_lock(&out->lock);
- *dsp_frames = out->frames_rendered;
- pthread_mutex_unlock(&out->lock);
- return 0;
-}
-
-// Must be called with out->lock held
-static void do_out_standby(struct generic_stream_out *out)
-{
- int frames_sleep = 0;
- uint64_t sleep_time_us = 0;
- if (out->standby) {
- return;
- }
- while (true) {
- get_current_output_position(out, &out->underrun_position, NULL);
- frames_sleep = out->frames_written - out->underrun_position;
-
- if (frames_sleep == 0) {
- break;
- }
-
- sleep_time_us = frames_sleep * 1000000LL /
- out_get_sample_rate(&out->stream.common);
-
- pthread_mutex_unlock(&out->lock);
- usleep(sleep_time_us);
- pthread_mutex_lock(&out->lock);
- }
- out->worker_standby = true;
- out->standby = true;
-}
-
-static int out_standby(struct audio_stream *stream)
-{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- pthread_mutex_lock(&out->lock);
- do_out_standby(out);
- pthread_mutex_unlock(&out->lock);
- return 0;
-}
static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
- // out_add_audio_effect is a no op
+ ALOGV("out_add_audio_effect: %p", effect);
return 0;
}
static int out_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
- // out_remove_audio_effect is a no op
+ ALOGV("out_remove_audio_effect: %p", effect);
return 0;
}
static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
- int64_t *timestamp)
+ int64_t *timestamp)
{
+ *timestamp = 0;
+ ALOGV("out_get_next_write_timestamp: %ld", (long int)(*timestamp));
return -ENOSYS;
}
+/** audio_stream_in implementation **/
+
+/* must be called with hw device and input stream mutexes locked */
+static int start_input_stream(struct alsa_stream_in *in)
+{
+ struct alsa_audio_device *adev = in->dev;
+ in->unavailable = true;
+ unsigned int pcm_retry_count = PCM_OPEN_RETRIES;
+
+ while (1) {
+ in->pcm = pcm_open(CARD_IN, PORT_BUILTIN_MIC, PCM_IN | PCM_MONOTONIC, &in->config);
+ if ((in->pcm != NULL) && pcm_is_ready(in->pcm)) {
+ break;
+ } else {
+ ALOGE("cannot open pcm_in driver: %s", pcm_get_error(in->pcm));
+ if (in->pcm != NULL) {
+ pcm_close(in->pcm);
+ in->pcm = NULL;
+ }
+ if (--pcm_retry_count == 0) {
+ ALOGE("Failed to open pcm_in after %d tries", PCM_OPEN_RETRIES);
+ return -ENODEV;
+ }
+ usleep(PCM_OPEN_WAIT_TIME_MS * 1000);
+ }
+ }
+ in->unavailable = false;
+ adev->active_input = in;
+ return 0;
+}
+
+static void get_mic_characteristics(struct audio_microphone_characteristic_t* mic_data,
+ size_t* mic_count) {
+ *mic_count = 1;
+ memset(mic_data, 0, sizeof(struct audio_microphone_characteristic_t));
+ strlcpy(mic_data->device_id, "builtin_mic", AUDIO_MICROPHONE_ID_MAX_LEN - 1);
+ strlcpy(mic_data->address, "top", AUDIO_DEVICE_MAX_ADDRESS_LEN - 1);
+ memset(mic_data->channel_mapping, AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED,
+ sizeof(mic_data->channel_mapping));
+ mic_data->device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+ mic_data->sensitivity = -37.0;
+ mic_data->max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ mic_data->min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ mic_data->orientation.x = 0.0f;
+ mic_data->orientation.y = 0.0f;
+ mic_data->orientation.z = 0.0f;
+ mic_data->geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ mic_data->geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ mic_data->geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+}
+
static uint32_t in_get_sample_rate(const struct audio_stream *stream)
{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- return in->req_config.sample_rate;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ return in->config.rate;
}
static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
+ ALOGV("in_set_sample_rate: %d", rate);
return -ENOSYS;
}
-static int refine_output_parameters(uint32_t *sample_rate, audio_format_t *format, audio_channel_mask_t *channel_mask)
-{
- static const uint32_t sample_rates [] = {8000,11025,16000,22050,24000,32000,
- 44100,48000};
- static const int sample_rates_count = sizeof(sample_rates)/sizeof(uint32_t);
- bool inval = false;
- if (*format != AUDIO_FORMAT_PCM_16_BIT) {
- *format = AUDIO_FORMAT_PCM_16_BIT;
- inval = true;
- }
-
- int channel_count = popcount(*channel_mask);
- if (channel_count != 1 && channel_count != 2) {
- *channel_mask = AUDIO_CHANNEL_IN_STEREO;
- inval = true;
- }
-
- int i;
- for (i = 0; i < sample_rates_count; i++) {
- if (*sample_rate < sample_rates[i]) {
- *sample_rate = sample_rates[i];
- inval=true;
- break;
- }
- else if (*sample_rate == sample_rates[i]) {
- break;
- }
- else if (i == sample_rates_count-1) {
- // Cap it to the highest rate we support
- *sample_rate = sample_rates[i];
- inval=true;
- }
- }
-
- if (inval) {
- return -EINVAL;
- }
- return 0;
-}
-
-static int refine_input_parameters(uint32_t *sample_rate, audio_format_t *format, audio_channel_mask_t *channel_mask)
-{
- static const uint32_t sample_rates [] = {8000, 11025, 16000, 22050, 44100, 48000};
- static const int sample_rates_count = sizeof(sample_rates)/sizeof(uint32_t);
- bool inval = false;
- // Only PCM_16_bit is supported. If this is changed, stereo to mono drop
- // must be fixed in in_read
- if (*format != AUDIO_FORMAT_PCM_16_BIT) {
- *format = AUDIO_FORMAT_PCM_16_BIT;
- inval = true;
- }
-
- int channel_count = popcount(*channel_mask);
- if (channel_count != 1 && channel_count != 2) {
- *channel_mask = AUDIO_CHANNEL_IN_STEREO;
- inval = true;
- }
-
- int i;
- for (i = 0; i < sample_rates_count; i++) {
- if (*sample_rate < sample_rates[i]) {
- *sample_rate = sample_rates[i];
- inval=true;
- break;
- }
- else if (*sample_rate == sample_rates[i]) {
- break;
- }
- else if (i == sample_rates_count-1) {
- // Cap it to the highest rate we support
- *sample_rate = sample_rates[i];
- inval=true;
- }
- }
-
- if (inval) {
- return -EINVAL;
- }
- return 0;
-}
-
-static int check_input_parameters(uint32_t sample_rate, audio_format_t format,
- audio_channel_mask_t channel_mask)
-{
- return refine_input_parameters(&sample_rate, &format, &channel_mask);
-}
-
-static size_t get_input_buffer_size(uint32_t sample_rate, audio_format_t format,
- audio_channel_mask_t channel_mask)
-{
- size_t size;
- int channel_count = popcount(channel_mask);
- if (check_input_parameters(sample_rate, format, channel_mask) != 0)
- return 0;
-
- size = sample_rate*IN_PERIOD_MS/1000;
- // Audioflinger expects audio buffers to be multiple of 16 frames
- size = ((size + 15) / 16) * 16;
- size *= sizeof(short) * channel_count;
-
- return size;
-}
-
-
-static size_t in_get_buffer_size(const struct audio_stream *stream)
-{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- int size = get_input_buffer_size(in->req_config.sample_rate,
- in->req_config.format,
- in->req_config.channel_mask);
-
- return size;
+static size_t get_input_buffer_size(size_t frames, audio_format_t format,
+ audio_channel_mask_t channel_mask) {
+ /* return the closest majoring multiple of 16 frames, as
+ * audioflinger expects audio buffers to be a multiple of 16 frames */
+ frames = ((frames + 15) / 16) * 16;
+ size_t bytes_per_frame = audio_channel_count_from_in_mask(channel_mask) *
+ audio_bytes_per_sample(format);
+ size_t buffer_size = frames * bytes_per_frame;
+ return buffer_size;
}
static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- return in->req_config.channel_mask;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ ALOGV("in_get_channels: %d", in->config.channels);
+ return audio_channel_in_mask_from_count(in->config.channels);
}
static audio_format_t in_get_format(const struct audio_stream *stream)
{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- return in->req_config.format;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ ALOGV("in_get_format: %d", in->config.format);
+ return audio_format_from_pcm_format(in->config.format);
}
static int in_set_format(struct audio_stream *stream, audio_format_t format)
@@ -825,386 +508,293 @@
return -ENOSYS;
}
-static int in_dump(const struct audio_stream *stream, int fd)
+static size_t in_get_buffer_size(const struct audio_stream *stream)
{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+ size_t frames = CAPTURE_PERIOD_SIZE;
+ if (in->source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ frames = CAPTURE_PERIOD_SIZE * PLAYBACK_CODEC_SAMPLING_RATE / CAPTURE_CODEC_SAMPLING_RATE;
+ }
+
+ size_t buffer_size =
+ get_input_buffer_size(frames, stream->get_format(stream), stream->get_channels(stream));
+ ALOGV("in_get_buffer_size: %zu", buffer_size);
+ return buffer_size;
+}
+
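/* Worked example (editorial sketch, not part of the patch): with the constants
 * this patch defines in audio_hw.h (CAPTURE_PERIOD_SIZE = 32 * 16 = 512 frames,
 * CAPTURE_CODEC_SAMPLING_RATE = 16000, PLAYBACK_CODEC_SAMPLING_RATE = 48000,
 * stereo PCM_FORMAT_S32_LE capture), in_get_buffer_size() works out to:
 *   normal mic stream:  512 frames                          * 2 ch * 4 bytes =  4096 bytes
 *   echo reference:     512 * 48000 / 16000 = 1536 frames
 *                       (already a multiple of 16)          * 2 ch * 4 bytes = 12288 bytes
 */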
+static int in_get_active_microphones(const struct audio_stream_in* stream,
+ struct audio_microphone_characteristic_t* mic_array,
+ size_t* mic_count) {
+ ALOGV("in_get_active_microphones");
+ if ((mic_array == NULL) || (mic_count == NULL)) {
+ return -EINVAL;
+ }
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+ struct audio_hw_device* dev = (struct audio_hw_device*)in->dev;
+ bool mic_muted = false;
+ adev_get_mic_mute(dev, &mic_muted);
+ if ((in->source == AUDIO_SOURCE_ECHO_REFERENCE) || mic_muted) {
+ *mic_count = 0;
+ return 0;
+ }
+ adev_get_microphones(dev, mic_array, mic_count);
+ return 0;
+}
+
+static int do_input_standby(struct alsa_stream_in *in)
+{
+ struct alsa_audio_device *adev = in->dev;
+
+ if (!in->standby) {
+ pcm_close(in->pcm);
+ in->pcm = NULL;
+ adev->active_input = NULL;
+ in->standby = true;
+ }
+ return 0;
+}
+
+static int in_standby(struct audio_stream *stream)
+{
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ int status;
pthread_mutex_lock(&in->lock);
- dprintf(fd, "\tin_dump:\n"
- "\t\tsample rate: %u\n"
- "\t\tbuffer size: %zu\n"
- "\t\tchannel mask: %08x\n"
- "\t\tformat: %d\n"
- "\t\tdevice: %08x\n"
- "\t\taudio dev: %p\n\n",
- in_get_sample_rate(stream),
- in_get_buffer_size(stream),
- in_get_channels(stream),
- in_get_format(stream),
- in->device,
- in->dev);
+ pthread_mutex_lock(&in->dev->lock);
+ status = do_input_standby(in);
+ pthread_mutex_unlock(&in->dev->lock);
pthread_mutex_unlock(&in->lock);
+ return status;
+}
+
+static int in_dump(const struct audio_stream *stream, int fd)
+{
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+ if (in->source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ return 0;
+ }
+
+ struct audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+ size_t mic_count;
+
+ get_mic_characteristics(mic_array, &mic_count);
+
+ dprintf(fd, " Microphone count: %zd\n", mic_count);
+ size_t idx;
+ for (idx = 0; idx < mic_count; idx++) {
+ dprintf(fd, " Microphone: %zd\n", idx);
+ dprintf(fd, " Address: %s\n", mic_array[idx].address);
+ dprintf(fd, " Device: %d\n", mic_array[idx].device);
+ dprintf(fd, " Sensitivity (dB): %.2f\n", mic_array[idx].sensitivity);
+ }
+
return 0;
}
static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
- struct str_parms *parms;
- char value[32];
- int success;
- int ret = -EINVAL;
-
- if (kvpairs == NULL || kvpairs[0] == 0) {
- return 0;
- }
- parms = str_parms_create_str(kvpairs);
- success = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
- value, sizeof(value));
- // As the hal version is 3.0, it must not use set parameters API to set audio device.
- // Instead, it should use create_audio_patch API.
- assert(("Must not use set parameters API to set audio devices", success < 0));
-
- if (str_parms_has_key(parms, AUDIO_PARAMETER_STREAM_FORMAT)) {
- // match the return value of in_set_format
- ret = -ENOSYS;
- }
-
- str_parms_destroy(parms);
-
- if (ret == -EINVAL) {
- ALOGW("%s(), unsupported parameter %s", __func__, kvpairs);
- // There is not any key supported for set_parameters API.
- // Return error when there is non-null value passed in.
- }
- return ret;
+ return 0;
}
static char * in_get_parameters(const struct audio_stream *stream,
- const char *keys)
+ const char *keys)
{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- struct str_parms *query = str_parms_create_str(keys);
- char *str = NULL;
- char value[256];
- struct str_parms *reply = str_parms_create();
- int ret;
- bool get = false;
-
- ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
- if (ret >= 0) {
- str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, in->device);
- get = true;
- }
-
- if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_FORMATS)) {
- value[0] = 0;
- strcat(value, "AUDIO_FORMAT_PCM_16_BIT");
- str_parms_add_str(reply, AUDIO_PARAMETER_STREAM_SUP_FORMATS, value);
- get = true;
- }
-
- if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_FORMAT)) {
- value[0] = 0;
- strcat(value, "AUDIO_FORMAT_PCM_16_BIT");
- str_parms_add_str(reply, AUDIO_PARAMETER_STREAM_FORMAT, value);
- get = true;
- }
-
- if (get) {
- str = str_parms_to_str(reply);
- }
- else {
- ALOGD("%s Unsupported parameter: %s", __FUNCTION__, keys);
- }
-
- str_parms_destroy(query);
- str_parms_destroy(reply);
- return str;
+ return strdup("");
}
static int in_set_gain(struct audio_stream_in *stream, float gain)
{
- // in_set_gain is a no op
return 0;
}
-// Call with in->lock held
-static void get_current_input_position(struct generic_stream_in *in,
- int64_t * position,
- struct timespec * timestamp) {
- struct timespec t = { .tv_sec = 0, .tv_nsec = 0 };
- clock_gettime(CLOCK_MONOTONIC, &t);
- const int64_t now_us = (t.tv_sec * 1000000000LL + t.tv_nsec) / 1000;
- if (timestamp) {
- *timestamp = t;
- }
- int64_t position_since_standby;
- if (in->standby) {
- position_since_standby = 0;
- } else {
- const int64_t first_us = (in->standby_exit_time.tv_sec * 1000000000LL +
- in->standby_exit_time.tv_nsec) / 1000;
- position_since_standby = (now_us - first_us) *
- in_get_sample_rate(&in->stream.common) /
- 1000000;
- if (position_since_standby < 0) {
- position_since_standby = 0;
- }
- }
- *position = in->standby_position + position_since_standby;
-}
-
-// Must be called with in->lock held
-static void do_in_standby(struct generic_stream_in *in)
-{
- if (in->standby) {
- return;
- }
- in->worker_standby = true;
- get_current_input_position(in, &in->standby_position, NULL);
- in->standby = true;
-}
-
-static int in_standby(struct audio_stream *stream)
-{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- pthread_mutex_lock(&in->lock);
- do_in_standby(in);
- pthread_mutex_unlock(&in->lock);
- return 0;
-}
-
-static void *in_read_worker(void * args)
-{
- struct generic_stream_in *in = (struct generic_stream_in *)args;
- struct pcm *pcm = NULL;
- uint8_t *buffer = NULL;
- size_t buffer_frames;
- int buffer_size;
-
- bool restart = false;
- bool shutdown = false;
- while (true) {
- pthread_mutex_lock(&in->lock);
- while (in->worker_standby || restart) {
- restart = false;
- if (pcm) {
- pcm_close(pcm); // Frees pcm
- pcm = NULL;
- free(buffer);
- buffer=NULL;
- }
- if (in->worker_exit) {
- break;
- }
- pthread_cond_wait(&in->worker_wake, &in->lock);
- }
-
- if (in->worker_exit) {
- if (!in->worker_standby) {
- ALOGE("In worker not in standby before exiting");
- }
- shutdown = true;
- }
- if (shutdown) {
- pthread_mutex_unlock(&in->lock);
- break;
- }
- if (!pcm) {
- pcm = pcm_open(PCM_CARD, PCM_DEVICE,
- PCM_IN | PCM_MONOTONIC, &in->pcm_config);
- if (!pcm_is_ready(pcm)) {
- ALOGE("pcm_open(in) failed: %s: channels %d format %d rate %d",
- pcm_get_error(pcm),
- in->pcm_config.channels,
- in->pcm_config.format,
- in->pcm_config.rate
- );
- pthread_mutex_unlock(&in->lock);
- break;
- }
- buffer_frames = in->pcm_config.period_size;
- buffer_size = pcm_frames_to_bytes(pcm, buffer_frames);
- buffer = malloc(buffer_size);
- if (!buffer) {
- ALOGE("could not allocate worker read buffer");
- pthread_mutex_unlock(&in->lock);
- break;
- }
- }
- pthread_mutex_unlock(&in->lock);
- int ret = pcm_read(pcm, buffer, pcm_frames_to_bytes(pcm, buffer_frames));
- if (ret != 0) {
- ALOGW("pcm_read failed %s", pcm_get_error(pcm));
- restart = true;
- continue;
- }
-
- pthread_mutex_lock(&in->lock);
- size_t frames_written = audio_vbuffer_write(&in->buffer, buffer, buffer_frames);
- pthread_mutex_unlock(&in->lock);
-
- if (frames_written != buffer_frames) {
- ALOGW("in_read_worker only could write %zu / %zu frames", frames_written, buffer_frames);
- }
- }
- if (buffer) {
- free(buffer);
- }
- return NULL;
-}
-
static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
- size_t bytes)
+ size_t bytes)
{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- struct generic_audio_device *adev = in->dev;
- const size_t frames = bytes / audio_stream_in_frame_size(stream);
- bool mic_mute = false;
- size_t read_bytes = 0;
+ int ret;
+ struct alsa_stream_in *in = (struct alsa_stream_in *)stream;
+ struct alsa_audio_device *adev = in->dev;
+ size_t frame_size = audio_stream_in_frame_size(stream);
+ size_t in_frames = bytes / frame_size;
- adev_get_mic_mute(&adev->device, &mic_mute);
- pthread_mutex_lock(&in->lock);
+ ALOGV("in_read: stream: %d, bytes %zu", in->source, bytes);
- if (in->worker_standby) {
- in->worker_standby = false;
- }
- pthread_cond_signal(&in->worker_wake);
+ /* Special handling for Echo Reference: simply get the reference from FIFO.
+ * The format and sample rate should be specified by arguments to adev_open_input_stream. */
+ if (in->source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ struct aec_info info;
+ info.bytes = bytes;
- int64_t current_position;
- struct timespec current_time;
-
-    get_current_input_position(in, &current_position, &current_time);
- if (in->standby) {
- in->standby = false;
- in->standby_exit_time = current_time;
- in->standby_frames_read = 0;
- }
-
- const int64_t frames_available = current_position - in->standby_position - in->standby_frames_read;
- assert(frames_available >= 0);
-
- const size_t frames_wait = ((uint64_t)frames_available > frames) ? 0 : frames - frames_available;
-
- int64_t sleep_time_us = frames_wait * 1000000LL /
- in_get_sample_rate(&stream->common);
-
- pthread_mutex_unlock(&in->lock);
-
- if (sleep_time_us > 0) {
- usleep(sleep_time_us);
- }
-
- pthread_mutex_lock(&in->lock);
- int read_frames = 0;
- if (in->standby) {
- ALOGW("Input put to sleep while read in progress");
- goto exit;
- }
- in->standby_frames_read += frames;
-
- if (popcount(in->req_config.channel_mask) == 1 &&
- in->pcm_config.channels == 2) {
- // Need to resample to mono
- if (in->stereo_to_mono_buf_size < bytes*2) {
- in->stereo_to_mono_buf = realloc(in->stereo_to_mono_buf,
- bytes*2);
- if (!in->stereo_to_mono_buf) {
- ALOGE("Failed to allocate stereo_to_mono_buff");
- goto exit;
+ const uint64_t time_increment_nsec = (uint64_t)bytes * NANOS_PER_SECOND /
+ audio_stream_in_frame_size(stream) /
+ in_get_sample_rate(&stream->common);
+ if (!aec_get_spk_running(adev->aec)) {
+ if (in->timestamp_nsec == 0) {
+ struct timespec now;
+ clock_gettime(CLOCK_MONOTONIC, &now);
+ const uint64_t timestamp_nsec = audio_utils_ns_from_timespec(&now);
+ in->timestamp_nsec = timestamp_nsec;
+ } else {
+ in->timestamp_nsec += time_increment_nsec;
+ }
+ memset(buffer, 0, bytes);
+ const uint64_t time_increment_usec = time_increment_nsec / 1000;
+ usleep(time_increment_usec);
+ } else {
+ int ref_ret = get_reference_samples(adev->aec, buffer, &info);
+ if ((ref_ret) || (info.timestamp_usec == 0)) {
+ memset(buffer, 0, bytes);
+ in->timestamp_nsec += time_increment_nsec;
+ } else {
+ in->timestamp_nsec = 1000 * info.timestamp_usec;
}
}
+ in->frames_read += in_frames;
- read_frames = audio_vbuffer_read(&in->buffer, in->stereo_to_mono_buf, frames);
-
- // Currently only pcm 16 is supported.
- uint16_t *src = (uint16_t *)in->stereo_to_mono_buf;
- uint16_t *dst = (uint16_t *)buffer;
- size_t i;
- // Resample stereo 16 to mono 16 by dropping one channel.
- // The stereo stream is interleaved L-R-L-R
- for (i = 0; i < frames; i++) {
- *dst = *src;
- src += 2;
- dst += 1;
+#if DEBUG_AEC
+ FILE* fp_ref = fopen("/data/local/traces/aec_ref.pcm", "a+");
+ if (fp_ref) {
+ fwrite((char*)buffer, 1, bytes, fp_ref);
+ fclose(fp_ref);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_ref.pcm!");
}
- } else {
- read_frames = audio_vbuffer_read(&in->buffer, buffer, frames);
+ FILE* fp_ref_ts = fopen("/data/local/traces/aec_ref_timestamps.txt", "a+");
+ if (fp_ref_ts) {
+ fprintf(fp_ref_ts, "%" PRIu64 "\n", in->timestamp_nsec);
+ fclose(fp_ref_ts);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_ref_timestamps.txt!");
+ }
+#endif
+ return info.bytes;
+ }
+
+ /* Microphone input stream read */
+
+ /* acquiring hw device mutex systematically is useful if a low priority thread is waiting
+ * on the input stream mutex - e.g. executing select_mode() while holding the hw device
+ * mutex
+ */
+ pthread_mutex_lock(&in->lock);
+ pthread_mutex_lock(&adev->lock);
+ if (in->standby) {
+ ret = start_input_stream(in);
+ if (ret != 0) {
+ pthread_mutex_unlock(&adev->lock);
+ ALOGE("start_input_stream failed with code %d", ret);
+ goto exit;
+ }
+ in->standby = false;
+ }
+
+ pthread_mutex_unlock(&adev->lock);
+
+ ret = pcm_read(in->pcm, buffer, in_frames * frame_size);
+ struct aec_info info;
+ get_pcm_timestamp(in->pcm, in->config.rate, &info, false /*isOutput*/);
+ if (ret == 0) {
+ in->frames_read += in_frames;
+ in->timestamp_nsec = audio_utils_ns_from_timespec(&info.timestamp);
+ }
+ else {
+ ALOGE("pcm_read failed with code %d", ret);
}
exit:
- read_bytes = read_frames*audio_stream_in_frame_size(stream);
-
- if (mic_mute) {
- read_bytes = 0;
- }
-
- if (read_bytes < bytes) {
- memset (&((uint8_t *)buffer)[read_bytes], 0, bytes-read_bytes);
- }
-
pthread_mutex_unlock(&in->lock);
+ bool mic_muted = false;
+ adev_get_mic_mute((struct audio_hw_device*)adev, &mic_muted);
+ if (mic_muted) {
+ memset(buffer, 0, bytes);
+ }
+
+ if (ret != 0) {
+ usleep((int64_t)bytes * 1000000 / audio_stream_in_frame_size(stream) /
+ in_get_sample_rate(&stream->common));
+ } else {
+ /* Process AEC if available */
+ /* TODO move to a separate thread */
+ if (!mic_muted) {
+ info.bytes = bytes;
+ int aec_ret = process_aec(adev->aec, buffer, &info);
+ if (aec_ret) {
+ ALOGE("process_aec returned error code %d", aec_ret);
+ }
+ }
+ }
+
+#if DEBUG_AEC && !defined(AEC_HAL)
+ FILE* fp_in = fopen("/data/local/traces/aec_in.pcm", "a+");
+ if (fp_in) {
+ fwrite((char*)buffer, 1, bytes, fp_in);
+ fclose(fp_in);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_in.pcm!");
+ }
+ FILE* fp_mic_ts = fopen("/data/local/traces/aec_in_timestamps.txt", "a+");
+ if (fp_mic_ts) {
+ fprintf(fp_mic_ts, "%" PRIu64 "\n", in->timestamp_nsec);
+ fclose(fp_mic_ts);
+ } else {
+ ALOGE("AEC debug: Could not open file aec_in_timestamps.txt!");
+ }
+#endif
+
return bytes;
}
+static int in_get_capture_position(const struct audio_stream_in* stream, int64_t* frames,
+ int64_t* time) {
+ if (stream == NULL || frames == NULL || time == NULL) {
+ return -EINVAL;
+ }
+ struct alsa_stream_in* in = (struct alsa_stream_in*)stream;
+
+ *frames = in->frames_read;
+ *time = in->timestamp_nsec;
+ ALOGV("%s: source: %d, timestamp (nsec): %" PRIu64, __func__, in->source, *time);
+
+ return 0;
+}
+
static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
{
return 0;
}
-static int in_get_capture_position(const struct audio_stream_in *stream,
- int64_t *frames, int64_t *time)
-{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- pthread_mutex_lock(&in->lock);
- struct timespec current_time;
-    get_current_input_position(in, frames, &current_time);
- *time = (current_time.tv_sec * 1000000000LL + current_time.tv_nsec);
- pthread_mutex_unlock(&in->lock);
- return 0;
-}
-
-static int in_get_active_microphones(const struct audio_stream_in *stream,
- struct audio_microphone_characteristic_t *mic_array,
- size_t *mic_count)
-{
- return adev_get_microphones(NULL, mic_array, mic_count);
-}
-
static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
- // in_add_audio_effect is a no op
return 0;
}
static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
- // in_add_audio_effect is a no op
return 0;
}
static int adev_open_output_stream(struct audio_hw_device *dev,
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- struct audio_stream_out **stream_out,
- const char *address __unused)
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ struct audio_stream_out **stream_out,
+ const char *address __unused)
{
- struct generic_audio_device *adev = (struct generic_audio_device *)dev;
- struct generic_stream_out *out;
+ ALOGV("adev_open_output_stream...");
+
+ struct alsa_audio_device *ladev = (struct alsa_audio_device *)dev;
+ struct alsa_stream_out *out;
+ struct pcm_params *params;
int ret = 0;
- if (refine_output_parameters(&config->sample_rate, &config->format, &config->channel_mask)) {
- ALOGE("Error opening output stream format %d, channel_mask %04x, sample_rate %u",
- config->format, config->channel_mask, config->sample_rate);
- ret = -EINVAL;
- goto error;
- }
+ int out_port = get_audio_output_port(devices);
- out = (struct generic_stream_out *)calloc(1, sizeof(struct generic_stream_out));
+ params = pcm_params_get(CARD_OUT, out_port, PCM_OUT);
+ if (!params)
+ return -ENOSYS;
+ out = (struct alsa_stream_out *)calloc(1, sizeof(struct alsa_stream_out));
if (!out)
return -ENOMEM;
@@ -1224,141 +814,141 @@
out->stream.set_volume = out_set_volume;
out->stream.write = out_write;
out->stream.get_render_position = out_get_render_position;
- out->stream.get_presentation_position = out_get_presentation_position;
out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
+ out->stream.get_presentation_position = out_get_presentation_position;
- out->handle = handle;
+ out->config.channels = CHANNEL_STEREO;
+ out->config.rate = PLAYBACK_CODEC_SAMPLING_RATE;
+ out->config.format = PCM_FORMAT_S16_LE;
+ out->config.period_size = PLAYBACK_PERIOD_SIZE;
+ out->config.period_count = PLAYBACK_PERIOD_COUNT;
- pthread_mutex_init(&out->lock, (const pthread_mutexattr_t *) NULL);
- out->dev = adev;
- // Only 1 device is expected despite the argument being named 'devices'
- out->num_devices = 1;
- out->devices[0] = devices;
- memcpy(&out->req_config, config, sizeof(struct audio_config));
- memcpy(&out->pcm_config, &pcm_config_out, sizeof(struct pcm_config));
- out->pcm_config.rate = config->sample_rate;
- out->pcm_config.period_size = out->pcm_config.rate*OUT_PERIOD_MS/1000;
-
- out->standby = true;
- out->underrun_position = 0;
- out->underrun_time.tv_sec = 0;
- out->underrun_time.tv_nsec = 0;
- out->last_write_time_us = 0;
- out->frames_total_buffered = 0;
- out->frames_written = 0;
- out->frames_rendered = 0;
-
- ret = audio_vbuffer_init(&out->buffer,
- out->pcm_config.period_size*out->pcm_config.period_count,
- out->pcm_config.channels *
- pcm_format_to_bits(out->pcm_config.format) >> 3);
- if (ret == 0) {
- pthread_cond_init(&out->worker_wake, NULL);
- out->worker_standby = true;
- out->worker_exit = false;
- pthread_create(&out->worker_thread, NULL, out_write_worker, out);
-
+ if (out->config.rate != config->sample_rate ||
+ audio_channel_count_from_out_mask(config->channel_mask) != CHANNEL_STEREO ||
+ out->config.format != pcm_format_from_audio_format(config->format) ) {
+ config->sample_rate = out->config.rate;
+ config->format = audio_format_from_pcm_format(out->config.format);
+ config->channel_mask = audio_channel_out_mask_from_count(CHANNEL_STEREO);
+ ret = -EINVAL;
}
- pthread_mutex_lock(&adev->lock);
- list_add_tail(&adev->out_streams, &out->stream_node);
- pthread_mutex_unlock(&adev->lock);
+ ALOGI("adev_open_output_stream selects channels=%d rate=%d format=%d, devices=%d",
+ out->config.channels, out->config.rate, out->config.format, devices);
+
+ out->dev = ladev;
+ out->standby = 1;
+ out->unavailable = false;
+ out->devices = devices;
+
+ config->format = out_get_format(&out->stream.common);
+ config->channel_mask = out_get_channels(&out->stream.common);
+ config->sample_rate = out_get_sample_rate(&out->stream.common);
*stream_out = &out->stream;
-error:
+ out->speaker_eq = NULL;
+ if (out_port == PORT_INTERNAL_SPEAKER) {
+ out_set_eq(out);
+ if (out->speaker_eq == NULL) {
+ ALOGE("%s: Failed to initialize speaker EQ", __func__);
+ }
+ }
+
+ /* TODO The retry mechanism isn't implemented in AudioPolicyManager/AudioFlinger. */
+ ret = 0;
+
+ if (ret == 0) {
+ int aec_ret = init_aec_reference_config(ladev->aec, out);
+ if (aec_ret) {
+ ALOGE("AEC: Speaker config init failed!");
+ return -EINVAL;
+ }
+ }
return ret;
}
-// This must be called with adev->lock held.
-struct generic_stream_out *get_stream_out_by_io_handle_l(
- struct generic_audio_device *adev, audio_io_handle_t handle) {
- struct listnode *node;
-
- list_for_each(node, &adev->out_streams) {
- struct generic_stream_out *out = node_to_item(
- node, struct generic_stream_out, stream_node);
- if (out->handle == handle) {
- return out;
- }
- }
- return NULL;
-}
-
static void adev_close_output_stream(struct audio_hw_device *dev,
- struct audio_stream_out *stream)
+ struct audio_stream_out *stream)
{
- struct generic_stream_out *out = (struct generic_stream_out *)stream;
- pthread_mutex_lock(&out->lock);
- do_out_standby(out);
-
- out->worker_exit = true;
- pthread_cond_signal(&out->worker_wake);
- pthread_mutex_unlock(&out->lock);
-
- pthread_join(out->worker_thread, NULL);
- pthread_mutex_destroy(&out->lock);
- audio_vbuffer_destroy(&out->buffer);
-
- struct generic_audio_device *adev = (struct generic_audio_device *) dev;
- pthread_mutex_lock(&adev->lock);
- list_remove(&out->stream_node);
- pthread_mutex_unlock(&adev->lock);
+ ALOGV("adev_close_output_stream...");
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
+ destroy_aec_reference_config(adev->aec);
+ struct alsa_stream_out* out = (struct alsa_stream_out*)stream;
+ fir_release(out->speaker_eq);
free(stream);
}
static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
{
- return 0;
+ ALOGV("adev_set_parameters");
+ return -ENOSYS;
}
static char * adev_get_parameters(const struct audio_hw_device *dev,
- const char *keys)
+ const char *keys)
{
+ ALOGV("adev_get_parameters");
return strdup("");
}
+static int adev_get_microphones(const struct audio_hw_device* dev,
+ struct audio_microphone_characteristic_t* mic_array,
+ size_t* mic_count) {
+ ALOGV("adev_get_microphones");
+ if ((mic_array == NULL) || (mic_count == NULL)) {
+ return -EINVAL;
+ }
+ get_mic_characteristics(mic_array, mic_count);
+ return 0;
+}
+
static int adev_init_check(const struct audio_hw_device *dev)
{
+ ALOGV("adev_init_check");
return 0;
}
static int adev_set_voice_volume(struct audio_hw_device *dev, float volume)
{
- // adev_set_voice_volume is a no op (simulates phones)
- return 0;
+ ALOGV("adev_set_voice_volume: %f", volume);
+ return -ENOSYS;
}
static int adev_set_master_volume(struct audio_hw_device *dev, float volume)
{
+ ALOGV("adev_set_master_volume: %f", volume);
return -ENOSYS;
}
static int adev_get_master_volume(struct audio_hw_device *dev, float *volume)
{
+ ALOGV("adev_get_master_volume: %f", *volume);
return -ENOSYS;
}
static int adev_set_master_mute(struct audio_hw_device *dev, bool muted)
{
+ ALOGV("adev_set_master_mute: %d", muted);
return -ENOSYS;
}
static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted)
{
+ ALOGV("adev_get_master_mute: %d", *muted);
return -ENOSYS;
}
static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode)
{
- // adev_set_mode is a no op (simulates phones)
+ ALOGV("adev_set_mode: %d", mode);
return 0;
}
static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
{
- struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+ ALOGV("adev_set_mic_mute: %d",state);
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
pthread_mutex_lock(&adev->lock);
adev->mic_mute = state;
pthread_mutex_unlock(&adev->lock);
@@ -1367,455 +957,206 @@
static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
{
- struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+ ALOGV("adev_get_mic_mute");
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
pthread_mutex_lock(&adev->lock);
*state = adev->mic_mute;
pthread_mutex_unlock(&adev->lock);
return 0;
}
-
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
- const struct audio_config *config)
+ const struct audio_config *config)
{
- return get_input_buffer_size(config->sample_rate, config->format, config->channel_mask);
+ size_t buffer_size =
+ get_input_buffer_size(CAPTURE_PERIOD_SIZE, config->format, config->channel_mask);
+ ALOGV("adev_get_input_buffer_size: %zu", buffer_size);
+ return buffer_size;
}
-// This must be called with adev->lock held.
-struct generic_stream_in *get_stream_in_by_io_handle_l(
- struct generic_audio_device *adev, audio_io_handle_t handle) {
- struct listnode *node;
+static int adev_open_input_stream(struct audio_hw_device* dev, audio_io_handle_t handle,
+ audio_devices_t devices, struct audio_config* config,
+ struct audio_stream_in** stream_in,
+ audio_input_flags_t flags __unused, const char* address __unused,
+ audio_source_t source) {
+ ALOGV("adev_open_input_stream...");
- list_for_each(node, &adev->in_streams) {
- struct generic_stream_in *in = node_to_item(
- node, struct generic_stream_in, stream_node);
- if (in->handle == handle) {
- return in;
- }
- }
- return NULL;
-}
-
-static void adev_close_input_stream(struct audio_hw_device *dev,
- struct audio_stream_in *stream)
-{
- struct generic_stream_in *in = (struct generic_stream_in *)stream;
- pthread_mutex_lock(&in->lock);
- do_in_standby(in);
-
- in->worker_exit = true;
- pthread_cond_signal(&in->worker_wake);
- pthread_mutex_unlock(&in->lock);
- pthread_join(in->worker_thread, NULL);
-
- if (in->stereo_to_mono_buf != NULL) {
- free(in->stereo_to_mono_buf);
- in->stereo_to_mono_buf_size = 0;
- }
-
- pthread_mutex_destroy(&in->lock);
- audio_vbuffer_destroy(&in->buffer);
-
- struct generic_audio_device *adev = (struct generic_audio_device *) dev;
- pthread_mutex_lock(&adev->lock);
- list_remove(&in->stream_node);
- pthread_mutex_unlock(&adev->lock);
- free(stream);
-}
-
-
-static int adev_open_input_stream(struct audio_hw_device *dev,
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- struct audio_stream_in **stream_in,
- audio_input_flags_t flags __unused,
- const char *address __unused,
- audio_source_t source __unused)
-{
- struct generic_audio_device *adev = (struct generic_audio_device *)dev;
- struct generic_stream_in *in;
+ struct alsa_audio_device *ladev = (struct alsa_audio_device *)dev;
+ struct alsa_stream_in *in;
+ struct pcm_params *params;
int ret = 0;
- if (refine_input_parameters(&config->sample_rate, &config->format, &config->channel_mask)) {
- ALOGE("Error opening input stream format %d, channel_mask %04x, sample_rate %u",
- config->format, config->channel_mask, config->sample_rate);
- ret = -EINVAL;
- goto error;
- }
- in = (struct generic_stream_in *)calloc(1, sizeof(struct generic_stream_in));
- if (!in) {
- ret = -ENOMEM;
- goto error;
- }
+ params = pcm_params_get(CARD_IN, PORT_BUILTIN_MIC, PCM_IN);
+ if (!params)
+ return -ENOSYS;
+
+ in = (struct alsa_stream_in *)calloc(1, sizeof(struct alsa_stream_in));
+ if (!in)
+ return -ENOMEM;
in->stream.common.get_sample_rate = in_get_sample_rate;
- in->stream.common.set_sample_rate = in_set_sample_rate; // no op
+ in->stream.common.set_sample_rate = in_set_sample_rate;
in->stream.common.get_buffer_size = in_get_buffer_size;
in->stream.common.get_channels = in_get_channels;
in->stream.common.get_format = in_get_format;
- in->stream.common.set_format = in_set_format; // no op
+ in->stream.common.set_format = in_set_format;
in->stream.common.standby = in_standby;
in->stream.common.dump = in_dump;
in->stream.common.set_parameters = in_set_parameters;
in->stream.common.get_parameters = in_get_parameters;
- in->stream.common.add_audio_effect = in_add_audio_effect; // no op
- in->stream.common.remove_audio_effect = in_remove_audio_effect; // no op
- in->stream.set_gain = in_set_gain; // no op
+ in->stream.common.add_audio_effect = in_add_audio_effect;
+ in->stream.common.remove_audio_effect = in_remove_audio_effect;
+ in->stream.set_gain = in_set_gain;
in->stream.read = in_read;
- in->stream.get_input_frames_lost = in_get_input_frames_lost; // no op
+ in->stream.get_input_frames_lost = in_get_input_frames_lost;
in->stream.get_capture_position = in_get_capture_position;
in->stream.get_active_microphones = in_get_active_microphones;
- pthread_mutex_init(&in->lock, (const pthread_mutexattr_t *) NULL);
- in->dev = adev;
- in->device = devices;
- memcpy(&in->req_config, config, sizeof(struct audio_config));
- memcpy(&in->pcm_config, &pcm_config_in, sizeof(struct pcm_config));
- in->pcm_config.rate = config->sample_rate;
- in->pcm_config.period_size = in->pcm_config.rate*IN_PERIOD_MS/1000;
+ in->config.channels = CHANNEL_STEREO;
+ if (source == AUDIO_SOURCE_ECHO_REFERENCE) {
+ in->config.rate = PLAYBACK_CODEC_SAMPLING_RATE;
+ } else {
+ in->config.rate = CAPTURE_CODEC_SAMPLING_RATE;
+ }
+ in->config.format = PCM_FORMAT_S32_LE;
+ in->config.period_size = CAPTURE_PERIOD_SIZE;
+ in->config.period_count = CAPTURE_PERIOD_COUNT;
- in->stereo_to_mono_buf = NULL;
- in->stereo_to_mono_buf_size = 0;
+ if (in->config.rate != config->sample_rate ||
+ audio_channel_count_from_in_mask(config->channel_mask) != CHANNEL_STEREO ||
+ in->config.format != pcm_format_from_audio_format(config->format) ) {
+ ret = -EINVAL;
+ }
+ ALOGI("adev_open_input_stream selects channels=%d rate=%d format=%d source=%d",
+ in->config.channels, in->config.rate, in->config.format, source);
+
+ in->dev = ladev;
in->standby = true;
- in->standby_position = 0;
- in->standby_exit_time.tv_sec = 0;
- in->standby_exit_time.tv_nsec = 0;
- in->standby_frames_read = 0;
+ in->unavailable = false;
+ in->source = source;
+ in->devices = devices;
- ret = audio_vbuffer_init(&in->buffer,
- in->pcm_config.period_size*in->pcm_config.period_count,
- in->pcm_config.channels *
- pcm_format_to_bits(in->pcm_config.format) >> 3);
- if (ret == 0) {
- pthread_cond_init(&in->worker_wake, NULL);
- in->worker_standby = true;
- in->worker_exit = false;
- pthread_create(&in->worker_thread, NULL, in_read_worker, in);
- }
- in->handle = handle;
+ config->format = in_get_format(&in->stream.common);
+ config->channel_mask = in_get_channels(&in->stream.common);
+ config->sample_rate = in_get_sample_rate(&in->stream.common);
- pthread_mutex_lock(&adev->lock);
- list_add_tail(&adev->in_streams, &in->stream_node);
- pthread_mutex_unlock(&adev->lock);
+ /* If AEC is in the app, only configure based on ECHO_REFERENCE spec.
+ * If AEC is in the HAL, configure using the given mic stream. */
+ bool aecInput = true;
+#if !defined(AEC_HAL)
+ aecInput = (in->source == AUDIO_SOURCE_ECHO_REFERENCE);
+#endif
- *stream_in = &in->stream;
-
-error:
- return ret;
-}
-
-
-static int adev_dump(const audio_hw_device_t *dev, int fd)
-{
- return 0;
-}
-
-static int adev_get_microphones(const audio_hw_device_t *dev,
- struct audio_microphone_characteristic_t *mic_array,
- size_t *mic_count)
-{
- if (mic_count == NULL) {
- return -ENOSYS;
- }
-
- if (*mic_count == 0) {
- *mic_count = 1;
- return 0;
- }
-
- if (mic_array == NULL) {
- return -ENOSYS;
- }
-
- strncpy(mic_array->device_id, "mic_goldfish", AUDIO_MICROPHONE_ID_MAX_LEN - 1);
- mic_array->device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- strncpy(mic_array->address, AUDIO_BOTTOM_MICROPHONE_ADDRESS,
- AUDIO_DEVICE_MAX_ADDRESS_LEN - 1);
- memset(mic_array->channel_mapping, AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED,
- sizeof(mic_array->channel_mapping));
- mic_array->location = AUDIO_MICROPHONE_LOCATION_UNKNOWN;
- mic_array->group = 0;
- mic_array->index_in_the_group = 0;
- mic_array->sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
- mic_array->max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
- mic_array->min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
- mic_array->directionality = AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
- mic_array->num_frequency_responses = 0;
- mic_array->geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- mic_array->geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- mic_array->geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- mic_array->orientation.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- mic_array->orientation.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- mic_array->orientation.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
-
- *mic_count = 1;
- return 0;
-}
-
-static int adev_create_audio_patch(struct audio_hw_device *dev,
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *handle) {
- if (num_sources != 1 || num_sinks == 0 || num_sinks > AUDIO_PATCH_PORTS_MAX) {
- return -EINVAL;
- }
-
- if (sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
- // If source is a device, the number of sinks should be 1.
- if (num_sinks != 1 || sinks[0].type != AUDIO_PORT_TYPE_MIX) {
+ if ((ret == 0) && aecInput) {
+ int aec_ret = init_aec_mic_config(ladev->aec, in);
+ if (aec_ret) {
+ ALOGE("AEC: Mic config init failed!");
return -EINVAL;
}
- } else if (sources[0].type == AUDIO_PORT_TYPE_MIX) {
- // If source is a mix, all sinks should be device.
- for (unsigned int i = 0; i < num_sinks; i++) {
- if (sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
- ALOGE("%s() invalid sink type %#x for mix source", __func__, sinks[i].type);
- return -EINVAL;
- }
- }
+ }
+
+ if (ret) {
+ free(in);
} else {
- // All other cases are invalid.
- return -EINVAL;
+ *stream_in = &in->stream;
}
- struct generic_audio_device* adev = (struct generic_audio_device*) dev;
- int ret = 0;
- bool generatedPatchHandle = false;
- pthread_mutex_lock(&adev->lock);
- if (*handle == AUDIO_PATCH_HANDLE_NONE) {
- *handle = ++adev->next_patch_handle;
- generatedPatchHandle = true;
- }
+#if DEBUG_AEC
+ remove("/data/local/traces/aec_ref.pcm");
+ remove("/data/local/traces/aec_in.pcm");
+ remove("/data/local/traces/aec_ref_timestamps.txt");
+ remove("/data/local/traces/aec_in_timestamps.txt");
+#endif
+ return ret;
+}
- // Only handle patches for mix->devices and device->mix case.
- if (sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
- struct generic_stream_in *in =
- get_stream_in_by_io_handle_l(adev, sinks[0].ext.mix.handle);
- if (in == NULL) {
- ALOGE("%s()can not find stream with handle(%d)", __func__, sources[0].ext.mix.handle);
- ret = -EINVAL;
- goto error;
- }
+static void adev_close_input_stream(struct audio_hw_device *dev,
+ struct audio_stream_in *stream)
+{
+ ALOGV("adev_close_input_stream...");
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)dev;
+ destroy_aec_mic_config(adev->aec);
+ free(stream);
+ return;
+}
- // Check if the patch handle match the recorded one if a valid patch handle is passed.
- if (!generatedPatchHandle && in->patch_handle != *handle) {
- ALOGE("%s() the patch handle(%d) does not match recorded one(%d) for stream "
- "with handle(%d) when creating audio patch for device->mix",
- __func__, *handle, in->patch_handle, in->handle);
- ret = -EINVAL;
- goto error;
- }
- pthread_mutex_lock(&in->lock);
- in->device = sources[0].ext.device.type;
- pthread_mutex_unlock(&in->lock);
- in->patch_handle = *handle;
- } else {
- struct generic_stream_out *out =
- get_stream_out_by_io_handle_l(adev, sources[0].ext.mix.handle);
- if (out == NULL) {
- ALOGE("%s()can not find stream with handle(%d)", __func__, sources[0].ext.mix.handle);
- ret = -EINVAL;
- goto error;
- }
-
- // Check if the patch handle match the recorded one if a valid patch handle is passed.
- if (!generatedPatchHandle && out->patch_handle != *handle) {
- ALOGE("%s() the patch handle(%d) does not match recorded one(%d) for stream "
- "with handle(%d) when creating audio patch for mix->device",
- __func__, *handle, out->patch_handle, out->handle);
- ret = -EINVAL;
- pthread_mutex_unlock(&out->lock);
- goto error;
- }
- pthread_mutex_lock(&out->lock);
- for (out->num_devices = 0; out->num_devices < num_sinks; out->num_devices++) {
- out->devices[out->num_devices] = sinks[out->num_devices].ext.device.type;
- }
- pthread_mutex_unlock(&out->lock);
- out->patch_handle = *handle;
- }
-
-error:
- if (ret != 0 && generatedPatchHandle) {
- *handle = AUDIO_PATCH_HANDLE_NONE;
- }
- pthread_mutex_unlock(&adev->lock);
+static int adev_dump(const audio_hw_device_t *device, int fd)
+{
+ ALOGV("adev_dump");
return 0;
}
-// This must be called with adev->lock held.
-struct generic_stream_out *get_stream_out_by_patch_handle_l(
- struct generic_audio_device *adev, audio_patch_handle_t patch_handle) {
- struct listnode *node;
-
- list_for_each(node, &adev->out_streams) {
- struct generic_stream_out *out = node_to_item(
- node, struct generic_stream_out, stream_node);
- if (out->patch_handle == patch_handle) {
- return out;
- }
- }
- return NULL;
-}
-
-// This must be called with adev->lock held.
-struct generic_stream_in *get_stream_in_by_patch_handle_l(
- struct generic_audio_device *adev, audio_patch_handle_t patch_handle) {
- struct listnode *node;
-
- list_for_each(node, &adev->in_streams) {
- struct generic_stream_in *in = node_to_item(
- node, struct generic_stream_in, stream_node);
- if (in->patch_handle == patch_handle) {
- return in;
- }
- }
- return NULL;
-}
-
-static int adev_release_audio_patch(struct audio_hw_device *dev,
- audio_patch_handle_t patch_handle) {
- struct generic_audio_device *adev = (struct generic_audio_device *) dev;
-
- pthread_mutex_lock(&adev->lock);
- struct generic_stream_out *out = get_stream_out_by_patch_handle_l(adev, patch_handle);
- if (out != NULL) {
- pthread_mutex_lock(&out->lock);
- out->num_devices = 0;
- memset(out->devices, 0, sizeof(out->devices));
- pthread_mutex_unlock(&out->lock);
- out->patch_handle = AUDIO_PATCH_HANDLE_NONE;
- pthread_mutex_unlock(&adev->lock);
- return 0;
- }
- struct generic_stream_in *in = get_stream_in_by_patch_handle_l(adev, patch_handle);
- if (in != NULL) {
- pthread_mutex_lock(&in->lock);
- in->device = AUDIO_DEVICE_NONE;
- pthread_mutex_unlock(&in->lock);
- in->patch_handle = AUDIO_PATCH_HANDLE_NONE;
- pthread_mutex_unlock(&adev->lock);
- return 0;
- }
-
- pthread_mutex_unlock(&adev->lock);
- ALOGW("%s() cannot find stream for patch handle: %d", __func__, patch_handle);
- return -EINVAL;
-}
-
-static int adev_close(hw_device_t *dev)
+static int adev_close(hw_device_t *device)
{
- struct generic_audio_device *adev = (struct generic_audio_device *)dev;
- int ret = 0;
- if (!adev)
- return 0;
+ ALOGV("adev_close");
- pthread_mutex_lock(&adev_init_lock);
-
- if (audio_device_ref_count == 0) {
- ALOGE("adev_close called when ref_count 0");
- ret = -EINVAL;
- goto error;
- }
-
- if ((--audio_device_ref_count) == 0) {
- if (adev->mixer) {
- mixer_close(adev->mixer);
- }
- free(adev);
- }
-
-error:
- pthread_mutex_unlock(&adev_init_lock);
- return ret;
+ struct alsa_audio_device *adev = (struct alsa_audio_device *)device;
+ release_aec(adev->aec);
+ free(device);
+ return 0;
}
static int adev_open(const hw_module_t* module, const char* name,
- hw_device_t** device)
+ hw_device_t** device)
{
- static struct generic_audio_device *adev;
+ struct alsa_audio_device *adev;
+
+ ALOGV("adev_open: %s", name);
if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
return -EINVAL;
- pthread_mutex_lock(&adev_init_lock);
- if (audio_device_ref_count != 0) {
- *device = &adev->device.common;
- audio_device_ref_count++;
- ALOGV("%s: returning existing instance of adev", __func__);
- ALOGV("%s: exit", __func__);
- goto unlock;
- }
- adev = calloc(1, sizeof(struct generic_audio_device));
+ adev = calloc(1, sizeof(struct alsa_audio_device));
+ if (!adev)
+ return -ENOMEM;
- pthread_mutex_init(&adev->lock, (const pthread_mutexattr_t *) NULL);
+ adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
+ adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
+ adev->hw_device.common.module = (struct hw_module_t *) module;
+ adev->hw_device.common.close = adev_close;
+ adev->hw_device.init_check = adev_init_check;
+ adev->hw_device.set_voice_volume = adev_set_voice_volume;
+ adev->hw_device.set_master_volume = adev_set_master_volume;
+ adev->hw_device.get_master_volume = adev_get_master_volume;
+ adev->hw_device.set_master_mute = adev_set_master_mute;
+ adev->hw_device.get_master_mute = adev_get_master_mute;
+ adev->hw_device.set_mode = adev_set_mode;
+ adev->hw_device.set_mic_mute = adev_set_mic_mute;
+ adev->hw_device.get_mic_mute = adev_get_mic_mute;
+ adev->hw_device.set_parameters = adev_set_parameters;
+ adev->hw_device.get_parameters = adev_get_parameters;
+ adev->hw_device.get_input_buffer_size = adev_get_input_buffer_size;
+ adev->hw_device.open_output_stream = adev_open_output_stream;
+ adev->hw_device.close_output_stream = adev_close_output_stream;
+ adev->hw_device.open_input_stream = adev_open_input_stream;
+ adev->hw_device.close_input_stream = adev_close_input_stream;
+ adev->hw_device.dump = adev_dump;
+ adev->hw_device.get_microphones = adev_get_microphones;
- adev->device.common.tag = HARDWARE_DEVICE_TAG;
- adev->device.common.version = AUDIO_DEVICE_API_VERSION_3_0;
- adev->device.common.module = (struct hw_module_t *) module;
- adev->device.common.close = adev_close;
+ *device = &adev->hw_device.common;
- adev->device.init_check = adev_init_check; // no op
- adev->device.set_voice_volume = adev_set_voice_volume; // no op
- adev->device.set_master_volume = adev_set_master_volume; // no op
- adev->device.get_master_volume = adev_get_master_volume; // no op
- adev->device.set_master_mute = adev_set_master_mute; // no op
- adev->device.get_master_mute = adev_get_master_mute; // no op
- adev->device.set_mode = adev_set_mode; // no op
- adev->device.set_mic_mute = adev_set_mic_mute;
- adev->device.get_mic_mute = adev_get_mic_mute;
- adev->device.set_parameters = adev_set_parameters; // no op
- adev->device.get_parameters = adev_get_parameters; // no op
- adev->device.get_input_buffer_size = adev_get_input_buffer_size;
- adev->device.open_output_stream = adev_open_output_stream;
- adev->device.close_output_stream = adev_close_output_stream;
- adev->device.open_input_stream = adev_open_input_stream;
- adev->device.close_input_stream = adev_close_input_stream;
- adev->device.dump = adev_dump;
- adev->device.get_microphones = adev_get_microphones;
- adev->device.create_audio_patch = adev_create_audio_patch;
- adev->device.release_audio_patch = adev_release_audio_patch;
+ adev->mixer = mixer_open(CARD_OUT);
- *device = &adev->device.common;
-
- adev->next_patch_handle = AUDIO_PATCH_HANDLE_NONE;
- list_init(&adev->out_streams);
- list_init(&adev->in_streams);
-
- adev->mixer = mixer_open(PCM_CARD);
- struct mixer_ctl *ctl;
-
- // Set default mixer ctls
- // Enable channels and set volume
- for (int i = 0; i < (int)mixer_get_num_ctls(adev->mixer); i++) {
- ctl = mixer_get_ctl(adev->mixer, i);
- ALOGD("mixer %d name %s", i, mixer_ctl_get_name(ctl));
- if (!strcmp(mixer_ctl_get_name(ctl), "Master Playback Volume") ||
- !strcmp(mixer_ctl_get_name(ctl), "Capture Volume")) {
- for (int z = 0; z < (int)mixer_ctl_get_num_values(ctl); z++) {
- ALOGD("set ctl %d to %d", z, 100);
- mixer_ctl_set_percent(ctl, z, 100);
- }
- continue;
- }
- if (!strcmp(mixer_ctl_get_name(ctl), "Master Playback Switch") ||
- !strcmp(mixer_ctl_get_name(ctl), "Capture Switch")) {
- for (int z = 0; z < (int)mixer_ctl_get_num_values(ctl); z++) {
- ALOGD("set ctl %d to %d", z, 1);
- mixer_ctl_set_value(ctl, z, 1);
- }
- continue;
- }
+ if (!adev->mixer) {
+ ALOGE("Unable to open the mixer, aborting.");
+ return -EINVAL;
}
- audio_device_ref_count++;
+ adev->audio_route = audio_route_init(CARD_OUT, MIXER_XML_PATH);
+ if (!adev->audio_route) {
+ ALOGE("%s: Failed to init audio route controls, aborting.", __func__);
+ return -EINVAL;
+ }
-unlock:
- pthread_mutex_unlock(&adev_init_lock);
+ pthread_mutex_lock(&adev->lock);
+ if (init_aec(CAPTURE_CODEC_SAMPLING_RATE, NUM_AEC_REFERENCE_CHANNELS,
+ CHANNEL_STEREO, &adev->aec)) {
+ pthread_mutex_unlock(&adev->lock);
+ return -EINVAL;
+ }
+ pthread_mutex_unlock(&adev->lock);
+
return 0;
}
@@ -1829,7 +1170,7 @@
.module_api_version = AUDIO_MODULE_API_VERSION_0_1,
.hal_api_version = HARDWARE_HAL_API_VERSION,
.id = AUDIO_HARDWARE_MODULE_ID,
- .name = "Generic audio HW HAL",
+ .name = "Yukawa audio HW HAL",
.author = "The Android Open Source Project",
.methods = &hal_module_methods,
},
diff --git a/audio/audio_hw.h b/audio/audio_hw.h
new file mode 100644
index 0000000..3e8e27c
--- /dev/null
+++ b/audio/audio_hw.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _YUKAWA_AUDIO_HW_H_
+#define _YUKAWA_AUDIO_HW_H_
+
+#include <hardware/audio.h>
+#include <tinyalsa/asoundlib.h>
+
+#include "fir_filter.h"
+
+#define CARD_OUT 0
+#define PORT_HDMI 0
+#define PORT_INTERNAL_SPEAKER 1
+#define CARD_IN 0
+#define PORT_BUILTIN_MIC 3
+
+#define MIXER_XML_PATH "/vendor/etc/mixer_paths.xml"
+/* Minimum granularity - Arbitrary but small value */
+#define CODEC_BASE_FRAME_COUNT 32
+
+#define CHANNEL_STEREO 2
+
+#ifdef AEC_HAL
+#define NUM_AEC_REFERENCE_CHANNELS 1
+#else
+/* App AEC uses 2-channel reference */
+#define NUM_AEC_REFERENCE_CHANNELS 2
+#endif /* #ifdef AEC_HAL */
+
+#define DEBUG_AEC 0
+
+#define PCM_OPEN_RETRIES 100
+#define PCM_OPEN_WAIT_TIME_MS 20
+
+/* Capture codec parameters */
+/* Set up a capture period of 32 ms:
+ * CAPTURE_PERIOD = PERIOD_SIZE / SAMPLE_RATE, so (32e-3) = PERIOD_SIZE / (16e3)
+ * => PERIOD_SIZE = 512 frames, where each "frame" consists of 1 sample of every channel (here, 2ch) */
+#define CAPTURE_PERIOD_MULTIPLIER 16
+#define CAPTURE_PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * CAPTURE_PERIOD_MULTIPLIER)
+#define CAPTURE_PERIOD_COUNT 4
+#define CAPTURE_PERIOD_START_THRESHOLD 0
+#define CAPTURE_CODEC_SAMPLING_RATE 16000
+
+/* Playback codec parameters */
+/* number of base blocks in a short period (low latency) */
+#define PLAYBACK_PERIOD_MULTIPLIER 32 /* 21 ms */
+/* number of frames per short period (low latency) */
+#define PLAYBACK_PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * PLAYBACK_PERIOD_MULTIPLIER)
+/* number of pseudo periods for low latency playback */
+#define PLAYBACK_PERIOD_COUNT 4
+#define PLAYBACK_PERIOD_START_THRESHOLD 2
+#define PLAYBACK_CODEC_SAMPLING_RATE 48000
+#define MIN_WRITE_SLEEP_US 5000
+
+#define SPEAKER_EQ_FILE "/vendor/etc/speaker_eq.fir"
+#define SPEAKER_MAX_EQ_LENGTH 512
+
+struct alsa_audio_device {
+ struct audio_hw_device hw_device;
+
+ pthread_mutex_t lock; /* see notes in in_read/out_write on mutex acquisition order */
+ struct alsa_stream_in *active_input;
+ struct alsa_stream_out *active_output;
+ struct audio_route *audio_route;
+ struct mixer *mixer;
+ bool mic_mute;
+ struct aec_t *aec;
+};
+
+struct alsa_stream_in {
+ struct audio_stream_in stream;
+
+ pthread_mutex_t lock; /* see note in in_read() on mutex acquisition order */
+ audio_devices_t devices;
+ struct pcm_config config;
+ struct pcm *pcm;
+ bool unavailable;
+ bool standby;
+ struct alsa_audio_device *dev;
+ int read_threshold;
+ unsigned int frames_read;
+ uint64_t timestamp_nsec;
+ audio_source_t source;
+};
+
+struct alsa_stream_out {
+ struct audio_stream_out stream;
+
+ pthread_mutex_t lock; /* see note in out_write() on mutex acquisition order */
+ audio_devices_t devices;
+ struct pcm_config config;
+ struct pcm *pcm;
+ bool unavailable;
+ int standby;
+ struct alsa_audio_device *dev;
+ int write_threshold;
+ unsigned int frames_written;
+ struct timespec timestamp;
+ fir_filter_t* speaker_eq;
+};
+
+/* 'bytes' is the number of bytes written to the audio FIFO for which 'timestamp' is valid.
+ * 'available' is the number of frames available to read (for input) or still to be played
+ * (for output) in the PCM buffer.
+ * timestamp and available are updated by pcm_get_htimestamp(), so they use the same
+ * datatypes as the corresponding arguments to that function. */
+struct aec_info {
+ struct timespec timestamp;
+ uint64_t timestamp_usec;
+ unsigned int available;
+ size_t bytes;
+};
+
+#endif /* #ifndef _YUKAWA_AUDIO_HW_H_ */
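As a quick sanity check on the period arithmetic encoded by the macros above, here is a small standalone C snippet (editorial sketch, not part of the patch; it simply re-derives the durations quoted in the header comments):

#include <stdio.h>

/* Mirror the relevant defines from audio_hw.h */
#define CODEC_BASE_FRAME_COUNT 32
#define CAPTURE_PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * 16)   /* 512 frames  */
#define CAPTURE_CODEC_SAMPLING_RATE 16000
#define PLAYBACK_PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * 32)  /* 1024 frames */
#define PLAYBACK_CODEC_SAMPLING_RATE 48000

int main(void) {
    /* 512 / 16000 = 32 ms capture period, as stated in the header comment */
    printf("capture period:  %.1f ms\n",
           1000.0 * CAPTURE_PERIOD_SIZE / CAPTURE_CODEC_SAMPLING_RATE);
    /* 1024 / 48000 ~= 21.3 ms playback period ("21 ms" in the header comment) */
    printf("playback period: %.1f ms\n",
           1000.0 * PLAYBACK_PERIOD_SIZE / PLAYBACK_CODEC_SAMPLING_RATE);
    return 0;
}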
diff --git a/audio/fifo_wrapper.cpp b/audio/fifo_wrapper.cpp
new file mode 100644
index 0000000..7bc9079
--- /dev/null
+++ b/audio/fifo_wrapper.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audio_utils_fifo_wrapper"
+// #define LOG_NDEBUG 0
+
+#include <stdint.h>
+#include <errno.h>
+#include <log/log.h>
+#include <audio_utils/fifo.h>
+#include "fifo_wrapper.h"
+
+struct audio_fifo_itfe {
+ audio_utils_fifo *p_fifo;
+ audio_utils_fifo_reader *p_fifo_reader;
+ audio_utils_fifo_writer *p_fifo_writer;
+ int8_t *p_buffer;
+};
+
+void *fifo_init(uint32_t bytes, bool reader_throttles_writer) {
+    struct audio_fifo_itfe *interface = new struct audio_fifo_itfe;
+    /* Use nothrow new so the NULL check below is meaningful. */
+    interface->p_buffer = new (std::nothrow) int8_t[bytes];
+    if (interface->p_buffer == NULL) {
+        ALOGE("Failed to allocate fifo buffer!");
+        delete interface;
+        return NULL;
+    }
+ interface->p_fifo = new audio_utils_fifo(bytes, 1, interface->p_buffer, reader_throttles_writer);
+ interface->p_fifo_writer = new audio_utils_fifo_writer(*interface->p_fifo);
+ interface->p_fifo_reader = new audio_utils_fifo_reader(*interface->p_fifo);
+
+ return (void *)interface;
+}
+
+void fifo_release(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ delete interface->p_fifo_writer;
+ delete interface->p_fifo_reader;
+ delete interface->p_fifo;
+ delete[] interface->p_buffer;
+ delete interface;
+}
+
+ssize_t fifo_read(void *fifo_itfe, void *buffer, size_t bytes) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_reader->read(buffer, bytes);
+}
+
+ssize_t fifo_write(void *fifo_itfe, void *buffer, size_t bytes) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_writer->write(buffer, bytes);
+}
+
+ssize_t fifo_available_to_read(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_reader->available();
+}
+
+ssize_t fifo_available_to_write(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_writer->available();
+}
+
+ssize_t fifo_flush(void *fifo_itfe) {
+ struct audio_fifo_itfe *interface = static_cast<struct audio_fifo_itfe *>(fifo_itfe);
+ return interface->p_fifo_reader->flush();
+}
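The wrapper above just hides the C++ audio_utils FIFO behind a C-callable interface (declared in fifo_wrapper.h below). A minimal sketch of how a C caller might drive it; the byte counts are illustrative, and since the FIFO is created with a frame size of 1, all sizes are in bytes:

    /* Hypothetical C usage sketch; sizes are illustrative only. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/types.h>
    #include "fifo_wrapper.h"

    static void fifo_demo(void) {
        int16_t frames[256];
        memset(frames, 0, sizeof(frames));

        /* reader_throttles_writer = false: the writer is never blocked by the reader */
        void *fifo = fifo_init(8192 /* bytes */, false);
        if (fifo == NULL)
            return;

        if (fifo_available_to_write(fifo) >= (ssize_t)sizeof(frames))
            fifo_write(fifo, frames, sizeof(frames));

        if (fifo_available_to_read(fifo) >= (ssize_t)sizeof(frames))
            fifo_read(fifo, frames, sizeof(frames));

        fifo_flush(fifo);    /* discard anything still queued */
        fifo_release(fifo);
    }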
diff --git a/audio/fifo_wrapper.h b/audio/fifo_wrapper.h
new file mode 100644
index 0000000..e9469ef
--- /dev/null
+++ b/audio/fifo_wrapper.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _AUDIO_FIFO_WRAPPER_H_
+#define _AUDIO_FIFO_WRAPPER_H_
+
+/* Make the header self-contained for C callers. */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *fifo_init(uint32_t bytes, bool reader_throttles_writer);
+void fifo_release(void *fifo_itfe);
+ssize_t fifo_read(void *fifo_itfe, void *buffer, size_t bytes);
+ssize_t fifo_write(void *fifo_itfe, void *buffer, size_t bytes);
+ssize_t fifo_available_to_read(void *fifo_itfe);
+ssize_t fifo_available_to_write(void *fifo_itfe);
+ssize_t fifo_flush(void *fifo_itfe);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* #ifndef _AUDIO_FIFO_WRAPPER_H_ */
diff --git a/audio/fir_filter.c b/audio/fir_filter.c
new file mode 100644
index 0000000..c648fc0
--- /dev/null
+++ b/audio/fir_filter.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audio_hw_fir_filter"
+//#define LOG_NDEBUG 0
+
+#include <assert.h>
+#include <audio_utils/primitives.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <log/log.h>
+#include <malloc.h>
+#include <string.h>
+
+#include "fir_filter.h"
+
+#ifdef __ARM_NEON
+#include "arm_neon.h"
+#endif /* #ifdef __ARM_NEON */
+
+fir_filter_t* fir_init(uint32_t channels, fir_filter_mode_t mode, uint32_t filter_length,
+ uint32_t input_length, int16_t* coeffs) {
+ if ((channels == 0) || (filter_length == 0) || (coeffs == NULL)) {
+ ALOGE("%s: Invalid channel count, filter length or coefficient array.", __func__);
+ return NULL;
+ }
+
+ fir_filter_t* fir = (fir_filter_t*)calloc(1, sizeof(fir_filter_t));
+ if (fir == NULL) {
+ ALOGE("%s: Unable to allocate memory for fir_filter.", __func__);
+ return NULL;
+ }
+
+ fir->channels = channels;
+ fir->filter_length = filter_length;
+ /* Default: same filter coeffs for all channels */
+ fir->mode = FIR_SINGLE_FILTER;
+ uint32_t coeff_bytes = fir->filter_length * sizeof(int16_t);
+ if (mode == FIR_PER_CHANNEL_FILTER) {
+ fir->mode = FIR_PER_CHANNEL_FILTER;
+ coeff_bytes = fir->filter_length * fir->channels * sizeof(int16_t);
+ }
+
+ fir->coeffs = (int16_t*)malloc(coeff_bytes);
+ if (fir->coeffs == NULL) {
+ ALOGE("%s: Unable to allocate memory for FIR coeffs", __func__);
+ goto exit_1;
+ }
+ memcpy(fir->coeffs, coeffs, coeff_bytes);
+
+ fir->buffer_size = (input_length + fir->filter_length) * fir->channels;
+ fir->state = (int16_t*)malloc(fir->buffer_size * sizeof(int16_t));
+ if (fir->state == NULL) {
+ ALOGE("%s: Unable to allocate memory for FIR state", __func__);
+ goto exit_2;
+ }
+
+#ifdef __ARM_NEON
+ ALOGI("%s: Using ARM Neon", __func__);
+#endif /* #ifdef __ARM_NEON */
+
+ fir_reset(fir);
+ return fir;
+
+exit_2:
+ free(fir->coeffs);
+exit_1:
+ free(fir);
+ return NULL;
+}
+
+void fir_release(fir_filter_t* fir) {
+ if (fir == NULL) {
+ return;
+ }
+ free(fir->state);
+ free(fir->coeffs);
+ free(fir);
+}
+
+void fir_reset(fir_filter_t* fir) {
+ if (fir == NULL) {
+ return;
+ }
+ memset(fir->state, 0, fir->buffer_size * sizeof(int16_t));
+}
+
+void fir_process_interleaved(fir_filter_t* fir, int16_t* input, int16_t* output, uint32_t samples) {
+ assert(fir != NULL);
+
+ int start_offset = (fir->filter_length - 1) * fir->channels;
+ memcpy(&fir->state[start_offset], input, samples * fir->channels * sizeof(int16_t));
+ bool use_2nd_set_coeffs = (fir->channels > 1) && (fir->mode == FIR_PER_CHANNEL_FILTER);
+ int16_t* p_coeff_A = &fir->coeffs[0];
+ int16_t* p_coeff_B = use_2nd_set_coeffs ? &fir->coeffs[fir->filter_length] : &fir->coeffs[0];
+ int16_t* p_output;
+ for (int ch = 0; ch < fir->channels; ch += 2) {
+ p_output = &output[ch];
+ int offset = start_offset + ch;
+ for (int s = 0; s < samples; s++) {
+ int32_t acc_A = 0;
+ int32_t acc_B = 0;
+
+#ifdef __ARM_NEON
+ int32x4_t acc_vec = vdupq_n_s32(0);
+ for (int k = 0; k < fir->filter_length; k++, offset -= fir->channels) {
+ int16x4_t coeff_vec = vdup_n_s16(p_coeff_A[k]);
+ coeff_vec = vset_lane_s16(p_coeff_B[k], coeff_vec, 1);
+ int16x4_t input_vec = vld1_s16(&fir->state[offset]);
+ acc_vec = vmlal_s16(acc_vec, coeff_vec, input_vec);
+ }
+ acc_A = vgetq_lane_s32(acc_vec, 0);
+ acc_B = vgetq_lane_s32(acc_vec, 1);
+#else
+ for (int k = 0; k < fir->filter_length; k++, offset -= fir->channels) {
+ int32_t input_A = (int32_t)(fir->state[offset]);
+ int32_t coeff_A = (int32_t)(p_coeff_A[k]);
+ int32_t input_B = (int32_t)(fir->state[offset + 1]);
+ int32_t coeff_B = (int32_t)(p_coeff_B[k]);
+ acc_A += (input_A * coeff_A);
+ acc_B += (input_B * coeff_B);
+ }
+#endif /* #ifdef __ARM_NEON */
+
+ *p_output = clamp16(acc_A >> 15);
+ if (ch < fir->channels - 1) {
+ *(p_output + 1) = clamp16(acc_B >> 15);
+ }
+ /* Move to next sample */
+ p_output += fir->channels;
+ offset += (fir->filter_length + 1) * fir->channels;
+ }
+ if (use_2nd_set_coeffs) {
+ p_coeff_A += (fir->filter_length << 1);
+ p_coeff_B += (fir->filter_length << 1);
+ }
+ }
+ memmove(fir->state, &fir->state[samples * fir->channels],
+ (fir->filter_length - 1) * fir->channels * sizeof(int16_t));
+}
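For reference, a minimal sketch of exercising this filter on its own. The accumulators are shifted right by 15 before clamping, so the coefficients are effectively Q15 fixed point: a single coefficient of 32767 at tap 0 is roughly unity gain. Names and sizes here are hypothetical:

    /* Hypothetical pass-through FIR on interleaved stereo Q15 samples. */
    #include <stdint.h>
    #include <string.h>
    #include "fir_filter.h"

    #define DEMO_TAPS     16
    #define DEMO_FRAMES   64
    #define DEMO_CHANNELS 2

    static void fir_demo(void) {
        int16_t coeffs[DEMO_TAPS] = { 32767 /* ~1.0 in Q15, remaining taps zero */ };
        int16_t in[DEMO_FRAMES * DEMO_CHANNELS];
        int16_t out[DEMO_FRAMES * DEMO_CHANNELS];
        memset(in, 0, sizeof(in));
        in[0] = 1000;    /* impulse on the left channel */

        /* One coefficient set shared by both channels. */
        fir_filter_t *fir = fir_init(DEMO_CHANNELS, FIR_SINGLE_FILTER, DEMO_TAPS,
                                     DEMO_FRAMES, coeffs);
        if (fir == NULL)
            return;

        /* 'samples' is frames per channel; the state copy is samples * channels values. */
        fir_process_interleaved(fir, in, out, DEMO_FRAMES);

        fir_release(fir);
    }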
diff --git a/audio/fir_filter.h b/audio/fir_filter.h
new file mode 100644
index 0000000..d8c6e91
--- /dev/null
+++ b/audio/fir_filter.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIR_FILTER_H
+#define FIR_FILTER_H
+
+#include <stdint.h>
+
+typedef enum fir_filter_mode { FIR_SINGLE_FILTER = 0, FIR_PER_CHANNEL_FILTER } fir_filter_mode_t;
+
+typedef struct fir_filter {
+ fir_filter_mode_t mode;
+ uint32_t channels;
+ uint32_t filter_length;
+ uint32_t buffer_size;
+ int16_t* coeffs;
+ int16_t* state;
+} fir_filter_t;
+
+fir_filter_t* fir_init(uint32_t channels, fir_filter_mode_t mode, uint32_t filter_length,
+ uint32_t input_length, int16_t* coeffs);
+void fir_release(fir_filter_t* fir);
+void fir_reset(fir_filter_t* fir);
+void fir_process_interleaved(fir_filter_t* fir, int16_t* input, int16_t* output, uint32_t samples);
+
+#endif /* #ifndef FIR_FILTER_H */
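One detail worth calling out from fir_init() and fir_process_interleaved(): with FIR_PER_CHANNEL_FILTER the coefficient array holds filter_length taps per channel, laid out channel after channel (channel 0's taps first, then channel 1's, and so on). A hypothetical sketch of building a stereo EQ that attenuates only the right channel:

    /* Hypothetical per-channel coefficient layout; values are illustrative Q15. */
    #include <stdint.h>
    #include "fir_filter.h"

    #define EQ_TAPS 8

    static fir_filter_t *make_stereo_eq(uint32_t frames_per_call) {
        int16_t coeffs[2 * EQ_TAPS] = {
            /* channel 0 (left) taps */
            32767, 0, 0, 0, 0, 0, 0, 0,
            /* channel 1 (right) taps, ~0.5 gain */
            16384, 0, 0, 0, 0, 0, 0, 0,
        };
        /* fir_init() copies the coefficients, so a local array is fine here. */
        return fir_init(2, FIR_PER_CHANNEL_FILTER, EQ_TAPS, frames_per_call, coeffs);
    }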