/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "audio_hw_hikey"
//#define LOG_NDEBUG 0

#include <errno.h>
#include <malloc.h>
#include <pthread.h>
#include <stdint.h>
#include <sys/time.h>
#include <stdlib.h>
#include <string.h>   /* memset/memcpy/strcmp/strdup used below */
#include <unistd.h>

#include <log/log.h>
#include <cutils/str_parms.h>
#include <cutils/properties.h>

#include <hardware/hardware.h>
#include <system/audio.h>
#include <hardware/audio.h>

#include <sound/asound.h>
#include <tinyalsa/asoundlib.h>
#include <audio_utils/resampler.h>
#include <audio_utils/echo_reference.h>
#include <hardware/audio_effect.h>
#include <hardware/audio_alsaops.h>
#include <audio_effects/effect_aec.h>

#include <sys/ioctl.h>

#define CARD_OUT 0
#define PORT_CODEC 0
/* Minimum granularity - arbitrary but small value */
#define CODEC_BASE_FRAME_COUNT 32

/* number of base blocks in a short period (low latency) */
#define PERIOD_MULTIPLIER 32 /* 21 ms */
/* number of frames per short period (low latency) */
#define PERIOD_SIZE (CODEC_BASE_FRAME_COUNT * PERIOD_MULTIPLIER)
/* number of pseudo periods for low latency playback */
#define PLAYBACK_PERIOD_COUNT 4
#define PLAYBACK_PERIOD_START_THRESHOLD 2
#define CODEC_SAMPLING_RATE 48000
#define CHANNEL_STEREO 2
#define MIN_WRITE_SLEEP_US 5000
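/*
 * With these values one period is 32 * 32 = 1024 frames, i.e. ~21.3 ms at
 * 48 kHz, and the full playback buffer is 4 periods (~85 ms).
 */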

#ifdef ENABLE_XAF_DSP_DEVICE
#include <fcntl.h>   /* open() for the hifi DSP misc device */
#include <signal.h>  /* sigaction()/SIGUSR1 used by the DSP thread exit handler */
#include "xaf-utils-test.h"
#include "audio/xa_vorbis_dec_api.h"
#include "audio/xa-audio-decoder-api.h"
#define NUM_COMP_IN_GRAPH 1

struct alsa_audio_device;

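/*
 * Per-device state for the optional Xtensa Audio Framework (XAF) playback
 * path: handles for the DSP device and decoder component, the fixed playback
 * format, the decoder input buffers, and bookkeeping for init/start state.
 */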
struct xaf_dsp_device {
    void *p_adev;
    void *p_decoder;
    xaf_info_t comp_info;
    /* ...playback format */
    xaf_format_t pb_format;
    xaf_comp_status dec_status;
    int dec_info[4];
    void *dec_inbuf[2];
    int read_length;
    xf_id_t dec_id;
    int xaf_started;
    mem_obj_t *mem_handle;
    int num_comp;
    int (*dec_setup)(void *p_comp, struct alsa_audio_device *audio_device);
    int xafinitdone;
};
#endif

struct stub_stream_in {
    struct audio_stream_in stream;
};

struct alsa_audio_device {
    struct audio_hw_device hw_device;

    pthread_mutex_t lock;   /* see note below on mutex acquisition order */
    int devices;
    struct alsa_stream_in *active_input;
    struct alsa_stream_out *active_output;
    bool mic_mute;
#ifdef ENABLE_XAF_DSP_DEVICE
    struct xaf_dsp_device dsp_device;
    int hifi_dsp_fd;
#endif
};

struct alsa_stream_out {
    struct audio_stream_out stream;

    pthread_mutex_t lock;   /* see note below on mutex acquisition order */
    struct pcm_config config;
    struct pcm *pcm;
    bool unavailable;
    int standby;
    struct alsa_audio_device *dev;
    int write_threshold;
    unsigned int written;
};

#ifdef ENABLE_XAF_DSP_DEVICE
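/*
 * Configure the DSP PCM decoder component with three id/value parameter
 * pairs (sample rate, channel count, PCM width) taken from the playback
 * format selected in xa_device_init().
 */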
static int pcm_setup(void *p_pcm, struct alsa_audio_device *audio_device)
{
    int param[6];

    param[0] = XA_CODEC_CONFIG_PARAM_SAMPLE_RATE;
    param[1] = audio_device->dsp_device.pb_format.sample_rate;
    param[2] = XA_CODEC_CONFIG_PARAM_CHANNELS;
    param[3] = audio_device->dsp_device.pb_format.channels;
    param[4] = XA_CODEC_CONFIG_PARAM_PCM_WIDTH;
    param[5] = audio_device->dsp_device.pb_format.pcm_width;

    XF_CHK_API(xaf_comp_set_config(p_pcm, 3, &param[0]));

    return 0;
}

void xa_thread_exit_handler(int sig)
{
    /* ...unused arg */
    (void) sig;

    pthread_exit(0);
}

/* Initialize the Xtensa audio DSP device: open the DSP, create the PCM
 * decoder component and start it. */
static int xa_device_init(struct alsa_audio_device *audio_device)
{
    /* ...initialize playback format */
    audio_device->dsp_device.p_adev = NULL;
    audio_device->dsp_device.pb_format.sample_rate = 48000;
    audio_device->dsp_device.pb_format.channels = 2;
    audio_device->dsp_device.pb_format.pcm_width = 16;
    audio_device->dsp_device.xafinitdone = 0;
    audio_frmwk_buf_size = 0; /* unused */
    audio_comp_buf_size = 0;  /* unused */
    audio_device->dsp_device.num_comp = NUM_COMP_IN_GRAPH;

    struct sigaction actions;
    memset(&actions, 0, sizeof(actions));
    sigemptyset(&actions.sa_mask);
    actions.sa_flags = 0;
    actions.sa_handler = xa_thread_exit_handler;
    sigaction(SIGUSR1, &actions, NULL);

    /* ...mark the DSP playback path as started */
    audio_device->dsp_device.xaf_started = 1;
    audio_device->dsp_device.dec_id = "audio-decoder/pcm";
    audio_device->dsp_device.dec_setup = pcm_setup;
    audio_device->dsp_device.mem_handle = mem_init(); /* initialize memory handler */

    XF_CHK_API(xaf_adev_open(&audio_device->dsp_device.p_adev, audio_frmwk_buf_size,
                             audio_comp_buf_size, mem_malloc, mem_free));

    /* ...create decoder component */
    XF_CHK_API(xaf_comp_create(audio_device->dsp_device.p_adev, &audio_device->dsp_device.p_decoder,
                               audio_device->dsp_device.dec_id, 1, 1,
                               &audio_device->dsp_device.dec_inbuf[0], XAF_DECODER));
    XF_CHK_API(audio_device->dsp_device.dec_setup(audio_device->dsp_device.p_decoder, audio_device));

    /* ...start decoder component */
    XF_CHK_API(xaf_comp_process(audio_device->dsp_device.p_adev, audio_device->dsp_device.p_decoder,
                                NULL, 0, XAF_START_FLAG));
    return 0;
}

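/*
 * Push one write() worth of PCM data through the DSP decoder.  The buffer is
 * copied into the decoder input buffer and submitted; once the component has
 * finished initializing, every XAF_OUTPUT_READY buffer it produces is written
 * to the tinyalsa PCM with pcm_mmap_write() and then handed back to the
 * decoder as an empty output buffer.
 */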
static int xa_device_run(struct audio_stream_out *stream, const void *buffer,
                         size_t frame_size, size_t out_frames, size_t bytes)
{
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    struct alsa_audio_device *adev = out->dev;
    int ret = 0;
    void *p_comp = adev->dsp_device.p_decoder;
    xaf_comp_status comp_status;

    memcpy(adev->dsp_device.dec_inbuf[0], buffer, bytes);
    adev->dsp_device.read_length = bytes;

    if (adev->dsp_device.xafinitdone == 0) {
        XF_CHK_API(xaf_comp_process(adev->dsp_device.p_adev, adev->dsp_device.p_decoder,
                                    adev->dsp_device.dec_inbuf[0], adev->dsp_device.read_length,
                                    XAF_INPUT_READY_FLAG));
        XF_CHK_API(xaf_comp_get_status(adev->dsp_device.p_adev, adev->dsp_device.p_decoder,
                                       &adev->dsp_device.dec_status, &adev->dsp_device.comp_info));
        ALOGE("PROXY:%s xaf_comp_get_status %d\n", __func__, adev->dsp_device.dec_status);
        if (adev->dsp_device.dec_status == XAF_INIT_DONE) {
            adev->dsp_device.xafinitdone = 1;
            out->written += out_frames;
            XF_CHK_API(xaf_comp_process(NULL, p_comp, NULL, 0, XAF_EXEC_FLAG));
        }
    } else {
        XF_CHK_API(xaf_comp_process(NULL, adev->dsp_device.p_decoder, adev->dsp_device.dec_inbuf[0],
                                    adev->dsp_device.read_length, XAF_INPUT_READY_FLAG));
        while (1) {
            XF_CHK_API(xaf_comp_get_status(NULL, p_comp, &comp_status, &adev->dsp_device.comp_info));
            if (comp_status == XAF_EXEC_DONE)
                break;
            if (comp_status == XAF_NEED_INPUT) {
                ALOGV("PROXY:%s loop:XAF_NEED_INPUT\n", __func__);
                break;
            }
            if (comp_status == XAF_OUTPUT_READY) {
                void *p_buf = (void *)adev->dsp_device.comp_info.buf;
                int size = adev->dsp_device.comp_info.length;
                ret = pcm_mmap_write(out->pcm, p_buf, size);
                if (ret == 0) {
                    out->written += out_frames;
                }
                XF_CHK_API(xaf_comp_process(NULL, adev->dsp_device.p_decoder,
                                            (void *)adev->dsp_device.comp_info.buf,
                                            adev->dsp_device.comp_info.length,
                                            XAF_NEED_OUTPUT_FLAG));
            }
        }
    }
    return ret;
}

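/*
 * Drain and tear down the DSP decoder: signal end of input, keep flushing
 * XAF_OUTPUT_READY buffers until the component reports XAF_EXEC_DONE, then
 * delete the component and close the DSP device.
 */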
static int xa_device_close(struct alsa_audio_device *audio_device)
{
    if (audio_device->dsp_device.xaf_started) {
        xaf_comp_status comp_status;
        audio_device->dsp_device.xaf_started = 0;
        while (1) {
            XF_CHK_API(xaf_comp_get_status(NULL, audio_device->dsp_device.p_decoder,
                                           &comp_status, &audio_device->dsp_device.comp_info));
            ALOGV("PROXY:comp_status:%d,audio_device->dsp_device.comp_info.length:%d\n",
                  (int)comp_status, audio_device->dsp_device.comp_info.length);
            if (comp_status == XAF_EXEC_DONE)
                break;
            if (comp_status == XAF_NEED_INPUT) {
                XF_CHK_API(xaf_comp_process(NULL, audio_device->dsp_device.p_decoder,
                                            NULL, 0, XAF_INPUT_OVER_FLAG));
            }

            if (comp_status == XAF_OUTPUT_READY) {
                XF_CHK_API(xaf_comp_process(NULL, audio_device->dsp_device.p_decoder,
                                            (void *)audio_device->dsp_device.comp_info.buf,
                                            audio_device->dsp_device.comp_info.length,
                                            XAF_NEED_OUTPUT_FLAG));
            }
        }

        /* ...exec done, clean-up */
        XF_CHK_API(xaf_comp_delete(audio_device->dsp_device.p_decoder));
        XF_CHK_API(xaf_adev_close(audio_device->dsp_device.p_adev, 0 /*unused*/));
        mem_exit();
        XF_CHK_API(print_mem_mcps_info(audio_device->dsp_device.mem_handle,
                                       audio_device->dsp_device.num_comp));
    }
    return 0;
}
#endif

/* must be called with hw device and output stream mutexes locked */
static int start_output_stream(struct alsa_stream_out *out)
{
    struct alsa_audio_device *adev = out->dev;

    if (out->unavailable)
        return -ENODEV;

    /* default to low power: will be corrected in out_write if necessary before first write to
     * tinyalsa.
     */
    out->write_threshold = PLAYBACK_PERIOD_COUNT * PERIOD_SIZE;
    out->config.start_threshold = PLAYBACK_PERIOD_START_THRESHOLD * PERIOD_SIZE;
    out->config.avail_min = PERIOD_SIZE;

    out->pcm = pcm_open(CARD_OUT, PORT_CODEC, PCM_OUT | PCM_MMAP | PCM_NOIRQ | PCM_MONOTONIC,
                        &out->config);

    if (!pcm_is_ready(out->pcm)) {
        ALOGE("cannot open pcm_out driver: %s", pcm_get_error(out->pcm));
        pcm_close(out->pcm);
        adev->active_output = NULL;
        out->unavailable = true;
        return -ENODEV;
    }

    adev->active_output = out;
    return 0;
}

static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    return out->config.rate;
}

static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
    ALOGV("out_set_sample_rate: %d", rate);
    return -ENOSYS;
}

static size_t out_get_buffer_size(const struct audio_stream *stream)
{
    /* round up to the nearest multiple of 16 frames, as audioflinger
     * expects audio buffers to be a multiple of 16 frames */
    size_t size = PERIOD_SIZE;
    size = ((size + 15) / 16) * 16;
    size *= audio_stream_out_frame_size((struct audio_stream_out *)stream);

    ALOGV("out_get_buffer_size: %zu", size);
    return size;
}

static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
{
    ALOGV("out_get_channels");
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    return audio_channel_out_mask_from_count(out->config.channels);
}

static audio_format_t out_get_format(const struct audio_stream *stream)
{
    ALOGV("out_get_format");
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    return audio_format_from_pcm_format(out->config.format);
}

static int out_set_format(struct audio_stream *stream, audio_format_t format)
{
    ALOGV("out_set_format: %d", format);
    return -ENOSYS;
}

static int do_output_standby(struct alsa_stream_out *out)
{
    struct alsa_audio_device *adev = out->dev;

    if (!out->standby) {
        pcm_close(out->pcm);
        out->pcm = NULL;
        adev->active_output = NULL;
        out->standby = 1;
    }
    return 0;
}

static int out_standby(struct audio_stream *stream)
{
    ALOGV("out_standby");
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    int status;

    pthread_mutex_lock(&out->dev->lock);
    pthread_mutex_lock(&out->lock);
#ifdef ENABLE_XAF_DSP_DEVICE
    xa_device_close(out->dev);
#endif
    status = do_output_standby(out);
    pthread_mutex_unlock(&out->lock);
    pthread_mutex_unlock(&out->dev->lock);
    return status;
}

static int out_dump(const struct audio_stream *stream, int fd)
{
    ALOGV("out_dump");
    return 0;
}

static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
    ALOGV("out_set_parameters");
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    struct alsa_audio_device *adev = out->dev;
    struct str_parms *parms;
    char value[32];
    int val = 0;
    int ret = -EINVAL;

    if (kvpairs == NULL || kvpairs[0] == 0) {
        return 0;
    }

    parms = str_parms_create_str(kvpairs);

    if (str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value)) >= 0) {
        val = atoi(value);
        pthread_mutex_lock(&adev->lock);
        pthread_mutex_lock(&out->lock);
        if (((adev->devices & AUDIO_DEVICE_OUT_ALL) != val) && (val != 0)) {
            adev->devices &= ~AUDIO_DEVICE_OUT_ALL;
            adev->devices |= val;
        }
        pthread_mutex_unlock(&out->lock);
        pthread_mutex_unlock(&adev->lock);
        ret = 0;
    }

    str_parms_destroy(parms);
    return ret;
}

static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
{
    ALOGV("out_get_parameters");
    return strdup("");
}

static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
    ALOGV("out_get_latency");
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
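    /* PERIOD_SIZE * PLAYBACK_PERIOD_COUNT frames of buffering:
     * (1024 * 4 * 1000) / 48000 ≈ 85 ms at the fixed codec rate. */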
    return (PERIOD_SIZE * PLAYBACK_PERIOD_COUNT * 1000) / out->config.rate;
}

static int out_set_volume(struct audio_stream_out *stream, float left,
                          float right)
{
    ALOGV("out_set_volume: Left:%f Right:%f", left, right);
    return 0;
}

static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
                         size_t bytes)
{
    int ret;
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    struct alsa_audio_device *adev = out->dev;
    size_t frame_size = audio_stream_out_frame_size(stream);
    size_t out_frames = bytes / frame_size;

    /* acquiring hw device mutex systematically is useful if a low priority thread is waiting
     * on the output stream mutex - e.g. executing select_mode() while holding the hw device
     * mutex
     */
    pthread_mutex_lock(&adev->lock);
    pthread_mutex_lock(&out->lock);
    if (out->standby) {
#ifdef ENABLE_XAF_DSP_DEVICE
        if (adev->hifi_dsp_fd >= 0) {
            xa_device_init(adev);
        }
#endif
        ret = start_output_stream(out);
        if (ret != 0) {
            pthread_mutex_unlock(&adev->lock);
            goto exit;
        }
        out->standby = 0;
    }

    pthread_mutex_unlock(&adev->lock);

#ifdef ENABLE_XAF_DSP_DEVICE
    /* Route playback through the DSP decoder when it is available, otherwise
     * fall back to writing directly to the PCM device. */
    if (adev->dsp_device.p_adev != NULL) {
        ret = xa_device_run(stream, buffer, frame_size, out_frames, bytes);
    } else {
#endif
        ret = pcm_mmap_write(out->pcm, buffer, out_frames * frame_size);
        if (ret == 0) {
            out->written += out_frames;
        }
#ifdef ENABLE_XAF_DSP_DEVICE
    }
#endif
exit:
    pthread_mutex_unlock(&out->lock);

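    /* If the write failed, sleep for the time this buffer would have taken to
     * play out so the caller does not spin resubmitting data. */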
    if (ret != 0) {
        usleep((int64_t)bytes * 1000000 / audio_stream_out_frame_size(stream) /
               out_get_sample_rate(&stream->common));
    }

    return bytes;
}

static int out_get_render_position(const struct audio_stream_out *stream,
                                   uint32_t *dsp_frames)
{
    *dsp_frames = 0;
    ALOGV("out_get_render_position: dsp_frames: %p", dsp_frames);
    return -EINVAL;
}

static int out_get_presentation_position(const struct audio_stream_out *stream,
                                         uint64_t *frames, struct timespec *timestamp)
{
    struct alsa_stream_out *out = (struct alsa_stream_out *)stream;
    int ret = -1;

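    /* Frames actually presented = frames written by the HAL minus the frames
     * still queued in the kernel buffer (buffer depth minus available space),
     * timestamped by pcm_get_htimestamp(). */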
    if (out->pcm) {
        unsigned int avail;
        if (pcm_get_htimestamp(out->pcm, &avail, timestamp) == 0) {
            size_t kernel_buffer_size = out->config.period_size * out->config.period_count;
            int64_t signed_frames = out->written - kernel_buffer_size + avail;
            if (signed_frames >= 0) {
                *frames = signed_frames;
                ret = 0;
            }
        }
    }

    return ret;
}

static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
    ALOGV("out_add_audio_effect: %p", effect);
    return 0;
}

static int out_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
    ALOGV("out_remove_audio_effect: %p", effect);
    return 0;
}

static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
                                        int64_t *timestamp)
{
    *timestamp = 0;
    ALOGV("out_get_next_write_timestamp: %ld", (long int)(*timestamp));
    return -EINVAL;
}

/** audio_stream_in implementation **/
static uint32_t in_get_sample_rate(const struct audio_stream *stream)
{
    ALOGV("in_get_sample_rate");
    return 8000;
}

static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
{
    ALOGV("in_set_sample_rate: %d", rate);
    return -ENOSYS;
}

static size_t in_get_buffer_size(const struct audio_stream *stream)
{
    ALOGV("in_get_buffer_size: %d", 320);
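    /* 320 bytes = 20 ms of the 8 kHz, mono, 16-bit capture format
     * (8000 frames/s * 0.020 s * 2 bytes/frame). */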
    return 320;
}

static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
{
    ALOGV("in_get_channels: %d", AUDIO_CHANNEL_IN_MONO);
    return AUDIO_CHANNEL_IN_MONO;
}

static audio_format_t in_get_format(const struct audio_stream *stream)
{
    return AUDIO_FORMAT_PCM_16_BIT;
}

static int in_set_format(struct audio_stream *stream, audio_format_t format)
{
    return -ENOSYS;
}

static int in_standby(struct audio_stream *stream)
{
    return 0;
}

static int in_dump(const struct audio_stream *stream, int fd)
{
    return 0;
}

static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
    return 0;
}

static char * in_get_parameters(const struct audio_stream *stream,
                                const char *keys)
{
    return strdup("");
}

static int in_set_gain(struct audio_stream_in *stream, float gain)
{
    return 0;
}

static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
                       size_t bytes)
{
    ALOGV("in_read: bytes %zu", bytes);
    /* XXX: fake timing for audio input */
    usleep((int64_t)bytes * 1000000 / audio_stream_in_frame_size(stream) /
           in_get_sample_rate(&stream->common));
    memset(buffer, 0, bytes);
    return bytes;
}

static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
{
    return 0;
}

static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
    return 0;
}

static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
    return 0;
}

static int adev_open_output_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
                                   audio_output_flags_t flags,
                                   struct audio_config *config,
                                   struct audio_stream_out **stream_out,
                                   const char *address __unused)
{
    ALOGV("adev_open_output_stream...");

    struct alsa_audio_device *ladev = (struct alsa_audio_device *)dev;
    struct alsa_stream_out *out;
    struct pcm_params *params;
    int ret = 0;

    params = pcm_params_get(CARD_OUT, PORT_CODEC, PCM_OUT);
    if (!params)
        return -ENOSYS;

    out = (struct alsa_stream_out *)calloc(1, sizeof(struct alsa_stream_out));
    if (!out)
        return -ENOMEM;

    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.common.set_sample_rate = out_set_sample_rate;
    out->stream.common.get_buffer_size = out_get_buffer_size;
    out->stream.common.get_channels = out_get_channels;
    out->stream.common.get_format = out_get_format;
    out->stream.common.set_format = out_set_format;
    out->stream.common.standby = out_standby;
    out->stream.common.dump = out_dump;
    out->stream.common.set_parameters = out_set_parameters;
    out->stream.common.get_parameters = out_get_parameters;
    out->stream.common.add_audio_effect = out_add_audio_effect;
    out->stream.common.remove_audio_effect = out_remove_audio_effect;
    out->stream.get_latency = out_get_latency;
    out->stream.set_volume = out_set_volume;
    out->stream.write = out_write;
    out->stream.get_render_position = out_get_render_position;
    out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
    out->stream.get_presentation_position = out_get_presentation_position;

    out->config.channels = CHANNEL_STEREO;
    out->config.rate = CODEC_SAMPLING_RATE;
    out->config.format = PCM_FORMAT_S16_LE;
    out->config.period_size = PERIOD_SIZE;
    out->config.period_count = PLAYBACK_PERIOD_COUNT;

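    /* The stream only supports the fixed 48 kHz, stereo, 16-bit configuration.
     * If the caller asked for anything else, the supported values are written
     * back through *config; the -EINVAL that would normally trigger a retry is
     * suppressed below (see the TODO) because the framework retry mechanism is
     * not implemented. */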
    if (out->config.rate != config->sample_rate ||
        audio_channel_count_from_out_mask(config->channel_mask) != CHANNEL_STEREO ||
        out->config.format != pcm_format_from_audio_format(config->format)) {
        config->sample_rate = out->config.rate;
        config->format = audio_format_from_pcm_format(out->config.format);
        config->channel_mask = audio_channel_out_mask_from_count(CHANNEL_STEREO);
        ret = -EINVAL;
    }

    ALOGI("adev_open_output_stream selects channels=%d rate=%d format=%d",
          out->config.channels, out->config.rate, out->config.format);

    out->dev = ladev;
    out->standby = 1;
    out->unavailable = false;

    config->format = out_get_format(&out->stream.common);
    config->channel_mask = out_get_channels(&out->stream.common);
    config->sample_rate = out_get_sample_rate(&out->stream.common);

    *stream_out = &out->stream;

    /* TODO The retry mechanism isn't implemented in AudioPolicyManager/AudioFlinger. */
    ret = 0;

    return ret;
}

static void adev_close_output_stream(struct audio_hw_device *dev,
                                     struct audio_stream_out *stream)
{
    ALOGV("adev_close_output_stream...");
    free(stream);
}

static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
{
    ALOGV("adev_set_parameters");
    return -ENOSYS;
}

static char * adev_get_parameters(const struct audio_hw_device *dev,
                                  const char *keys)
{
    ALOGV("adev_get_parameters");
    return strdup("");
}

static int adev_init_check(const struct audio_hw_device *dev)
{
    ALOGV("adev_init_check");
    return 0;
}

static int adev_set_voice_volume(struct audio_hw_device *dev, float volume)
{
    ALOGV("adev_set_voice_volume: %f", volume);
    return -ENOSYS;
}

static int adev_set_master_volume(struct audio_hw_device *dev, float volume)
{
    ALOGV("adev_set_master_volume: %f", volume);
    return -ENOSYS;
}

static int adev_get_master_volume(struct audio_hw_device *dev, float *volume)
{
    ALOGV("adev_get_master_volume: %f", *volume);
    return -ENOSYS;
}

static int adev_set_master_mute(struct audio_hw_device *dev, bool muted)
{
    ALOGV("adev_set_master_mute: %d", muted);
    return -ENOSYS;
}

static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted)
{
    ALOGV("adev_get_master_mute: %d", *muted);
    return -ENOSYS;
}

static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode)
{
    ALOGV("adev_set_mode: %d", mode);
    return 0;
}

static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
{
    ALOGV("adev_set_mic_mute: %d", state);
    return -ENOSYS;
}

static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
{
    ALOGV("adev_get_mic_mute");
    return -ENOSYS;
}

static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
                                         const struct audio_config *config)
{
    ALOGV("adev_get_input_buffer_size: %d", 320);
    return 320;
}

static int adev_open_input_stream(struct audio_hw_device __unused *dev,
                                  audio_io_handle_t handle,
                                  audio_devices_t devices,
                                  struct audio_config *config,
                                  struct audio_stream_in **stream_in,
                                  audio_input_flags_t flags __unused,
                                  const char *address __unused,
                                  audio_source_t source __unused)
{
    struct stub_stream_in *in;

    ALOGV("adev_open_input_stream...");

    in = (struct stub_stream_in *)calloc(1, sizeof(struct stub_stream_in));
    if (!in)
        return -ENOMEM;

    in->stream.common.get_sample_rate = in_get_sample_rate;
    in->stream.common.set_sample_rate = in_set_sample_rate;
    in->stream.common.get_buffer_size = in_get_buffer_size;
    in->stream.common.get_channels = in_get_channels;
    in->stream.common.get_format = in_get_format;
    in->stream.common.set_format = in_set_format;
    in->stream.common.standby = in_standby;
    in->stream.common.dump = in_dump;
    in->stream.common.set_parameters = in_set_parameters;
    in->stream.common.get_parameters = in_get_parameters;
    in->stream.common.add_audio_effect = in_add_audio_effect;
    in->stream.common.remove_audio_effect = in_remove_audio_effect;
    in->stream.set_gain = in_set_gain;
    in->stream.read = in_read;
    in->stream.get_input_frames_lost = in_get_input_frames_lost;

    *stream_in = &in->stream;
    return 0;
}

static void adev_close_input_stream(struct audio_hw_device *dev,
                                    struct audio_stream_in *in)
{
    ALOGV("adev_close_input_stream...");
    /* release the stream allocated in adev_open_input_stream() */
    free(in);
}

static int adev_dump(const audio_hw_device_t *device, int fd)
{
    ALOGV("adev_dump");
    return 0;
}

static int adev_close(hw_device_t *device)
{
#ifdef ENABLE_XAF_DSP_DEVICE
    struct alsa_audio_device *adev = (struct alsa_audio_device *)device;
#endif
    ALOGV("adev_close");
#ifdef ENABLE_XAF_DSP_DEVICE
    if (adev->hifi_dsp_fd >= 0)
        close(adev->hifi_dsp_fd);
#endif
    free(device);
    return 0;
}

static int adev_open(const hw_module_t* module, const char* name,
                     hw_device_t** device)
{
    struct alsa_audio_device *adev;

    ALOGV("adev_open: %s", name);

    if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
        return -EINVAL;

    adev = calloc(1, sizeof(struct alsa_audio_device));
    if (!adev)
        return -ENOMEM;

    adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
    adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
    adev->hw_device.common.module = (struct hw_module_t *) module;
    adev->hw_device.common.close = adev_close;
    adev->hw_device.init_check = adev_init_check;
    adev->hw_device.set_voice_volume = adev_set_voice_volume;
    adev->hw_device.set_master_volume = adev_set_master_volume;
    adev->hw_device.get_master_volume = adev_get_master_volume;
    adev->hw_device.set_master_mute = adev_set_master_mute;
    adev->hw_device.get_master_mute = adev_get_master_mute;
    adev->hw_device.set_mode = adev_set_mode;
    adev->hw_device.set_mic_mute = adev_set_mic_mute;
    adev->hw_device.get_mic_mute = adev_get_mic_mute;
    adev->hw_device.set_parameters = adev_set_parameters;
    adev->hw_device.get_parameters = adev_get_parameters;
    adev->hw_device.get_input_buffer_size = adev_get_input_buffer_size;
    adev->hw_device.open_output_stream = adev_open_output_stream;
    adev->hw_device.close_output_stream = adev_close_output_stream;
    adev->hw_device.open_input_stream = adev_open_input_stream;
    adev->hw_device.close_input_stream = adev_close_input_stream;
    adev->hw_device.dump = adev_dump;

    adev->devices = AUDIO_DEVICE_NONE;

    *device = &adev->hw_device.common;
#ifdef ENABLE_XAF_DSP_DEVICE
    adev->hifi_dsp_fd = open(HIFI_DSP_MISC_DRIVER, O_WRONLY, 0);
    if (adev->hifi_dsp_fd < 0) {
        ALOGW("hifi_dsp: Error opening device %d", errno);
    } else {
        ALOGI("hifi_dsp: Open device");
    }
#endif
    return 0;
}

static struct hw_module_methods_t hal_module_methods = {
    .open = adev_open,
};

struct audio_module HAL_MODULE_INFO_SYM = {
    .common = {
        .tag = HARDWARE_MODULE_TAG,
        .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
        .hal_api_version = HARDWARE_HAL_API_VERSION,
        .id = AUDIO_HARDWARE_MODULE_ID,
        .name = "Hikey audio HW HAL",
        .author = "The Android Open Source Project",
        .methods = &hal_module_methods,
    },
};