// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Implementation of primary alsa driver code base for Intel HD Audio.
 *
 *  Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                     PeiSen Hou <pshou@realtek.com.tw>
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#ifdef CONFIG_X86
/* for art-tsc conversion */
#include <asm/tsc.h>
#endif

#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"

#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"

/* DSP lock helpers */
#define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
#define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
#define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))

/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	struct hdac_stream *s;

	s = snd_hdac_stream_assign(azx_bus(chip), substream);
	if (!s)
		return NULL;
	return stream_to_azx_dev(s);
}

/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	snd_hdac_stream_release(azx_stream(azx_dev));
}

static inline struct hda_pcm_stream *
to_hda_pcm_stream(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	return &apcm->info->stream[substream->stream];
}

static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
				  u64 nsec)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	u64 codec_frames, codec_nsecs;

	if (!hinfo->ops.get_delay)
		return nsec;

	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
	codec_nsecs = div_u64(codec_frames * 1000000000LL,
			      substream->runtime->rate);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return nsec + codec_nsecs;

	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}

/*
 * PCM ops
 */

static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);

	trace_azx_pcm_close(chip, azx_dev);
	mutex_lock(&chip->open_mutex);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}

static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	int ret;

	trace_azx_pcm_hw_params(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		ret = -EBUSY;
		goto unlock;
	}

	azx_dev->core.bufsize = 0;
	azx_dev->core.period_bytes = 0;
	azx_dev->core.format_val = 0;
	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));

 unlock:
	dsp_unlock(azx_dev);
	return ret;
}

static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev))
		snd_hdac_stream_cleanup(azx_stream(azx_dev));

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = snd_pcm_lib_free_pages(substream);
	azx_stream(azx_dev)->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	trace_azx_pcm_prepare(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	snd_hdac_stream_reset(azx_stream(azx_dev));
	format_val = snd_hdac_calc_stream_format(runtime->rate,
						 runtime->channels,
						 runtime->format,
						 hinfo->maxbps,
						 ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
	if (err < 0)
		goto unlock;

	snd_hdac_stream_setup(azx_stream(azx_dev));

	stream_tag = azx_dev->core.stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->core.format_val, substream);

 unlock:
	if (!err)
		azx_stream(azx_dev)->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct hdac_bus *bus = azx_bus(chip);
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	struct hdac_stream *hstr;
	bool start;
	int sbits = 0;
	int sync_reg;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	hstr = azx_stream(azx_dev);
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		sync_reg = AZX_REG_OLD_SSYNC;
	else
		sync_reg = AZX_REG_SSYNC;

	if (dsp_is_locked(azx_dev) || !hstr->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = true;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = false;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->core.index;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&bus->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->insufficient = 1;
			snd_hdac_stream_start(azx_stream(azx_dev), true);
		} else {
			snd_hdac_stream_stop(azx_stream(azx_dev));
		}
	}
	spin_unlock(&bus->reg_lock);

	snd_hdac_stream_sync(hstr, start, sbits);

	spin_lock(&bus->reg_lock);
	/* reset SYNC bits */
	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
	if (start)
		snd_hdac_stream_timecounter_init(hstr, sbits);
	spin_unlock(&bus->reg_lock);
	return 0;
}

unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);

unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);

unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->core.substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	if (pos >= azx_dev->core.bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);

static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	return bytes_to_frames(substream->runtime,
			       azx_get_position(chip, azx_dev));
}

/*
 * azx_scale64: Scale base by mult/div while not overflowing sanely
 *
 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
 *
 * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K,
 * which is about 384307 s, i.e. ~4.5 days.
 *
 * This scales the calculation so that overflow will happen but after 2^64 /
 * 48000 secs, which is pretty large!
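 *
 * In other words, writing base = q * den + rem gives
 *
 *   base * num / den = q * num + (rem * num) / den
 *
 * which is exactly what azx_scale64() below computes: the multiplication
 * that may overflow only hits the already-divided quotient, while
 * rem * num stays within 64 bits because rem and num are both 32-bit
 * values.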
 *
 * In the calculation below:
 * base may overflow, but since there isn’t any additional division
 * performed on base it’s OK
 * rem can’t overflow because both are 32-bit values
 */

#ifdef CONFIG_X86
static u64 azx_scale64(u64 base, u32 num, u32 den)
{
	u64 rem;

	rem = do_div(base, den);

	base *= num;
	rem *= num;

	do_div(rem, den);

	return base + rem;
}

static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	struct snd_pcm_substream *substream = ctx;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct snd_pcm_runtime *runtime;
	u64 ll_counter, ll_counter_l, ll_counter_h;
	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
	u32 wallclk_ctr, wallclk_cycles;
	bool direction;
	u32 dma_select;
	u32 timeout = 200;
	u32 retry_count = 0;

	runtime = substream->runtime;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		direction = 1;
	else
		direction = 0;

	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
	do {
		timeout = 100;
		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
					(azx_dev->core.stream_tag - 1);
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);

		/* Enable the capture */
		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);

		while (timeout) {
			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
						GTSCC_TSCCD_MASK)
				break;

			timeout--;
		}

		if (!timeout) {
			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
			return -EIO;
		}

		/* Read wall clock counter */
		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);

		/* Read TSC counter */
		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);

		/* Read Link counter */
		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);

		/* Ack: registers read done */
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);

		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
						tsc_counter_l;

		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;

		/*
		 * An error can occur near frame "rollover". The clocks-in-frame
		 * value indicates whether this error may have occurred. Here
		 * we use the value of 10, i.e. HDA_MAX_CYCLE_OFFSET.
		 */
		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
			break;

		/*
		 * Sleep before we read again, else we may again get a
		 * value near MAX_CYCLE.
		 * Try to sleep for a different amount of time so we
		 * don't hit the same number again.
		 */
		udelay(retry_count++);

	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);

	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
		dev_err_ratelimited(chip->card->dev,
				    "Error in WALFCC cycle count\n");
		return -EIO;
	}

	*device = ns_to_ktime(azx_scale64(ll_counter,
				NSEC_PER_SEC, runtime->rate));
	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));

	*system = convert_art_to_tsc(tsc_counter);

	return 0;
}

#else
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	return -ENXIO;
}
#endif

static int azx_get_crosststamp(struct snd_pcm_substream *substream,
			       struct system_device_crosststamp *xtstamp)
{
	return get_device_system_crosststamp(azx_get_sync_time,
					     substream, NULL, xtstamp);
}

static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
					  struct snd_pcm_audio_tstamp_config *ts)
{
	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
			return true;

	return false;
}

static int azx_get_time_info(struct snd_pcm_substream *substream,
			     struct timespec *system_ts, struct timespec *audio_ts,
			     struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			     struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct system_device_crosststamp xtstamp;
	int ret;
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
	    (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&azx_dev->core.tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {

		ret = azx_get_crosststamp(substream, &xtstamp);
		if (ret)
			return ret;

		switch (runtime->tstamp_type) {
		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
			return -EINVAL;

		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
			break;

		default:
			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
			break;

		}

		*audio_ts = ktime_to_timespec(xtstamp.device);

		audio_tstamp_report->actual_type =
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
		audio_tstamp_report->accuracy_report = 1;
		/* 24 MHz WallClock == 42ns resolution */
		audio_tstamp_report->accuracy = 42;

	} else {
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
	}

	return 0;
}

static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};

static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	int buff_step;

	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	trace_azx_pcm_open(chip, azx_dev);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->private_data = azx_dev;

	runtime->hw = azx_pcm_hw;
	if (chip->gts_present)
		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	/* For some reason, the playback stream stalls on PulseAudio with
	 * tsched=1 when a capture stream triggers. Until we figure out the
	 * real cause, disable tsched mode by telling the PCM info flag.
	 */
	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

 powerdown:
	snd_hda_power_down(apcm->codec);
 unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}

static int azx_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *area)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	if (chip->ops->pcm_mmap_prepare)
		chip->ops->pcm_mmap_prepare(substream, area);
	return snd_pcm_lib_default_mmap(substream, area);
}

static const struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info = azx_get_time_info,
	.mmap = azx_pcm_mmap,
};

static void azx_pcm_free(struct snd_pcm *pcm)
{
	struct azx_pcm *apcm = pcm->private_data;
	if (apcm) {
		list_del(&apcm->list);
		apcm->info->pcm = NULL;
		kfree(apcm);
	}
}

#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)

int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
			      struct hda_pcm *cpcm)
{
	struct hdac_bus *bus = &_bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;
	int type = SNDRV_DMA_TYPE_DEV_SG;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL) {
		snd_device_free(chip->card, pcm);
		return -ENOMEM;
	}
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
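	/* azx_pcm_free() unlinks the azx_pcm wrapper from chip->pcm_list and
	 * frees it when the PCM device itself is released
	 */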
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	if (chip->uc_buffer)
		type = SNDRV_DMA_TYPE_DEV_UC_SG;
	snd_pcm_lib_preallocate_pages_for_all(pcm, type,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	return 0;
}

static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (addr >= AZX_MAX_CODECS) {
		snd_BUG();
		addr = 0;
	}

	return addr;
}

/* receive a response */
static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
				 unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);
	struct hda_bus *hbus = &chip->bus;
	unsigned long timeout;
	unsigned long loopcounter;
	wait_queue_entry_t wait;
	bool warned = false;

	init_wait_entry(&wait, 0);
 again:
	timeout = jiffies + msecs_to_jiffies(1000);

	for (loopcounter = 0;; loopcounter++) {
		spin_lock_irq(&bus->reg_lock);
		if (!bus->polling_mode)
			prepare_to_wait(&bus->rirb_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		if (bus->polling_mode)
			snd_hdac_bus_update_rirb(bus);
		if (!bus->rirb.cmds[addr]) {
			if (res)
				*res = bus->rirb.res[addr]; /* the last value */
			if (!bus->polling_mode)
				finish_wait(&bus->rirb_wq, &wait);
			spin_unlock_irq(&bus->reg_lock);
			return 0;
		}
		spin_unlock_irq(&bus->reg_lock);
		if (time_after(jiffies, timeout))
			break;
#define LOOP_COUNT_MAX	3000
		if (!bus->polling_mode) {
			schedule_timeout(msecs_to_jiffies(2));
		} else if (hbus->needs_damn_long_delay ||
			   loopcounter > LOOP_COUNT_MAX) {
			if (loopcounter > LOOP_COUNT_MAX && !warned) {
				dev_dbg_ratelimited(chip->card->dev,
						    "too slow response, last cmd=%#08x\n",
						    bus->last_cmd[addr]);
				warned = true;
			}
			msleep(2); /* temporary workaround */
		} else {
			udelay(10);
			cond_resched();
		}
	}

	if (!bus->polling_mode)
		finish_wait(&bus->rirb_wq, &wait);

	if (hbus->no_response_fallback)
		return -EIO;

	if (!bus->polling_mode) {
		dev_warn(chip->card->dev,
			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		bus->polling_mode = 1;
		goto again;
	}

	if (chip->msi) {
		dev_warn(chip->card->dev,
			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		if (chip->ops->disable_msi_reset_irq &&
		    chip->ops->disable_msi_reset_irq(chip) < 0)
			return -EIO;
		goto again;
	}

	if (chip->probing) {
		/* If this critical timeout happens during the codec probing
		 * phase, this is likely an access to a non-existing codec
		 * slot.  Better to return an error and reset the system.
		 */
		return -EIO;
	}

	/* no fallback mechanism? */
	if (!chip->fallback_to_single_cmd)
		return -EIO;

	/* a fatal communication error; need either to reset or to fallback
	 * to the single_cmd mode
	 */
	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
		hbus->response_reset = 1;
		dev_err(chip->card->dev,
			"No response from codec, resetting bus: last cmd=0x%08x\n",
			bus->last_cmd[addr]);
		return -EAGAIN; /* give a chance to retry */
	}

	dev_WARN(chip->card->dev,
		 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
		 bus->last_cmd[addr]);
	chip->single_cmd = 1;
	hbus->response_reset = 0;
	snd_hdac_bus_stop_cmd_io(bus);
	return -EIO;
}

/*
 * Use the single immediate command instead of CORB/RIRB for simplicity
 *
 * Note: according to Intel, this is not preferred use. The command was
 *       intended for the BIOS only, and may get confused with unsolicited
 *       responses. So, we shouldn't use it for normal operation from the
 *       driver.
 *       The code is left here, however, for debugging/testing purposes.
 */

/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	azx_bus(chip)->rirb.res[addr] = -1;
	return -EIO;
}

/* send a command */
static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
{
	struct azx *chip = bus_to_azx(bus);
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->last_cmd[azx_command_addr(val)] = val;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}

/* receive a response */
static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
				   unsigned int *res)
{
	if (res)
		*res = bus->rirb.res[addr];
	return 0;
}

/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */
967 */ 968 969 /* send a command */ 970 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val) 971 { 972 struct azx *chip = bus_to_azx(bus); 973 974 if (chip->disabled) 975 return 0; 976 if (chip->single_cmd) 977 return azx_single_send_cmd(bus, val); 978 else 979 return snd_hdac_bus_send_cmd(bus, val); 980 } 981 982 /* get a response */ 983 static int azx_get_response(struct hdac_bus *bus, unsigned int addr, 984 unsigned int *res) 985 { 986 struct azx *chip = bus_to_azx(bus); 987 988 if (chip->disabled) 989 return 0; 990 if (chip->single_cmd) 991 return azx_single_get_response(bus, addr, res); 992 else 993 return azx_rirb_get_response(bus, addr, res); 994 } 995 996 static const struct hdac_bus_ops bus_core_ops = { 997 .command = azx_send_cmd, 998 .get_response = azx_get_response, 999 }; 1000 1001 #ifdef CONFIG_SND_HDA_DSP_LOADER 1002 /* 1003 * DSP loading code (e.g. for CA0132) 1004 */ 1005 1006 /* use the first stream for loading DSP */ 1007 static struct azx_dev * 1008 azx_get_dsp_loader_dev(struct azx *chip) 1009 { 1010 struct hdac_bus *bus = azx_bus(chip); 1011 struct hdac_stream *s; 1012 1013 list_for_each_entry(s, &bus->stream_list, list) 1014 if (s->index == chip->playback_index_offset) 1015 return stream_to_azx_dev(s); 1016 1017 return NULL; 1018 } 1019 1020 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format, 1021 unsigned int byte_size, 1022 struct snd_dma_buffer *bufp) 1023 { 1024 struct hdac_bus *bus = &codec->bus->core; 1025 struct azx *chip = bus_to_azx(bus); 1026 struct azx_dev *azx_dev; 1027 struct hdac_stream *hstr; 1028 bool saved = false; 1029 int err; 1030 1031 azx_dev = azx_get_dsp_loader_dev(chip); 1032 hstr = azx_stream(azx_dev); 1033 spin_lock_irq(&bus->reg_lock); 1034 if (hstr->opened) { 1035 chip->saved_azx_dev = *azx_dev; 1036 saved = true; 1037 } 1038 spin_unlock_irq(&bus->reg_lock); 1039 1040 err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp); 1041 if (err < 0) { 1042 spin_lock_irq(&bus->reg_lock); 1043 if (saved) 1044 *azx_dev = chip->saved_azx_dev; 1045 spin_unlock_irq(&bus->reg_lock); 1046 return err; 1047 } 1048 1049 hstr->prepared = 0; 1050 return err; 1051 } 1052 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare); 1053 1054 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start) 1055 { 1056 struct hdac_bus *bus = &codec->bus->core; 1057 struct azx *chip = bus_to_azx(bus); 1058 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); 1059 1060 snd_hdac_dsp_trigger(azx_stream(azx_dev), start); 1061 } 1062 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger); 1063 1064 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec, 1065 struct snd_dma_buffer *dmab) 1066 { 1067 struct hdac_bus *bus = &codec->bus->core; 1068 struct azx *chip = bus_to_azx(bus); 1069 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); 1070 struct hdac_stream *hstr = azx_stream(azx_dev); 1071 1072 if (!dmab->area || !hstr->locked) 1073 return; 1074 1075 snd_hdac_dsp_cleanup(hstr, dmab); 1076 spin_lock_irq(&bus->reg_lock); 1077 if (hstr->opened) 1078 *azx_dev = chip->saved_azx_dev; 1079 hstr->locked = false; 1080 spin_unlock_irq(&bus->reg_lock); 1081 } 1082 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup); 1083 #endif /* CONFIG_SND_HDA_DSP_LOADER */ 1084 1085 /* 1086 * reset and start the controller registers 1087 */ 1088 void azx_init_chip(struct azx *chip, bool full_reset) 1089 { 1090 if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) { 1091 /* correct RINTCNT for CXT */ 1092 if (chip->driver_caps & 
			azx_writew(chip, RINTCNT, 0xc0);
	}
}
EXPORT_SYMBOL_GPL(azx_init_chip);

void azx_stop_all_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		snd_hdac_stream_stop(s);
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);

void azx_stop_chip(struct azx *chip)
{
	snd_hdac_bus_stop_chip(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_chip);

/*
 * interrupt handler
 */
static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
{
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = stream_to_azx_dev(s);

	/* check whether this IRQ is really acceptable */
	if (!chip->ops->position_check ||
	    chip->ops->position_check(chip, azx_dev)) {
		spin_unlock(&bus->reg_lock);
		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
		spin_lock(&bus->reg_lock);
	}
}

irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct hdac_bus *bus = azx_bus(chip);
	u32 status;
	bool active, handled = false;
	int repeat = 0; /* count for avoiding endless loop */

#ifdef CONFIG_PM
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&bus->reg_lock);

	if (chip->disabled)
		goto unlock;

	do {
		status = azx_readl(chip, INTSTS);
		if (status == 0 || status == 0xffffffff)
			break;

		handled = true;
		active = false;
		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
			active = true;

		/* clear rirb int */
		status = azx_readb(chip, RIRBSTS);
		if (status & RIRB_INT_MASK) {
			active = true;
			if (status & RIRB_INT_RESPONSE) {
				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
					udelay(80);
				snd_hdac_bus_update_rirb(bus);
			}
			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
		}
	} while (active && ++repeat < 10);

 unlock:
	spin_unlock(&bus->reg_lock);

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);

/*
 * Codec interface
 */

/*
 * Probe the given codec address
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	struct hdac_bus *bus = azx_bus(chip);
	int err;
	unsigned int res = -1;

	mutex_lock(&bus->cmd_mutex);
	chip->probing = 1;
	azx_send_cmd(bus, cmd);
	err = azx_get_response(bus, addr, &res);
	chip->probing = 0;
	mutex_unlock(&bus->cmd_mutex);
	if (err < 0 || res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}

void snd_hda_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus_to_azx(&bus->core);

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (bus->core.chip_init)
		snd_hda_bus_reset_codecs(bus);
	bus->in_reset = 0;
}

/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model)
{
	struct hda_bus *bus = &chip->bus;
	int err;

	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
	if (err < 0)
		return err;

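	/* fill in the legacy hda_bus fields and the core bus options that
	 * the rest of the driver relies on
	 */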
	bus->card = chip->card;
	mutex_init(&bus->prepare_mutex);
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->mixer_assigned = -1;
	bus->core.snoop = azx_snoop(chip);
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib)
		bus->core.use_posbuf = true;
	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
		bus->core.corbrp_self_clear = true;

	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
		bus->core.align_bdle_4k = true;

	/* AMD chipsets often cause communication stalls upon certain
	 * sequences such as the pin-detection. It seems that forcing the
	 * synced access works around the stall. Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		bus->core.sync_write = 1;
		bus->allow_bus_reset = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_init);

/* Probe codecs */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hdac_bus *bus = azx_bus(chip);
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				bus->codec_mask &= ~(1 << c);
				/* Worse, accessing a non-existing codec often
				 * screws up the controller chip and disturbs
				 * further communication. Thus if an error
				 * occurs during probing, better to reset the
				 * controller chip to get back to a sane state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = chip->jackpoll_interval;
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);

/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
	struct hda_codec *codec, *next;

	/* use _safe version here since snd_hda_codec_configure() deregisters
	 * the device upon error and deletes itself from the bus list.
	 */
	list_for_each_codec_safe(codec, next, &chip->bus) {
		snd_hda_codec_configure(codec);
	}

	if (!azx_bus(chip)->num_codecs)
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);

static int stream_direction(struct azx *chip, unsigned char index)
{
	if (index >= chip->capture_index_offset &&
	    index < chip->capture_index_offset + chip->capture_streams)
		return SNDRV_PCM_STREAM_CAPTURE;
	return SNDRV_PCM_STREAM_PLAYBACK;
}

/* initialize SD streams */
int azx_init_streams(struct azx *chip)
{
	int i;
	int stream_tags[2] = { 0, 0 };

	/* initialize each stream (aka device)
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
		int dir, tag;

		if (!azx_dev)
			return -ENOMEM;

		dir = stream_direction(chip, i);
		/* stream tag must be unique throughout
		 * the stream direction group,
		 * valid values 1...15
		 * use separate stream tag if the flag
		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			tag = ++stream_tags[dir];
		else
			tag = i + 1;
		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
				     i, dir, tag);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_streams);

void azx_free_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	while (!list_empty(&bus->stream_list)) {
		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
		list_del(&s->list);
		kfree(stream_to_azx_dev(s));
	}
}
EXPORT_SYMBOL_GPL(azx_free_streams);