// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Implementation of primary alsa driver code base for Intel HD Audio.
 *
 *  Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                     PeiSen Hou <pshou@realtek.com.tw>
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#ifdef CONFIG_X86
/* for art-tsc conversion */
#include <asm/tsc.h>
#endif

#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"

#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"

/* DSP lock helpers */
#define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
#define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
#define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))

/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	struct hdac_stream *s;

	s = snd_hdac_stream_assign(azx_bus(chip), substream);
	if (!s)
		return NULL;
	return stream_to_azx_dev(s);
}

/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	snd_hdac_stream_release(azx_stream(azx_dev));
}

static inline struct hda_pcm_stream *
to_hda_pcm_stream(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	return &apcm->info->stream[substream->stream];
}

static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
				  u64 nsec)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	u64 codec_frames, codec_nsecs;

	if (!hinfo->ops.get_delay)
		return nsec;

	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
	codec_nsecs = div_u64(codec_frames * 1000000000LL,
			      substream->runtime->rate);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return nsec + codec_nsecs;

	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}
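
/*
 * Illustrative example (not taken from the driver itself): at 48 kHz a
 * codec-reported delay of 96 frames converts to 96 * 10^9 / 48000 =
 * 2,000,000 ns (2 ms).  For playback that amount is subtracted from the
 * link timestamp, since the codec still holds those samples; for capture
 * it is added on top.
 */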

/*
 * PCM ops
 */

static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);

	trace_azx_pcm_close(chip, azx_dev);
	mutex_lock(&chip->open_mutex);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}

static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	int ret;

	trace_azx_pcm_hw_params(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		ret = -EBUSY;
		goto unlock;
	}

	azx_dev->core.bufsize = 0;
	azx_dev->core.period_bytes = 0;
	azx_dev->core.format_val = 0;
	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));

unlock:
	dsp_unlock(azx_dev);
	return ret;
}

static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev))
		snd_hdac_stream_cleanup(azx_stream(azx_dev));

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = snd_pcm_lib_free_pages(substream);
	azx_stream(azx_dev)->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	trace_azx_pcm_prepare(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	snd_hdac_stream_reset(azx_stream(azx_dev));
	format_val = snd_hdac_calc_stream_format(runtime->rate,
						 runtime->channels,
						 runtime->format,
						 hinfo->maxbps,
						 ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
	if (err < 0)
		goto unlock;

	snd_hdac_stream_setup(azx_stream(azx_dev));

	stream_tag = azx_dev->core.stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->core.format_val, substream);

unlock:
	if (!err)
		azx_stream(azx_dev)->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct hdac_bus *bus = azx_bus(chip);
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	struct hdac_stream *hstr;
	bool start;
	int sbits = 0;
	int sync_reg;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	hstr = azx_stream(azx_dev);
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		sync_reg = AZX_REG_OLD_SSYNC;
	else
		sync_reg = AZX_REG_SSYNC;

	if (dsp_is_locked(azx_dev) || !hstr->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = true;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = false;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->core.index;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&bus->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->insufficient = 1;
			snd_hdac_stream_start(azx_stream(azx_dev), true);
		} else {
			snd_hdac_stream_stop(azx_stream(azx_dev));
		}
	}
	spin_unlock(&bus->reg_lock);

	snd_hdac_stream_sync(hstr, start, sbits);

	spin_lock(&bus->reg_lock);
	/* reset SYNC bits */
	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
	if (start)
		snd_hdac_stream_timecounter_init(hstr, sbits);
	spin_unlock(&bus->reg_lock);
	return 0;
}
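
/*
 * Position reporting: the controller exposes the current DMA position both
 * via the per-stream LPIB register and via the DMA position buffer in
 * memory.  The helpers below wrap the two sources; azx_get_position() picks
 * one through chip->get_position[] and falls back to the position buffer.
 */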

unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);

unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);

unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->core.substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	if (pos >= azx_dev->core.bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);

static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	return bytes_to_frames(substream->runtime,
			       azx_get_position(chip, azx_dev));
}
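
/*
 * The scaling below relies on the identity
 *   (base * num) / den = (base / den) * num + ((base % den) * num) / den,
 * computing quotient and remainder separately so that the intermediate
 * products stay within 64 bits as long as num and den are 32-bit values.
 * For example, with base = 10^12, num = 10^9 and den = 48000 the result is
 * (10^12 / 48000) * 10^9 + ((10^12 % 48000) * 10^9) / 48000.
 */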

/*
 * azx_scale64: Scale base by mult/div while not overflowing sanely
 *
 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
 *
 * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K,
 * which is about 384307 seconds, i.e. ~4.5 days.
 *
 * This scales the calculation so that overflow will happen but after 2^64 /
 * 48000 secs, which is pretty large!
 *
 * In the calculation below:
 *	base may overflow, but since there isn't any additional division
 *	performed on base it's OK
 *	rem can't overflow because both are 32-bit values
 */

#ifdef CONFIG_X86
static u64 azx_scale64(u64 base, u32 num, u32 den)
{
	u64 rem;

	rem = do_div(base, den);

	base *= num;
	rem *= num;

	do_div(rem, den);

	return base + rem;
}
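
/*
 * Cross-timestamp capture, roughly as implemented below: select the DMA
 * channel and direction in GTSCC, enable the timestamp capture, poll until
 * the TSCCD bit reports completion, then read the wall frame counter
 * (WALFCC), the ART/TSC counter (TSCCL/TSCCU) and the link position counter
 * (LLPCL/LLPCU) as one coherent snapshot.  Samples taken too close to a
 * frame rollover are retried.
 */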

static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	struct snd_pcm_substream *substream = ctx;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct snd_pcm_runtime *runtime;
	u64 ll_counter, ll_counter_l, ll_counter_h;
	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
	u32 wallclk_ctr, wallclk_cycles;
	bool direction;
	u32 dma_select;
	u32 timeout = 200;
	u32 retry_count = 0;

	runtime = substream->runtime;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		direction = 1;
	else
		direction = 0;

	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
	do {
		timeout = 100;
		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
			     (azx_dev->core.stream_tag - 1);
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);

		/* Enable the capture */
		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);

		while (timeout) {
			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
			    GTSCC_TSCCD_MASK)
				break;

			timeout--;
		}

		if (!timeout) {
			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
			return -EIO;
		}

		/* Read wall clock counter */
		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);

		/* Read TSC counter */
		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);

		/* Read Link counter */
		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);

		/* Ack: registers read done */
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);

		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
			      tsc_counter_l;

		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;

		/*
		 * An error occurs near frame "rollover". The clocks in
		 * frame value indicates whether this error may have
		 * occurred. Here we use the value of 10 i.e.,
		 * HDA_MAX_CYCLE_OFFSET
		 */
		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
		    && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
			break;

		/*
		 * Sleep before we read again, else we may again get a
		 * value near to MAX_CYCLE. Try to sleep for a different
		 * amount of time so we don't hit the same number again.
		 */
		udelay(retry_count++);

	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);

	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
		dev_err_ratelimited(chip->card->dev,
				    "Error in WALFCC cycle count\n");
		return -EIO;
	}

	*device = ns_to_ktime(azx_scale64(ll_counter,
					  NSEC_PER_SEC, runtime->rate));
	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));

	*system = convert_art_to_tsc(tsc_counter);

	return 0;
}

#else
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	return -ENXIO;
}
#endif

static int azx_get_crosststamp(struct snd_pcm_substream *substream,
			       struct system_device_crosststamp *xtstamp)
{
	return get_device_system_crosststamp(azx_get_sync_time,
					     substream, NULL, xtstamp);
}

static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
					  struct snd_pcm_audio_tstamp_config *ts)
{
	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
			return true;

	return false;
}

static int azx_get_time_info(struct snd_pcm_substream *substream,
			     struct timespec *system_ts, struct timespec *audio_ts,
			     struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			     struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct system_device_crosststamp xtstamp;
	int ret;
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
	    (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&azx_dev->core.tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {

		ret = azx_get_crosststamp(substream, &xtstamp);
		if (ret)
			return ret;

		switch (runtime->tstamp_type) {
		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
			return -EINVAL;

		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
			break;

		default:
			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
			break;

		}

		*audio_ts = ktime_to_timespec(xtstamp.device);

		audio_tstamp_report->actual_type =
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
		audio_tstamp_report->accuracy_report = 1;
		/* 24 MHz WallClock == 42ns resolution */
		audio_tstamp_report->accuracy = 42;

	} else {
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
	}

	return 0;
}
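
/*
 * Baseline hardware description.  These are only the controller-side
 * defaults; azx_pcm_open() below replaces the formats, rates and channel
 * counts with whatever the codec's hda_pcm_stream actually supports.
 */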

static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
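
/*
 * Note on the buffer-time cap applied in azx_pcm_open(): the HD-audio wall
 * clock is a 32-bit counter running at 24 MHz, so it wraps after roughly
 * 2^32 / 24e6 ~= 179 seconds.  Limiting the buffer time to 178 seconds is
 * presumably meant to keep one buffer worth of audio safely below that
 * wrap-around period.
 */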

static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	int buff_step;

	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	trace_azx_pcm_open(chip, azx_dev);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->private_data = azx_dev;

	runtime->hw = azx_pcm_hw;
	if (chip->gts_present)
		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	/* For some reason, the playback stream stalls on PulseAudio with
	 * tsched=1 when a capture stream triggers. Until we figure out the
	 * real cause, disable tsched mode by setting the BATCH flag in the
	 * PCM info.
	 */
	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be a multiple of 128 bytes.
		 * This is more efficient in terms of memory access but
		 * isn't required by the HDA spec and prevents users from
		 * specifying exact period/buffer sizes. For example, at
		 * 44.1 kHz a period size set to 20 ms will be rounded to
		 * 19.59 ms.
		 */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes; they still need to
		 * be a multiple of 4 bytes (HDA spec). Tested on Intel HDA
		 * controllers, may not work on all devices where the
		 * option needs to be disabled.
		 */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	 * until we figure out how to handle digital inputs
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

powerdown:
	snd_hda_power_down(apcm->codec);
unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}

static int azx_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *area)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	if (chip->ops->pcm_mmap_prepare)
		chip->ops->pcm_mmap_prepare(substream, area);
	return snd_pcm_lib_default_mmap(substream, area);
}

static const struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info = azx_get_time_info,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};

static void azx_pcm_free(struct snd_pcm *pcm)
{
	struct azx_pcm *apcm = pcm->private_data;
	if (apcm) {
		list_del(&apcm->list);
		apcm->info->pcm = NULL;
		kfree(apcm);
	}
}

#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
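
/*
 * snd_hda_attach_pcm_stream() below creates the ALSA PCM device for one
 * hda_pcm, wires up azx_pcm_ops for every direction that has substreams,
 * and preallocates scatter-gather buffers for it (capped at
 * MAX_PREALLOC_SIZE, using uncached pages when chip->uc_buffer is set).
 */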

int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
			      struct hda_pcm *cpcm)
{
	struct hdac_bus *bus = &_bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;
	int type = SNDRV_DMA_TYPE_DEV_SG;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL) {
		snd_device_free(chip->card, pcm);
		return -ENOMEM;
	}
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	if (chip->uc_buffer)
		type = SNDRV_DMA_TYPE_DEV_UC_SG;
	snd_pcm_lib_preallocate_pages_for_all(pcm, type,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	return 0;
}

static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (addr >= AZX_MAX_CODECS) {
		snd_BUG();
		addr = 0;
	}

	return addr;
}

/* receive a response */
static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
				 unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);
	struct hda_bus *hbus = &chip->bus;
	unsigned long timeout;
	unsigned long loopcounter;
	int do_poll = 0;
	bool warned = false;

again:
	timeout = jiffies + msecs_to_jiffies(1000);

	for (loopcounter = 0;; loopcounter++) {
		spin_lock_irq(&bus->reg_lock);
		if (bus->polling_mode || do_poll)
			snd_hdac_bus_update_rirb(bus);
		if (!bus->rirb.cmds[addr]) {
			if (!do_poll)
				bus->poll_count = 0;
			if (res)
				*res = bus->rirb.res[addr]; /* the last value */
			spin_unlock_irq(&bus->reg_lock);
			return 0;
		}
		spin_unlock_irq(&bus->reg_lock);
		if (time_after(jiffies, timeout))
			break;
#define LOOP_COUNT_MAX	3000
		if (hbus->needs_damn_long_delay ||
		    loopcounter > LOOP_COUNT_MAX) {
			if (loopcounter > LOOP_COUNT_MAX && !warned) {
				dev_dbg_ratelimited(chip->card->dev,
						    "too slow response, last cmd=%#08x\n",
						    bus->last_cmd[addr]);
				warned = true;
			}
			msleep(2); /* temporary workaround */
		} else {
			udelay(10);
			cond_resched();
		}
	}

	if (hbus->no_response_fallback)
		return -EIO;

	if (!bus->polling_mode && bus->poll_count < 2) {
		dev_dbg(chip->card->dev,
			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
			bus->last_cmd[addr]);
		do_poll = 1;
		bus->poll_count++;
		goto again;
	}

	if (!bus->polling_mode) {
		dev_warn(chip->card->dev,
			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		bus->polling_mode = 1;
		goto again;
	}

	if (chip->msi) {
		dev_warn(chip->card->dev,
			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		if (chip->ops->disable_msi_reset_irq &&
		    chip->ops->disable_msi_reset_irq(chip) < 0)
			return -EIO;
		goto again;
	}

	if (chip->probing) {
		/* If this critical timeout happens during the codec probing
		 * phase, this is likely an access to a non-existing codec
		 * slot.  Better to return an error and reset the system.
		 */
		return -EIO;
	}

	/* no fallback mechanism? */
	if (!chip->fallback_to_single_cmd)
		return -EIO;

	/* a fatal communication error; need either to reset or to fallback
	 * to the single_cmd mode
	 */
	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
		hbus->response_reset = 1;
		dev_err(chip->card->dev,
			"No response from codec, resetting bus: last cmd=0x%08x\n",
			bus->last_cmd[addr]);
		return -EAGAIN; /* give a chance to retry */
	}

	dev_WARN(chip->card->dev,
		 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
		 bus->last_cmd[addr]);
	chip->single_cmd = 1;
	hbus->response_reset = 0;
	snd_hdac_bus_stop_cmd_io(bus);
	return -EIO;
}
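
/*
 * Summary of the escalation above when a RIRB response never arrives: poll
 * the RIRB once, then switch the bus to polling mode permanently, then (if
 * MSI is in use) fall back to legacy interrupts, and finally either request
 * a bus reset or give up on CORB/RIRB altogether and switch to single_cmd
 * mode, depending on the fallback flags.
 */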

/*
 * Use the single immediate command instead of CORB/RIRB for simplicity
 *
 * Note: according to Intel, this is not the preferred use.  The command was
 * intended for the BIOS only, and may get confused with unsolicited
 * responses.  So, we shouldn't use it for normal operation from the
 * driver.
 * I left the code in, however, for debugging/testing purposes.
 */

/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	azx_bus(chip)->rirb.res[addr] = -1;
	return -EIO;
}

/* send a command */
static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
{
	struct azx *chip = bus_to_azx(bus);
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->last_cmd[azx_command_addr(val)] = val;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}

/* receive a response */
static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
				   unsigned int *res)
{
	if (res)
		*res = bus->rirb.res[addr];
	return 0;
}

/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */

/* send a command */
static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
{
	struct azx *chip = bus_to_azx(bus);

	if (chip->disabled)
		return 0;
	if (chip->single_cmd)
		return azx_single_send_cmd(bus, val);
	else
		return snd_hdac_bus_send_cmd(bus, val);
}

/* get a response */
static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
			    unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);

	if (chip->disabled)
		return 0;
	if (chip->single_cmd)
		return azx_single_get_response(bus, addr, res);
	else
		return azx_rirb_get_response(bus, addr, res);
}

static const struct hdac_bus_ops bus_core_ops = {
	.command = azx_send_cmd,
	.get_response = azx_get_response,
};

#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
 * DSP loading code (e.g. for CA0132)
 */

/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		if (s->index == chip->playback_index_offset)
			return stream_to_azx_dev(s);

	return NULL;
}

int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
				   unsigned int byte_size,
				   struct snd_dma_buffer *bufp)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev;
	struct hdac_stream *hstr;
	bool saved = false;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);
	hstr = azx_stream(azx_dev);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened) {
		chip->saved_azx_dev = *azx_dev;
		saved = true;
	}
	spin_unlock_irq(&bus->reg_lock);

	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
	if (err < 0) {
		spin_lock_irq(&bus->reg_lock);
		if (saved)
			*azx_dev = chip->saved_azx_dev;
		spin_unlock_irq(&bus->reg_lock);
		return err;
	}

	hstr->prepared = 0;
	return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);

void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);

void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
				    struct snd_dma_buffer *dmab)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
	struct hdac_stream *hstr = azx_stream(azx_dev);

	if (!dmab->area || !hstr->locked)
		return;

	snd_hdac_dsp_cleanup(hstr, dmab);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened)
		*azx_dev = chip->saved_azx_dev;
	hstr->locked = false;
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
#endif /* CONFIG_SND_HDA_DSP_LOADER */

/*
 * reset and start the controller registers
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
		/* correct RINTCNT for CXT */
		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
			azx_writew(chip, RINTCNT, 0xc0);
	}
}
EXPORT_SYMBOL_GPL(azx_init_chip);

void azx_stop_all_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		snd_hdac_stream_stop(s);
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);

void azx_stop_chip(struct azx *chip)
{
	snd_hdac_bus_stop_chip(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_chip);

/*
 * interrupt handler
 */
static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
{
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = stream_to_azx_dev(s);

	/* check whether this IRQ is really acceptable */
	if (!chip->ops->position_check ||
	    chip->ops->position_check(chip, azx_dev)) {
		spin_unlock(&bus->reg_lock);
		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
		spin_lock(&bus->reg_lock);
	}
}

irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct hdac_bus *bus = azx_bus(chip);
	u32 status;
	bool active, handled = false;
	int repeat = 0; /* count for avoiding endless loop */

#ifdef CONFIG_PM
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&bus->reg_lock);

	if (chip->disabled)
		goto unlock;

	do {
		status = azx_readl(chip, INTSTS);
		if (status == 0 || status == 0xffffffff)
			break;

		handled = true;
		active = false;
		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
			active = true;

		/* clear rirb int */
		status = azx_readb(chip, RIRBSTS);
		if (status & RIRB_INT_MASK) {
			active = true;
			if (status & RIRB_INT_RESPONSE) {
				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
					udelay(80);
				snd_hdac_bus_update_rirb(bus);
			}
			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
		}
	} while (active && ++repeat < 10);

unlock:
	spin_unlock(&bus->reg_lock);

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);

/*
 * Codec interface
 */

/*
 * Probe the given codec address
 */
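
/*
 * The probe verb below is assembled as (codec address << 28) |
 * (node id << 20) | (verb << 8) | parameter, i.e. a "get parameters" verb
 * reading the vendor ID from the root node; any valid response means a
 * codec is present in that slot.
 */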

static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	struct hdac_bus *bus = azx_bus(chip);
	int err;
	unsigned int res = -1;

	mutex_lock(&bus->cmd_mutex);
	chip->probing = 1;
	azx_send_cmd(bus, cmd);
	err = azx_get_response(bus, addr, &res);
	chip->probing = 0;
	mutex_unlock(&bus->cmd_mutex);
	if (err < 0 || res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}

void snd_hda_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus_to_azx(&bus->core);

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (bus->core.chip_init)
		snd_hda_bus_reset_codecs(bus);
	bus->in_reset = 0;
}

/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model)
{
	struct hda_bus *bus = &chip->bus;
	int err;

	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
	if (err < 0)
		return err;

	bus->card = chip->card;
	mutex_init(&bus->prepare_mutex);
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->mixer_assigned = -1;
	bus->core.snoop = azx_snoop(chip);
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib)
		bus->core.use_posbuf = true;
	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
		bus->core.corbrp_self_clear = true;

	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
		bus->core.align_bdle_4k = true;

	/* AMD chipsets often cause communication stalls upon certain
	 * sequences such as pin detection.  It seems that forcing the
	 * synced access works around the stall.  Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		bus->core.sync_write = 1;
		bus->allow_bus_reset = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_init);

/* Probe codecs */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hdac_bus *bus = azx_bus(chip);
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSes report codec slots that don't
				 * actually exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				bus->codec_mask &= ~(1 << c);
				/* Worse, accessing a non-existing codec
				 * often screws up the controller chip and
				 * disturbs further communication.  Thus if
				 * an error occurs during probing, better to
				 * reset the controller chip to get back to
				 * a sane state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = chip->jackpoll_interval;
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);

/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
	struct hda_codec *codec, *next;

	/* use the _safe version here since snd_hda_codec_configure()
	 * deregisters the device upon error and deletes itself from the
	 * bus list.
	 */
	list_for_each_codec_safe(codec, next, &chip->bus) {
		snd_hda_codec_configure(codec);
	}

	if (!azx_bus(chip)->num_codecs)
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);
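
/*
 * Stream index layout, as assumed by the helpers below: the indices in
 * [capture_index_offset, capture_index_offset + capture_streams) belong to
 * capture engines, everything else is treated as playback (the DSP loader
 * above likewise picks the stream at playback_index_offset).
 */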

static int stream_direction(struct azx *chip, unsigned char index)
{
	if (index >= chip->capture_index_offset &&
	    index < chip->capture_index_offset + chip->capture_streams)
		return SNDRV_PCM_STREAM_CAPTURE;
	return SNDRV_PCM_STREAM_PLAYBACK;
}

/* initialize SD streams */
int azx_init_streams(struct azx *chip)
{
	int i;
	int stream_tags[2] = { 0, 0 };

	/* initialize each stream (aka device);
	 * assign the starting BDL address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
		int dir, tag;

		if (!azx_dev)
			return -ENOMEM;

		dir = stream_direction(chip, i);
		/* stream tag must be unique throughout
		 * the stream direction group,
		 * valid values 1...15
		 * use a separate stream tag if the flag
		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			tag = ++stream_tags[dir];
		else
			tag = i + 1;
		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
				     i, dir, tag);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_streams);

void azx_free_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	while (!list_empty(&bus->stream_list)) {
		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
		list_del(&s->list);
		kfree(stream_to_azx_dev(s));
	}
}
EXPORT_SYMBOL_GPL(azx_free_streams);