// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Implementation of primary alsa driver code base for Intel HD Audio.
 *
 *  Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                     PeiSen Hou <pshou@realtek.com.tw>
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#ifdef CONFIG_X86
/* for art-tsc conversion */
#include <asm/tsc.h>
#endif

#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"

#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"

/* DSP lock helpers */
#define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
#define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
#define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))

/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	struct hdac_stream *s;

	s = snd_hdac_stream_assign(azx_bus(chip), substream);
	if (!s)
		return NULL;
	return stream_to_azx_dev(s);
}

/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	snd_hdac_stream_release(azx_stream(azx_dev));
}

static inline struct hda_pcm_stream *
to_hda_pcm_stream(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	return &apcm->info->stream[substream->stream];
}
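/*
 * Informal note on azx_adjust_codec_delay() below: it converts the
 * codec-reported delay from frames to nanoseconds and folds it into the
 * link timestamp.  As a rough worked example (illustrative numbers only),
 * assuming a 48 kHz stream and a codec reporting 64 frames of latency:
 *   codec_nsecs = 64 * 10^9 / 48000 ~= 1.33 ms
 * which is added to the timestamp for capture and subtracted (clamped at
 * zero) for playback.
 */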
static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
				u64 nsec)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	u64 codec_frames, codec_nsecs;

	if (!hinfo->ops.get_delay)
		return nsec;

	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
	codec_nsecs = div_u64(codec_frames * 1000000000LL,
			      substream->runtime->rate);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return nsec + codec_nsecs;

	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}

/*
 * PCM ops
 */

static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);

	trace_azx_pcm_close(chip, azx_dev);
	mutex_lock(&chip->open_mutex);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}

static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	int ret = 0;

	trace_azx_pcm_hw_params(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		ret = -EBUSY;
		goto unlock;
	}

	azx_dev->core.bufsize = 0;
	azx_dev->core.period_bytes = 0;
	azx_dev->core.format_val = 0;

 unlock:
	dsp_unlock(azx_dev);
	return ret;
}

static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev))
		snd_hdac_stream_cleanup(azx_stream(azx_dev));

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	azx_stream(azx_dev)->prepared = 0;
	dsp_unlock(azx_dev);
	return 0;
}
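/*
 * Prepare path sketch (informal summary of the code below, not
 * authoritative): the stream is reset, an SDxFMT-style format value is
 * computed from the runtime rate/channels/format via
 * snd_hdac_calc_stream_format(), the controller stream registers and BDL
 * are programmed, and finally the codec side is prepared with the
 * (possibly workaround-adjusted) stream tag.
 */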
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	trace_azx_pcm_prepare(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	snd_hdac_stream_reset(azx_stream(azx_dev));
	format_val = snd_hdac_calc_stream_format(runtime->rate,
						 runtime->channels,
						 runtime->format,
						 hinfo->maxbps,
						 ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
	if (err < 0)
		goto unlock;

	snd_hdac_stream_setup(azx_stream(azx_dev));

	stream_tag = azx_dev->core.stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->core.format_val, substream);

 unlock:
	if (!err)
		azx_stream(azx_dev)->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}

static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct hdac_bus *bus = azx_bus(chip);
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	struct hdac_stream *hstr;
	bool start;
	int sbits = 0;
	int sync_reg;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	hstr = azx_stream(azx_dev);
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		sync_reg = AZX_REG_OLD_SSYNC;
	else
		sync_reg = AZX_REG_SSYNC;

	if (dsp_is_locked(azx_dev) || !hstr->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = true;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = false;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->core.index;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&bus->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->insufficient = 1;
			snd_hdac_stream_start(azx_stream(azx_dev), true);
		} else {
			snd_hdac_stream_stop(azx_stream(azx_dev));
		}
	}
	spin_unlock(&bus->reg_lock);

	snd_hdac_stream_sync(hstr, start, sbits);

	spin_lock(&bus->reg_lock);
	/* reset SYNC bits */
	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
	if (start)
		snd_hdac_stream_timecounter_init(hstr, sbits);
	spin_unlock(&bus->reg_lock);
	return 0;
}

unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
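/*
 * Position reporting note (informal): azx_get_pos_lpib() above reads the
 * per-stream Link Position In Buffer register, while azx_get_pos_posbuf()
 * below reads the DMA position buffer the controller writes to memory.
 * Which source is more reliable is chip dependent; azx_get_position()
 * defaults to the position buffer unless chip->get_position[] overrides it.
 */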
unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);

unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->core.substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	if (pos >= azx_dev->core.bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);

static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	return bytes_to_frames(substream->runtime,
			       azx_get_position(chip, azx_dev));
}
/*
 * azx_scale64: Scale base by mult/div while not overflowing sanely
 *
 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
 *
 * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K,
 * which is about 384307 s, i.e. ~4.5 days.
 *
 * This scales the calculation so that overflow will happen but after 2^64 /
 * 48000 secs, which is pretty large!
 *
 * In the calculation below:
 *	base may overflow, but since there isn't any additional division
 *	performed on base it's OK
 *	rem can't overflow because both are 32-bit values
 */

#ifdef CONFIG_X86
static u64 azx_scale64(u64 base, u32 num, u32 den)
{
	u64 rem;

	rem = do_div(base, den);

	base *= num;
	rem *= num;

	do_div(rem, den);

	return base + rem;
}
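/*
 * Rough usage sketch (illustrative numbers, not from the original source):
 * converting a link position counter of 10^12 frames at 48 kHz to
 * nanoseconds, azx_scale64(10^12, NSEC_PER_SEC, 48000) computes
 * (10^12 / 48000) * 10^9 + ((10^12 % 48000) * 10^9) / 48000, avoiding the
 * intermediate 10^12 * 10^9 product that would overflow 64 bits.
 */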
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	struct snd_pcm_substream *substream = ctx;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct snd_pcm_runtime *runtime;
	u64 ll_counter, ll_counter_l, ll_counter_h;
	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
	u32 wallclk_ctr, wallclk_cycles;
	bool direction;
	u32 dma_select;
	u32 timeout;
	u32 retry_count = 0;

	runtime = substream->runtime;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		direction = 1;
	else
		direction = 0;

	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
	do {
		timeout = 100;
		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
					(azx_dev->core.stream_tag - 1);
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);

		/* Enable the capture */
		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);

		while (timeout) {
			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
						GTSCC_TSCCD_MASK)
				break;

			timeout--;
		}

		if (!timeout) {
			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
			return -EIO;
		}

		/* Read wall clock counter */
		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);

		/* Read TSC counter */
		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);

		/* Read Link counter */
		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);

		/* Ack: registers read done */
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);

		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
						tsc_counter_l;

		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;

		/*
		 * An error occurs near frame "rollover". The clocks-in-frame
		 * value indicates whether this error may have occurred.
		 * Here we use the value of 10, i.e. HDA_MAX_CYCLE_OFFSET.
		 */
		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
			break;

		/*
		 * Sleep before we read again, else we may again get a
		 * value near to MAX_CYCLE. Try to sleep for a different
		 * amount of time so we don't hit the same number again.
		 */
		udelay(retry_count++);

	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);

	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
		dev_err_ratelimited(chip->card->dev,
				    "Error in WALFCC cycle count\n");
		return -EIO;
	}

	*device = ns_to_ktime(azx_scale64(ll_counter,
				NSEC_PER_SEC, runtime->rate));
	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));

	*system = convert_art_to_tsc(tsc_counter);

	return 0;
}

#else
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	return -ENXIO;
}
#endif

static int azx_get_crosststamp(struct snd_pcm_substream *substream,
			       struct system_device_crosststamp *xtstamp)
{
	return get_device_system_crosststamp(azx_get_sync_time,
					     substream, NULL, xtstamp);
}

static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
					  struct snd_pcm_audio_tstamp_config *ts)
{
	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
			return true;

	return false;
}

static int azx_get_time_info(struct snd_pcm_substream *substream,
			struct timespec64 *system_ts, struct timespec64 *audio_ts,
			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct system_device_crosststamp xtstamp;
	int ret;
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&azx_dev->core.tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec64(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {

		ret = azx_get_crosststamp(substream, &xtstamp);
		if (ret)
			return ret;

		switch (runtime->tstamp_type) {
		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
			return -EINVAL;

		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
			*system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
			break;

		default:
			*system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
			break;

		}

		*audio_ts = ktime_to_timespec64(xtstamp.device);

		audio_tstamp_report->actual_type =
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
		audio_tstamp_report->accuracy_report = 1;
		/* 24 MHz WallClock == 42ns resolution */
		audio_tstamp_report->accuracy = 42;

	} else {
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
	}

	return 0;
}
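/*
 * Note on the accuracy value used above (informal): the HDA wall clock runs
 * at 24 MHz, so one tick is 1/24 MHz ~= 41.7 ns, reported as 42 ns.
 *
 * The hardware description below is only the baseline; azx_pcm_open()
 * overrides the channel, format and rate limits with the codec-provided
 * hda_pcm_stream values.
 */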
static const struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
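/*
 * Buffer-time bound used in azx_pcm_open() below (informal derivation,
 * assuming the usual 32-bit wall clock running at 24 MHz): the counter
 * wraps after 2^32 / 24,000,000 ~= 178.9 s, hence the 178,000,000 us upper
 * constraint on SNDRV_PCM_HW_PARAM_BUFFER_TIME to avoid wrap-around within
 * a single buffer.
 */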
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	int buff_step;

	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	trace_azx_pcm_open(chip, azx_dev);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->private_data = azx_dev;

	runtime->hw = azx_pcm_hw;
	if (chip->gts_present)
		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

 powerdown:
	snd_hda_power_down(apcm->codec);
 unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}

static const struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info = azx_get_time_info,
};

static void azx_pcm_free(struct snd_pcm *pcm)
{
	struct azx_pcm *apcm = pcm->private_data;
	if (apcm) {
		list_del(&apcm->list);
		apcm->info->pcm = NULL;
		kfree(apcm);
	}
}

#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
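/*
 * Informal summary of snd_hda_attach_pcm_stream() below: it creates one ALSA
 * PCM device per hda_pcm, wires azx_pcm_ops to whichever directions have
 * substreams, and sets up managed buffer preallocation (SG DMA pages,
 * write-combined when chip->uc_buffer is set), capped at MAX_PREALLOC_SIZE.
 */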
int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
			      struct hda_pcm *cpcm)
{
	struct hdac_bus *bus = &_bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;
	int type = SNDRV_DMA_TYPE_DEV_SG;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strscpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL) {
		snd_device_free(chip->card, pcm);
		return -ENOMEM;
	}
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	if (chip->uc_buffer)
		type = SNDRV_DMA_TYPE_DEV_WC_SG;
	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
				       size, MAX_PREALLOC_SIZE);
	return 0;
}

static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (addr >= AZX_MAX_CODECS) {
		snd_BUG();
		addr = 0;
	}

	return addr;
}

/* receive a response */
static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
				 unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);
	struct hda_bus *hbus = &chip->bus;
	int err;

 again:
	err = snd_hdac_bus_get_response(bus, addr, res);
	if (!err)
		return 0;

	if (hbus->no_response_fallback)
		return -EIO;

	if (!bus->polling_mode) {
		dev_warn(chip->card->dev,
			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		bus->polling_mode = 1;
		goto again;
	}

	if (chip->msi) {
		dev_warn(chip->card->dev,
			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		if (chip->ops->disable_msi_reset_irq &&
		    chip->ops->disable_msi_reset_irq(chip) < 0)
			return -EIO;
		goto again;
	}

	if (chip->probing) {
		/* If this critical timeout happens during the codec probing
		 * phase, this is likely an access to a non-existing codec
		 * slot.  Better to return an error and reset the system.
		 */
		return -EIO;
	}

	/* no fallback mechanism? */
	if (!chip->fallback_to_single_cmd)
		return -EIO;

	/* a fatal communication error; need either to reset or to fallback
	 * to the single_cmd mode
	 */
	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
		hbus->response_reset = 1;
		dev_err(chip->card->dev,
			"No response from codec, resetting bus: last cmd=0x%08x\n",
			bus->last_cmd[addr]);
		return -EAGAIN; /* give a chance to retry */
	}

	dev_err(chip->card->dev,
		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
		bus->last_cmd[addr]);
	chip->single_cmd = 1;
	hbus->response_reset = 0;
	snd_hdac_bus_stop_cmd_io(bus);
	return -EIO;
}
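/*
 * Informal recovery-order summary for azx_rirb_get_response() above: on a
 * response timeout it first retries in polling mode, then retries with MSI
 * disabled, then (if permitted and not already in reset) requests a bus
 * reset via -EAGAIN, and only as a last resort falls back to single_cmd
 * mode and stops the CORB/RIRB DMA.
 */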
/*
 * Use the single immediate command instead of CORB/RIRB for simplicity
 *
 * Note: according to Intel, this is not preferred use. The command was
 * intended for the BIOS only, and may get confused with unsolicited
 * responses. So, we shouldn't use it for normal operation from the
 * driver.
 * I left the code, however, for debugging/testing purposes.
 */

/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	azx_bus(chip)->rirb.res[addr] = -1;
	return -EIO;
}

/* send a command */
static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
{
	struct azx *chip = bus_to_azx(bus);
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->last_cmd[azx_command_addr(val)] = val;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}

/* receive a response */
static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
				   unsigned int *res)
{
	if (res)
		*res = bus->rirb.res[addr];
	return 0;
}

/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */

/* send a command */
static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
{
	struct azx *chip = bus_to_azx(bus);

	if (chip->disabled)
		return 0;
	if (chip->single_cmd)
		return azx_single_send_cmd(bus, val);
	else
		return snd_hdac_bus_send_cmd(bus, val);
}

/* get a response */
static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
			    unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);

	if (chip->disabled)
		return 0;
	if (chip->single_cmd)
		return azx_single_get_response(bus, addr, res);
	else
		return azx_rirb_get_response(bus, addr, res);
}

static const struct hdac_bus_ops bus_core_ops = {
	.command = azx_send_cmd,
	.get_response = azx_get_response,
};
#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
 * DSP loading code (e.g. for CA0132)
 */

/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		if (s->index == chip->playback_index_offset)
			return stream_to_azx_dev(s);

	return NULL;
}

int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
				   unsigned int byte_size,
				   struct snd_dma_buffer *bufp)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev;
	struct hdac_stream *hstr;
	bool saved = false;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);
	hstr = azx_stream(azx_dev);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened) {
		chip->saved_azx_dev = *azx_dev;
		saved = true;
	}
	spin_unlock_irq(&bus->reg_lock);

	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
	if (err < 0) {
		spin_lock_irq(&bus->reg_lock);
		if (saved)
			*azx_dev = chip->saved_azx_dev;
		spin_unlock_irq(&bus->reg_lock);
		return err;
	}

	hstr->prepared = 0;
	return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);

void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);

void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
				    struct snd_dma_buffer *dmab)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
	struct hdac_stream *hstr = azx_stream(azx_dev);

	if (!dmab->area || !hstr->locked)
		return;

	snd_hdac_dsp_cleanup(hstr, dmab);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened)
		*azx_dev = chip->saved_azx_dev;
	hstr->locked = false;
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
#endif /* CONFIG_SND_HDA_DSP_LOADER */

/*
 * reset and start the controller registers
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
		/* correct RINTCNT for CXT */
		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
			azx_writew(chip, RINTCNT, 0xc0);
	}
}
EXPORT_SYMBOL_GPL(azx_init_chip);

void azx_stop_all_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		snd_hdac_stream_stop(s);
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);

void azx_stop_chip(struct azx *chip)
{
	snd_hdac_bus_stop_chip(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_chip);

/*
 * interrupt handler
 */
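/*
 * Locking note for stream_update() below (informal, hedged): the bus
 * reg_lock is dropped around snd_pcm_period_elapsed() because that call
 * takes the PCM stream lock, which other paths (e.g. the trigger callback)
 * already hold while acquiring reg_lock; calling it with reg_lock held
 * could therefore invert the lock order.
 */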
static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
{
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = stream_to_azx_dev(s);

	/* check whether this IRQ is really acceptable */
	if (!chip->ops->position_check ||
	    chip->ops->position_check(chip, azx_dev)) {
		spin_unlock(&bus->reg_lock);
		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
		spin_lock(&bus->reg_lock);
	}
}

irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct hdac_bus *bus = azx_bus(chip);
	u32 status;
	bool active, handled = false;
	int repeat = 0; /* count for avoiding endless loop */

#ifdef CONFIG_PM
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&bus->reg_lock);

	if (chip->disabled)
		goto unlock;

	do {
		status = azx_readl(chip, INTSTS);
		if (status == 0 || status == 0xffffffff)
			break;

		handled = true;
		active = false;
		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
			active = true;

		status = azx_readb(chip, RIRBSTS);
		if (status & RIRB_INT_MASK) {
			/*
			 * Clearing the interrupt status here ensures that no
			 * interrupt gets masked after the RIRB wp is read in
			 * snd_hdac_bus_update_rirb. This avoids a possible
			 * race condition where codec response in RIRB may
			 * remain unserviced by IRQ, eventually falling back
			 * to polling mode in azx_rirb_get_response.
			 */
			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
			active = true;
			if (status & RIRB_INT_RESPONSE) {
				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
					udelay(80);
				snd_hdac_bus_update_rirb(bus);
			}
		}
	} while (active && ++repeat < 10);

 unlock:
	spin_unlock(&bus->reg_lock);

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);

/*
 * Codec interface
 */

/*
 * Probe the given codec address
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	struct hdac_bus *bus = azx_bus(chip);
	int err;
	unsigned int res = -1;

	mutex_lock(&bus->cmd_mutex);
	chip->probing = 1;
	azx_send_cmd(bus, cmd);
	err = azx_get_response(bus, addr, &res);
	chip->probing = 0;
	mutex_unlock(&bus->cmd_mutex);
	if (err < 0 || res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}

void snd_hda_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus_to_azx(&bus->core);

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (bus->core.chip_init)
		snd_hda_bus_reset_codecs(bus);
	bus->in_reset = 0;
}
/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model)
{
	struct hda_bus *bus = &chip->bus;
	int err;

	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
	if (err < 0)
		return err;

	bus->card = chip->card;
	mutex_init(&bus->prepare_mutex);
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->mixer_assigned = -1;
	bus->core.snoop = azx_snoop(chip);
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib)
		bus->core.use_posbuf = true;
	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
		bus->core.corbrp_self_clear = true;

	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
		bus->core.align_bdle_4k = true;

	/* enable the sync_write flag for stable communication by default */
	bus->core.sync_write = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_init);

/* Probe codecs */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hdac_bus *bus = azx_bus(chip);
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				bus->codec_mask &= ~(1 << c);
				/* Worse, accessing a non-existing codec often
				 * screws up the controller chip and disturbs
				 * further communication.  Thus if an error
				 * occurs during probing, better to reset the
				 * controller chip to get back to a sane state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = chip->jackpoll_interval;
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);
/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
	struct hda_codec *codec, *next;

	/* use _safe version here since snd_hda_codec_configure() deregisters
	 * the device upon error and deletes itself from the bus list.
	 */
	list_for_each_codec_safe(codec, next, &chip->bus) {
		snd_hda_codec_configure(codec);
	}

	if (!azx_bus(chip)->num_codecs)
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);

static int stream_direction(struct azx *chip, unsigned char index)
{
	if (index >= chip->capture_index_offset &&
	    index < chip->capture_index_offset + chip->capture_streams)
		return SNDRV_PCM_STREAM_CAPTURE;
	return SNDRV_PCM_STREAM_PLAYBACK;
}

/* initialize SD streams */
int azx_init_streams(struct azx *chip)
{
	int i;
	int stream_tags[2] = { 0, 0 };

	/* initialize each stream (aka device)
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
		int dir, tag;

		if (!azx_dev)
			return -ENOMEM;

		dir = stream_direction(chip, i);
		/* stream tag must be unique throughout
		 * the stream direction group,
		 * valid values 1...15
		 * use separate stream tag if the flag
		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			tag = ++stream_tags[dir];
		else
			tag = i + 1;
		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
				     i, dir, tag);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_streams);

void azx_free_streams(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	while (!list_empty(&bus->stream_list)) {
		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
		list_del(&s->list);
		kfree(stream_to_azx_dev(s));
	}
}
EXPORT_SYMBOL_GPL(azx_free_streams);