// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

/*
 * Soundwire Intel Master Driver
 */

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <sound/pcm_params.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"

static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
	int timeout = 10;
	u32 reg_read;

	do {
		reg_read = readl(base + offset);
		if ((reg_read & mask) == target)
			return 0;

		timeout--;
		usleep_range(50, 100);
	} while (timeout != 0);

	return -EAGAIN;
}

static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, 0);
}

static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, mask);
}

/*
 * debugfs
 */
#ifdef CONFIG_DEBUG_FS

#define RD_BUF (2 * PAGE_SIZE)

static ssize_t intel_sprintf(void __iomem *mem, bool l,
			     char *buf, size_t pos, unsigned int reg)
{
	int value;

	if (l)
		value = intel_readl(mem, reg);
	else
		value = intel_readw(mem, reg);

	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
}
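
/*
 * Dump helper for the 'intel-registers' debugfs file: walks the SHIM link
 * capability/control registers, the per-link PCMSyCHM/PCMSyCHC PDI
 * configuration, the wake enable/status registers and the ALH STRMzCFG
 * entries. The output is bounded by RD_BUF (two pages), which is assumed
 * to be large enough for the register set printed below.
 */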
static int intel_reg_show(struct seq_file *s_file, void *data)
{
	struct sdw_intel *sdw = s_file->private;
	void __iomem *s = sdw->link_res->shim;
	void __iomem *a = sdw->link_res->alh;
	char *buf;
	ssize_t ret;
	int i, j;
	unsigned int links, reg;

	buf = kzalloc(RD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;

	ret = scnprintf(buf, RD_BUF, "Register Value\n");
	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");

	for (i = 0; i < links; i++) {
		reg = SDW_SHIM_LCAP + i * 4;
		ret += intel_sprintf(s, true, buf, ret, reg);
	}

	for (i = 0; i < links; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));

		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");

		/*
		 * the value 10 is the number of PDIs. We will need a
		 * cleanup to remove hard-coded Intel configurations
		 * from cadence_master.c
		 */
		for (j = 0; j < 10; j++) {
			ret += intel_sprintf(s, false, buf, ret,
					SDW_SHIM_PCMSYCHM(i, j));
			ret += intel_sprintf(s, false, buf, ret,
					SDW_SHIM_PCMSYCHC(i, j));
		}
		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");

		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
	}

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));

	seq_printf(s_file, "%s", buf);
	kfree(buf);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_reg);
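
/*
 * The two write-only debugfs attributes below let debug/test setups select
 * the port data mode used on the Master (m) and Slave (s) sides, up to the
 * static test patterns, instead of normal audio data. Since this changes
 * the hardware behavior behind the kernel's back, every write taints the
 * kernel with TAINT_USER.
 */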
static int intel_set_m_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.m_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
			 intel_set_m_datamode, "%llu\n");

static int intel_set_s_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.s_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
			 intel_set_s_datamode, "%llu\n");

static void intel_debugfs_init(struct sdw_intel *sdw)
{
	struct dentry *root = sdw->cdns.bus.debugfs;

	if (!root)
		return;

	sdw->debugfs = debugfs_create_dir("intel-sdw", root);

	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
			    &intel_reg_fops);

	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
			    &intel_set_m_datamode_fops);

	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
			    &intel_set_s_datamode_fops);

	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
}

static void intel_debugfs_exit(struct sdw_intel *sdw)
{
	debugfs_remove_recursive(sdw->debugfs);
}
#else
static void intel_debugfs_init(struct sdw_intel *sdw) {}
static void intel_debugfs_exit(struct sdw_intel *sdw) {}
#endif /* CONFIG_DEBUG_FS */

/*
 * shim ops
 */
/* this needs to be called with shim_lock */
static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 ioctl;

	/* Switch to MIP from Glue logic */
	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));

	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_DO);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= (SDW_SHIM_IOCTL_MIF);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
	ioctl &= ~(SDW_SHIM_IOCTL_COE);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	/* at this point Master IP has full control of the I/Os */
}

/* this needs to be called with shim_lock */
static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u16 ioctl;

	/* Glue logic */
	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
	ioctl |= SDW_SHIM_IOCTL_BKE;
	ioctl |= SDW_SHIM_IOCTL_COE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	/* at this point Integration Glue has full control of the I/Os */
}

/* this needs to be called with shim_lock */
static void intel_shim_init(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 ioctl = 0, act = 0;

	/* Initialize Shim */
	ioctl |= SDW_SHIM_IOCTL_BKE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_WPDD;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_DO;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_DOE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	intel_shim_glue_to_master_ip(sdw);

	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
	act |= SDW_SHIM_CTMCTL_DACTQE;
	act |= SDW_SHIM_CTMCTL_DODS;
	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
	usleep_range(10, 15);
}

static int intel_shim_check_wake(struct sdw_intel *sdw)
{
	void __iomem *shim;
	u16 wake_sts;

	shim = sdw->link_res->shim;
	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);

	return wake_sts & BIT(sdw->instance);
}

static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 wake_en, wake_sts;

	mutex_lock(sdw->link_res->shim_lock);
	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);

	if (wake_enable) {
		/* Enable the wakeup */
		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
	} else {
		/* Disable the wake up interrupt */
		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);

		/* Clear wake status */
		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
	}
	mutex_unlock(sdw->link_res->shim_lock);
}

static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	int sync_reg;

	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
	return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
}
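
/*
 * Link power-up/down is tracked through the shared link_res->shim_mask
 * bitfield: the SYNCPRD/SYNCCPU programming below only runs when the
 * first link of the controller comes up, and the global power-down
 * sequence only runs once the last link has been powered down again.
 */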
static int intel_link_power_up(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u32 *shim_mask = sdw->link_res->shim_mask;
	struct sdw_bus *bus = &sdw->cdns.bus;
	struct sdw_master_prop *prop = &bus->prop;
	u32 spa_mask, cpa_mask;
	u32 link_control;
	int ret = 0;
	u32 syncprd;
	u32 sync_reg;

	mutex_lock(sdw->link_res->shim_lock);

	/*
	 * The hardware relies on an internal counter, typically 4kHz,
	 * to generate the SoundWire SSP - which defines a 'safe'
	 * synchronization point between commands and audio transport
	 * and allows for multi link synchronization. The SYNCPRD value
	 * is only dependent on the oscillator clock provided to
	 * the IP, so adjust based on _DSD properties reported in DSDT
	 * tables. The values reported are based on either 24MHz
	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
	 */
	if (prop->mclk_freq % 6000000)
		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
	else
		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;

	if (!*shim_mask) {
		dev_dbg(sdw->cdns.dev, "powering up all links\n");

		/* we first need to program the SyncPRD/CPU registers */
		dev_dbg(sdw->cdns.dev,
			"first link up, programming SYNCPRD\n");

		/* set SyncPRD period */
		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);

		/* Set SyncCPU bit */
		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

		/* Link power up sequence */
		link_control = intel_readl(shim, SDW_SHIM_LCTL);

		/* only power-up enabled links */
		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);

		link_control |= spa_mask;

		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
			goto out;
		}

		/* SyncCPU will change once link is active */
		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
				     SDW_SHIM_SYNC_SYNCCPU, 0);
		if (ret < 0) {
			dev_err(sdw->cdns.dev,
				"Failed to set SHIM_SYNC: %d\n", ret);
			goto out;
		}
	}

	*shim_mask |= BIT(link_id);

	sdw->cdns.link_up = true;

	intel_shim_init(sdw);

out:
	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

static int intel_link_power_down(struct sdw_intel *sdw)
{
	u32 link_control, spa_mask, cpa_mask;
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u32 *shim_mask = sdw->link_res->shim_mask;
	int ret = 0;

	mutex_lock(sdw->link_res->shim_lock);

	if (!(*shim_mask & BIT(link_id)))
		dev_err(sdw->cdns.dev,
			"%s: Unbalanced power-up/down calls\n", __func__);

	sdw->cdns.link_up = false;

	intel_shim_master_ip_to_glue(sdw);

	*shim_mask &= ~BIT(link_id);

	if (!*shim_mask) {

		dev_dbg(sdw->cdns.dev, "powering down all links\n");

		/* Link power down sequence */
		link_control = intel_readl(shim, SDW_SHIM_LCTL);

		/* only power-down enabled links */
		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);

		link_control &= spa_mask;

		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);

			/*
			 * we leave the sdw->cdns.link_up flag as false since we've disabled
			 * the link at this point and cannot handle interrupts any longer.
			 */
		}
	}

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}
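
/*
 * Multi-link bank switches are coordinated through the SHIM SYNC register:
 * intel_shim_sync_arm() sets the per-link CMDSYNC bit to arm a link, and a
 * later write of SYNCGO (see intel_shim_sync_go_unlocked() below) triggers
 * the switch on all armed links at once, clearing their CMDSYNC bits.
 */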
static void intel_shim_sync_arm(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	u32 sync_reg;

	mutex_lock(sdw->link_res->shim_lock);

	/* update SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

	mutex_unlock(sdw->link_res->shim_lock);
}

static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	u32 sync_reg;

	/* Read SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);

	/*
	 * Set SyncGO bit to synchronously trigger a bank switch for
	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
	 * the Masters.
	 */
	sync_reg |= SDW_SHIM_SYNC_SYNCGO;

	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

	return 0;
}

static int intel_shim_sync_go(struct sdw_intel *sdw)
{
	int ret;

	mutex_lock(sdw->link_res->shim_lock);

	ret = intel_shim_sync_go_unlocked(sdw);

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/*
 * PDI routines
 */
static void intel_pdi_init(struct sdw_intel *sdw,
			   struct sdw_cdns_stream_config *config)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int pcm_cap;

	/* PCM Stream Capability */
	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));

	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);

	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
		config->pcm_bd, config->pcm_in, config->pcm_out);
}

static int
intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int count;

	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));

	/*
	 * WORKAROUND: on all existing Intel controllers, pdi
	 * number 2 reports channel count as 1 even though it
	 * supports 8 channels. Performing hardcoding for pdi
	 * number 2.
	 */
	if (pdi_num == 2)
		count = 7;

	/* zero based values for channel count in register */
	count++;

	return count;
}

static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
				   struct sdw_cdns_pdi *pdi,
				   unsigned int num_pdi,
				   unsigned int *num_ch)
{
	int i, ch_count = 0;

	for (i = 0; i < num_pdi; i++) {
		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
		ch_count += pdi->ch_count;
		pdi++;
	}

	*num_ch = ch_count;
	return 0;
}

static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
				      struct sdw_cdns_streams *stream)
{
	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
				&stream->num_ch_bd);

	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
				&stream->num_ch_in);

	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
				&stream->num_ch_out);

	return 0;
}
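
/*
 * Each PDI maps to an ALH stream id as (link_id * 16) + pdi->num + 3, with
 * two extra slots skipped for PDIs >= 2 since the Bulk and PCM streams are
 * not contiguous. For example, PDI 0 on link 0 uses ALH stream 3, while
 * PDI 2 on link 0 uses 0 * 16 + 2 + 3 + 2 = 7.
 */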
static void
intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int pdi_conf = 0;

	/* the Bulk and PCM streams are not contiguous */
	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;

	/*
	 * Program stream parameters to stream SHIM register
	 * This is applicable for PCM stream only.
	 */
	if (pdi->type != SDW_STREAM_PCM)
		return;

	if (pdi->dir == SDW_DATA_DIR_RX)
		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
	else
		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);

	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);

	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
}

static void
intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
	void __iomem *alh = sdw->link_res->alh;
	unsigned int link_id = sdw->instance;
	unsigned int conf;

	/* the Bulk and PCM streams are not contiguous */
	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;

	/* Program Stream config ALH register */
	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));

	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);

	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}

static int intel_params_stream(struct sdw_intel *sdw,
			       int stream,
			       struct snd_soc_dai *dai,
			       struct snd_pcm_hw_params *hw_params,
			       int link_id, int alh_stream_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_params_data params_data;

	params_data.stream = stream; /* direction */
	params_data.dai = dai;
	params_data.hw_params = hw_params;
	params_data.link_id = link_id;
	params_data.alh_stream_id = alh_stream_id;

	if (res->ops && res->ops->params_stream && res->dev)
		return res->ops->params_stream(res->dev,
					       &params_data);
	return -EIO;
}

static int intel_free_stream(struct sdw_intel *sdw,
			     int stream,
			     struct snd_soc_dai *dai,
			     int link_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_free_data free_data;

	free_data.stream = stream; /* direction */
	free_data.dai = dai;
	free_data.link_id = link_id;

	if (res->ops && res->ops->free_stream && res->dev)
		return res->ops->free_stream(res->dev,
					     &free_data);

	return 0;
}

/*
 * DAI routines
 */
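
/*
 * hw_params flow: allocate a Cadence PDI for the requested direction and
 * channel count, program the SHIM and ALH registers for that PDI, tell the
 * DSP firmware which ALH stream id to use through the params_stream
 * callback, and finally attach this master to the sdw_stream with the
 * derived stream/port configuration.
 */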
static int intel_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params,
			   struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	struct sdw_cdns_pdi *pdi;
	struct sdw_stream_config sconfig;
	struct sdw_port_config *pconfig;
	int ch, dir;
	int ret;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ch = params_channels(params);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		dir = SDW_DATA_DIR_RX;
	else
		dir = SDW_DATA_DIR_TX;

	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);

	if (!pdi) {
		ret = -EINVAL;
		goto error;
	}

	/* do run-time configurations for SHIM, ALH and PDI/PORT */
	intel_pdi_shim_configure(sdw, pdi);
	intel_pdi_alh_configure(sdw, pdi);
	sdw_cdns_config_stream(cdns, ch, dir, pdi);

	/* store pdi and hw_params, may be needed in prepare step */
	dai_runtime->paused = false;
	dai_runtime->suspended = false;
	dai_runtime->pdi = pdi;

	/* Inform DSP about PDI stream number */
	ret = intel_params_stream(sdw, substream->stream, dai, params,
				  sdw->instance,
				  pdi->intel_alh_id);
	if (ret)
		goto error;

	sconfig.direction = dir;
	sconfig.ch_count = ch;
	sconfig.frame_rate = params_rate(params);
	sconfig.type = dai_runtime->stream_type;

	sconfig.bps = snd_pcm_format_width(params_format(params));

	/* Port configuration */
	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
	if (!pconfig) {
		ret = -ENOMEM;
		goto error;
	}

	pconfig->num = pdi->num;
	pconfig->ch_mask = (1 << ch) - 1;

	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
				    pconfig, 1, dai_runtime->stream);
	if (ret)
		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);

	kfree(pconfig);
error:
	return ret;
}

static int intel_prepare(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	int ch, dir;
	int ret = 0;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime) {
		dev_err(dai->dev, "failed to get dai runtime in %s\n",
			__func__);
		return -EIO;
	}

	if (dai_runtime->suspended) {
		struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
		struct snd_pcm_hw_params *hw_params;

		hw_params = &rtd->dpcm[substream->stream].hw_params;

		dai_runtime->suspended = false;

		/*
		 * .prepare() is called after system resume, where we
		 * need to reinitialize the SHIM/ALH/Cadence IP.
		 * .prepare() is also called to deal with underflows,
		 * but in those cases we cannot touch ALH/SHIM
		 * registers
		 */

		/* configure stream */
		ch = params_channels(hw_params);
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			dir = SDW_DATA_DIR_RX;
		else
			dir = SDW_DATA_DIR_TX;

		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);

		/* Inform DSP about PDI stream number */
		ret = intel_params_stream(sdw, substream->stream, dai,
					  hw_params,
					  sdw->instance,
					  dai_runtime->pdi->intel_alh_id);
	}

	return ret;
}

static int
intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dai_runtime *dai_runtime;
	int ret;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	/*
	 * The sdw stream state will transition to RELEASED when stream->
	 * master_list is empty. So the stream state will transition to
	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
	 * cpu-dai.
	 */
	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
	if (ret < 0) {
		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
			dai_runtime->stream->name, ret);
		return ret;
	}

	ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
	if (ret < 0) {
		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
		return ret;
	}

	dai_runtime->pdi = NULL;

	return 0;
}

static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
				    void *stream, int direction)
{
	return cdns_set_sdw_stream(dai, stream, direction);
}

static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
				  int direction)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_cdns_dai_runtime *dai_runtime;

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return ERR_PTR(-EINVAL);

	return dai_runtime->stream;
}

static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_cdns_dai_runtime *dai_runtime;
	int ret = 0;

	/*
	 * The .trigger callback is used to send required IPC to audio
	 * firmware. The .free_stream callback will still be called
	 * by intel_free_stream() in the TRIGGER_SUSPEND case.
	 */
	if (res->ops && res->ops->trigger)
		res->ops->trigger(dai, cmd, substream->stream);

	dai_runtime = cdns->dai_runtime_array[dai->id];
	if (!dai_runtime) {
		dev_err(dai->dev, "failed to get dai runtime in %s\n",
			__func__);
		return -EIO;
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_SUSPEND:

		/*
		 * The .prepare callback is used to deal with xruns and resume operations.
		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
		 * but for resume operations the DMAs and SHIM registers need to be initialized.
		 * the .trigger callback is used to track the suspend case only.
		 */

		dai_runtime->suspended = true;

		ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
		break;

	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		dai_runtime->paused = true;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		dai_runtime->paused = false;
		break;
	default:
		break;
	}

	return ret;
}

static int intel_component_probe(struct snd_soc_component *component)
{
	int ret;

	/*
	 * make sure the device is pm_runtime_active before initiating
	 * bus transactions during the card registration.
	 * We use pm_runtime_resume() here, without taking a reference
	 * and releasing it immediately.
	 */
	ret = pm_runtime_resume(component->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	return 0;
}

static int intel_component_dais_suspend(struct snd_soc_component *component)
{
	struct snd_soc_dai *dai;

	/*
	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
	 * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
	 * Since the component suspend is called last, we can trap this corner case
	 * and force the DAIs to release their resources.
	 */
	for_each_component_dais(component, dai) {
		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
		struct sdw_intel *sdw = cdns_to_intel(cdns);
		struct sdw_cdns_dai_runtime *dai_runtime;
		int ret;

		dai_runtime = cdns->dai_runtime_array[dai->id];

		if (!dai_runtime)
			continue;

		if (dai_runtime->suspended)
			continue;

		if (dai_runtime->paused) {
			dai_runtime->suspended = true;

			ret = intel_free_stream(sdw, dai_runtime->direction, dai, sdw->instance);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
	.hw_params = intel_hw_params,
	.prepare = intel_prepare,
	.hw_free = intel_hw_free,
	.trigger = intel_trigger,
	.set_stream = intel_pcm_set_sdw_stream,
	.get_stream = intel_get_sdw_stream,
};

static const struct snd_soc_component_driver dai_component = {
	.name = "soundwire",
	.probe = intel_component_probe,
	.suspend = intel_component_dais_suspend,
	.legacy_dai_naming = 1,
};

static int intel_create_dai(struct sdw_cdns *cdns,
			    struct snd_soc_dai_driver *dais,
			    enum intel_pdi_type type,
			    u32 num, u32 off, u32 max_ch)
{
	int i;

	if (num == 0)
		return 0;

	for (i = off; i < (off + num); i++) {
		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
					      "SDW%d Pin%d",
					      cdns->instance, i);
		if (!dais[i].name)
			return -ENOMEM;

		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
			dais[i].playback.channels_min = 1;
			dais[i].playback.channels_max = max_ch;
		}

		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
			dais[i].capture.channels_min = 1;
			dais[i].capture.channels_max = max_ch;
		}

		dais[i].ops = &intel_pcm_dai_ops;
	}

	return 0;
}
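
/*
 * One DAI is registered per PCM PDI, named "SDW<instance> Pin<n>". The DAI
 * index space is laid out in the same order as the calls below: input PDIs
 * first, then output PDIs, then bidirectional PDIs, with 'off' tracking
 * the next free index for each group.
 */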
static int intel_register_dai(struct sdw_intel *sdw)
{
	struct sdw_cdns_dai_runtime **dai_runtime_array;
	struct sdw_cdns_stream_config config;
	struct sdw_cdns *cdns = &sdw->cdns;
	struct sdw_cdns_streams *stream;
	struct snd_soc_dai_driver *dais;
	int num_dai, ret, off = 0;

	/* Read the PDI config and initialize cadence PDI */
	intel_pdi_init(sdw, &config);
	ret = sdw_cdns_pdi_init(cdns, config);
	if (ret)
		return ret;

	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);

	/* DAIs are created based on total number of PDIs supported */
	num_dai = cdns->pcm.num_pdi;

	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
					 sizeof(struct sdw_cdns_dai_runtime *),
					 GFP_KERNEL);
	if (!dai_runtime_array)
		return -ENOMEM;
	cdns->dai_runtime_array = dai_runtime_array;

	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	/* Create PCM DAIs */
	stream = &cdns->pcm;

	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
			       off, stream->num_ch_in);
	if (ret)
		return ret;

	off += cdns->pcm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
			       off, stream->num_ch_out);
	if (ret)
		return ret;

	off += cdns->pcm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
			       off, stream->num_ch_bd);
	if (ret)
		return ret;

	return devm_snd_soc_register_component(cdns->dev, &dai_component,
					       dais, num_dai);
}

const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
	.debugfs_init = intel_debugfs_init,
	.debugfs_exit = intel_debugfs_exit,

	.register_dai = intel_register_dai,

	.check_clock_stop = intel_check_clock_stop,
	.start_bus = intel_start_bus,
	.start_bus_after_reset = intel_start_bus_after_reset,
	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
	.stop_bus = intel_stop_bus,

	.link_power_up = intel_link_power_up,
	.link_power_down = intel_link_power_down,

	.shim_check_wake = intel_shim_check_wake,
	.shim_wake = intel_shim_wake,

	.pre_bank_switch = intel_pre_bank_switch,
	.post_bank_switch = intel_post_bank_switch,

	.sync_arm = intel_shim_sync_arm,
	.sync_go_unlocked = intel_shim_sync_go_unlocked,
	.sync_go = intel_shim_sync_go,
	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
};
EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);