// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_pps.h"
#include "intel_tc.h"

/*
 * Pack up to four bytes from @src into a single u32, most significant
 * byte first, matching the layout of the AUX channel data registers.
 * @src_bytes beyond 4 is clamped; callers pass the remaining message
 * length and rely on that clamp.
 */
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

/*
 * Inverse of intel_dp_pack_aux(): scatter a u32 read from an AUX data
 * register into up to four bytes of @dst, most significant byte first.
 */
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

/*
 * Wait (up to 10ms) for the AUX channel to clear SEND_BUSY, i.e. for
 * the transfer the caller just kicked off to finish. Returns the final
 * value of the AUX control register whether or not the wait succeeded;
 * on timeout an error is logged as well.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

	/* _notrace reads in the poll loop keep the trace buffer quiet */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* Only a single divider is provided; index > 0 terminates the loop */
	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

/*
 * Build the AUX_CH_CTL value used to start a transfer on G4x..BDW
 * hardware, where the AUX bit clock divider must be programmed
 * explicitly.
 */
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

/*
 * Build the AUX_CH_CTL value for SKL+ hardware; the clock divider
 * argument is unused here (see skl_get_aux_clock_divider()). Also
 * selects the TBT IO path for type-C ports in TBT-alt mode.
 */
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

/*
 * Perform one raw AUX transfer: send @send_bytes from @send and read
 * back up to @recv_size bytes into @recv. Handles all the surrounding
 * machinery: TC port and PPS locking, AUX power domain wakerefs,
 * PM-QoS latency clamping, busy-wait for previous activity, and the
 * retry loops over clock dividers and attempts. Returns the number of
 * bytes received, or a negative errno.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* Local shadow: re-read for the WARN, and rate-limit it */
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Unwind in reverse order of acquisition above */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

/*
 * Fill in the 4-byte AUX message header: request type, 20-bit address,
 * and the length field (encoded as size - 1 per the DP spec framing
 * used here).
 */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set a the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

/*
 * drm_dp_aux .transfer hook: translate a drm_dp_aux_msg into one call
 * to intel_dp_aux_xfer(), decode the sink's reply byte, and return the
 * payload size (or a negative errno).
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		/* A size without a buffer (or vice versa) is a caller bug */
		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* G4x era: AUX channels B-D only (no AUX on the eDP-capable port A) */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

/* ILK era: AUX A lives in the CPU, AUX B-D moved to the PCH */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

/* SKL+: all AUX channels (A-F) back in the CPU/display engine */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

/* TGL+: fixed channels A-C plus the type-C (USBC1-6) channels */
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:	/* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:	/* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:	/* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:	/* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

/* Tear down state created by intel_dp_aux_init() */
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

/*
 * Hook up the platform-specific AUX vfuncs (register lookup, clock
 * divider, send-ctl encoding), register the drm_dp_aux transfer
 * callback, pick a human-readable name for the channel, and create the
 * PM-QoS request used to clamp CPU wakeup latency during transfers.
 */
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (DISPLAY_VER(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = &dev_priv->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (DISPLAY_VER(dev_priv) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch - AUX_CH_D_XELPD + AUX_CH_D),
					       encoder->base.name);
	else if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}