// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_pps.h"
#include "intel_tc.h"

/* Pack up to 4 message bytes, MSB first, into one 32-bit AUX data register value. */
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

/* Unpack up to 4 message bytes, MSB first, from one 32-bit AUX data register value. */
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
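/*
 * Illustrative example (not part of the driver): the AUX data registers
 * hold message bytes MSB first, so packing { 0x12, 0x34, 0x56 } yields
 * 0x12345600, and unpacking that value restores the original bytes:
 *
 *	u8 buf[3] = { 0x12, 0x34, 0x56 };
 *	u32 v = intel_dp_pack_aux(buf, sizeof(buf));	// v == 0x12345600
 *
 *	intel_dp_unpack_aux(v, buf, sizeof(buf));	// buf unchanged
 */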
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based on hrawclk, and the AUX block wants to
	 * run at 2MHz. So take the hrawclk value (in kHz), divide by 2000
	 * and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based on cdclk or the PCH rawclk, and the AUX
	 * block wants to run at 2MHz. So take the cdclk or PCH rawclk value
	 * (in kHz), divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}
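/*
 * Worked example for the dividers above (illustrative only): assuming a
 * 100 MHz rawclk, rawclk_freq is 100000 (kHz), so the divider comes out
 * as DIV_ROUND_CLOSEST(100000, 2000) == 50, i.e. a 2MHz AUX bit clock.
 */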
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * CNL: 3.2ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, so request the lowest
	 * possible wakeup latency to prevent the CPU from entering deep sleep
	 * states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to the DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required after errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip straight to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * Per BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so return -EBUSY and let the
	 * drm layer take care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
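/*
 * Illustrative example (not part of the driver): a native AUX read
 * (DP_AUX_NATIVE_READ == 0x9) of one byte at DPCD address 0x0000E
 * produces the header { 0x90, 0x00, 0x0e, 0x00 }:
 *
 *	txbuf[0] = (0x9 << 4) | 0x0;	// request | address[19:16]
 *	txbuf[1] = 0x00;		// address[15:8]
 *	txbuf[2] = 0x0e;		// address[7:0]
 *	txbuf[3] = 1 - 1;		// message size - 1
 */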
static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume the happy path and copy the data. The caller
			 * is expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
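/*
 * Illustrative usage (not part of the driver): once ->transfer is wired
 * up in intel_dp_aux_init() below, the generic DRM DP helpers route DPCD
 * accesses through the function above, e.g.:
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPCD_REV, &rev) == 1)
 *		// rev now holds the sink's DPCD revision
 */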
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
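/*
 * Illustrative only: callers never pick a platform variant above
 * directly; they go through the vfuncs assigned in intel_dp_aux_init()
 * below, as intel_dp_aux_wait_done() does:
 *
 *	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
 *	u32 status = intel_uncore_read(&i915->uncore, ch_ctl);
 */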
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
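/*
 * Illustrative only (encoder names here are hypothetical): with an
 * encoder named "DDI B" on AUX_CH_B the naming above yields "AUX B/DDI B",
 * while a TGL+ Type-C port on AUX_CH_USBC1 yields "AUX USBC1/<encoder name>".
 */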