// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"
#include "intel_tc_phy_regs.h"

/* Human-readable name of a TC port mode, for debug messages. */
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_DISCONNECTED] = "disconnected",
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	/* Report out-of-range modes as "disconnected" instead of crashing. */
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_DISCONNECTED;

	return names[mode];
}

/* True if @dig_port is a TypeC port currently in @mode. */
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
				  enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
}

bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}

bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}

bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}

/*
 * True if blocking the TC-cold state requires holding an AUX power well
 * reference: legacy TC ports on display version 11 and all TC ports on ADL-P.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
		IS_ALDERLAKE_P(i915);
}

/*
 * Power domain whose reference keeps the PHY out of the TC-cold state while
 * the port is in @mode.
 */
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
	if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
		return POWER_DOMAIN_TC_COLD_OFF;

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

/*
 * Block TC-cold for @mode, returning the wakeref and storing the acquired
 * power domain in @domain for a later tc_cold_unblock().
 */
static intel_wakeref_t
tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
		      enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	*domain = tc_cold_get_power_domain(dig_port, mode);

	return intel_display_power_get(i915, *domain);
}

/* Block TC-cold based on the port's current mode. */
static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
{
	return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
}

static void
tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
		intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	/*
	 * wakeref == -1, means some error happened saving save_depot_stack but
	 * power should still be put down and 0 is a invalid save_depot_stack
	 * id so can be used to skip it for non TC legacy ports.
	 */
	if (wakeref == 0)
		return;

	intel_display_power_put(i915, domain, wakeref);
}

/*
 * WARN if the power domain that keeps the PHY out of TC-cold for the current
 * mode is not enabled.
 */
static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool enabled;

	enabled = intel_display_power_is_enabled(i915,
						 tc_cold_get_power_domain(dig_port,
									  dig_port->tc_mode));
	drm_WARN_ON(&i915->drm, !enabled);
}

/* Lane assignment mask for this port, read from the FIA DFLEXDPSP register. */
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	/* All-ones indicates the FIA read aborted (PHY in TC-cold). */
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

u32
intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	/* All-ones indicates the FIA read aborted (PHY in TC-cold). */
	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

/* Maximum lane count usable by the port, based on the FIA lane assignment. */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	/* Only DP-alt mode is limited by the FIA lane assignment. */
	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(dig_port);

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}

void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	/* Lane reversal is only expected on legacy TC ports. */
	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(dig_port);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

/*
 * Reconcile the VBT's legacy-port flag with the observed live status: if a
 * hotplug is live in a mode the flag makes impossible, flip the flag.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status.
 */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

/*
 * Mask of TC port modes (BIT(TC_PORT_*)) with a live hotplug status, read
 * from the FIA live-state bits and SDEISR on ICL-class platforms.
 */
static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	/* All-ones indicates the FIA read aborted (PHY in TC-cold). */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

/* Mask of TC port modes with a live hotplug status on ADL-P (IOM registers). */
static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	struct intel_uncore *uncore = &i915->uncore;
	u32 val, mask = 0;

	/*
	 * On ADL-P HW/FW will wake from TCCOLD to complete the read access of
	 * registers in IOM. Note that this doesn't apply to PHY and FIA
	 * registers.
	 */
	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
		mask |= BIT(TC_PORT_DP_ALT);
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
		mask |= BIT(TC_PORT_TBT_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

/* Platform dispatcher for the live status mask. */
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_port_live_status_mask(dig_port);

	return icl_tc_port_live_status_mask(dig_port);
}

/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
 * is connected and it's ready to switch the ownership to display. The flag
 * will be left cleared when a TBT-alt sink is connected, where the PHY is
 * owned by the TBT subsystem and so switching the ownership to display is not
 * required.
 */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}

/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership.
 * The IOM firmware sets
 * this flag when it's ready to switch
 * the ownership to display, regardless of what sink is connected (TBT-alt,
 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
 * subsystem and so switching the ownership to display is not required.
 */
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}

/* Platform dispatcher for the PHY status complete flag. */
static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_phy_status_complete(dig_port);

	return icl_tc_phy_status_complete(dig_port);
}

/*
 * Take (@take == true) or release PHY ownership via the FIA safe-mode bit.
 * Returns false only if the FIA read aborted (PHY in TC-cold).
 */
static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    dig_port->tc_port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	return true;
}

/* On ADL-P PHY ownership is controlled via a DDI_BUF_CTL bit instead of the FIA. */
static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum port port = dig_port->base.port;
	u32 val;

	val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
	if (take)
		val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
	else
		val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
	intel_uncore_write(uncore, DDI_BUF_CTL(port), val);

	return true;
}

/* Platform dispatcher for taking/releasing PHY ownership. */
static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_phy_take_ownership(dig_port, take);

	return icl_tc_phy_take_ownership(dig_port, take);
}

static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
		return true;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
}

static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum port port = dig_port->base.port;
	u32 val;

	val =
	    intel_uncore_read(uncore, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}

/* Platform dispatcher for querying PHY ownership. */
static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_phy_is_owned(dig_port);

	return icl_tc_phy_is_owned(dig_port);
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask;
	int max_lanes;

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	/* Ownership is only needed for DP-alt and legacy sinks. */
	live_status_mask = tc_port_live_status_mask(dig_port);
	if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
			    dig_port->tc_port_name, live_status_mask);
		goto out_set_tbt_alt_mode;
	}

	/* A failure here is unexpected on legacy ports, hence the WARN. */
	if (!tc_phy_take_ownership(dig_port, true) &&
	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    dig_port->tc_port_name);
		goto out_release_phy;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    dig_port->tc_port_name,
			    max_lanes, required_lanes);
		goto out_release_phy;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_release_phy:
	tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		tc_phy_take_ownership(dig_port, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		dig_port->tc_mode = TC_PORT_DISCONNECTED;
		fallthrough;
	case TC_PORT_DISCONNECTED:
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}

/* True if the PHY state is consistent with the port's current TC mode. */
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
			    dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	/* On ADL-P the PHY complete flag is set in TBT mode as well.
 */
	if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
		return true;

	if (!tc_phy_is_owned(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
			    dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

/*
 * Derive the port's current mode from the HW/FW state (ownership, status
 * complete, live status), used when taking over the pre-driver state.
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	enum tc_port_mode mode;

	if (!tc_phy_is_owned(dig_port) ||
	    drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* fls() maps the highest set BIT(TC_PORT_*) back to the mode. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

/* Mode the port should be in given the live status; TBT-alt if nothing is live. */
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return TC_PORT_TBT_ALT;
}

/*
 * Disconnect the PHY and, unless @force_disconnect, reconnect it for the
 * currently live mode with @required_lanes.
 */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/* The AUX power well must be off across the mode reset. */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	icl_tc_phy_disconnect(dig_port);
	if (!force_disconnect)
		icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}

static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}

/*
 * Bring the port's mode in sync with the live status, blocking TC-cold with
 * the appropriate power domain around each step of the transition.
 */
static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
				      int required_lanes, bool force_disconnect)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wref;
	bool needs_reset = force_disconnect;

	if (!needs_reset) {
		/* Get power domain required to check the hotplug live status. */
		wref = tc_cold_block(dig_port, &domain);
		needs_reset = intel_tc_port_needs_reset(dig_port);
		tc_cold_unblock(dig_port, domain, wref);
	}

	if (!needs_reset)
		return;

	/* Get power domain required for resetting the mode. */
	wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);

	intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);

	/* Get power domain matching the new mode after reset.
 */
	tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
			fetch_and_zero(&dig_port->tc_lock_wakeref));
	if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);

	tc_cold_unblock(dig_port, domain, wref);
}

/* Initialize the link refcount; must only ever transition from 0. */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

/*
 * Sanitize the port's TC mode and PHY ownership taken over from the
 * BIOS/previous driver instance, based on the number of active links.
 */
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_encoder *encoder = &dig_port->base;
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);

	tc_cold_wref = tc_cold_block(dig_port, &domain);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY disconnected with %d active link(s)\n",
				    dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		/* Hold a TC-cold blocking reference for as long as links are active. */
		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);
	} else {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (dig_port->tc_mode != TC_PORT_TBT_ALT)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    dig_port->tc_port_name,
				    tc_port_mode_name(dig_port->tc_mode));
		icl_tc_phy_disconnect(dig_port);
	}

	tc_cold_unblock(dig_port, domain, tc_cold_wref);

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected;

	intel_tc_port_lock(dig_port);

	/* Connected only if a hotplug is live in the port's current mode. */
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);

	intel_tc_port_unlock(dig_port);

	return is_connected;
}

/*
 * Lock the port, canceling any pending delayed PHY disconnect and updating
 * the port mode first if no link reference is held.
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	mutex_lock(&dig_port->tc_lock);

	cancel_delayed_work(&dig_port->tc_disconnect_phy_work);

	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, required_lanes,
					  false);

	/* While locked the port must be in a valid mode and own the PHY (except TBT). */
	drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
		    !tc_phy_is_owned(dig_port));
}

void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

/**
 * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
 * @dig_port: digital port
 *
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This will happen in a delayed
 * manner after each aux transactions and modeset disables.
 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_digital_port *dig_port =
		container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);

	mutex_lock(&dig_port->tc_lock);

	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, 1, true);

	mutex_unlock(&dig_port->tc_lock);
}

/**
 * intel_tc_port_flush_work: flush the work disconnecting the PHY
 * @dig_port: digital port
 *
 * Flush the delayed work disconnecting an idle PHY.
 */
void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&dig_port->tc_disconnect_phy_work);
}

/* Unlock the port, scheduling a delayed PHY disconnect if it became idle. */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&dig_port->tc_lock);
}

bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

/* Acquire a link reference, keeping the port's mode fixed while it is held. */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	intel_tc_port_lock(dig_port);
	--dig_port->tc_link_refcount;
	intel_tc_port_unlock(dig_port);

	/*
	 * Disconnecting the PHY after the PHY's PLL gets disabled may
	 * hang the system on ADL-P, so disconnect the PHY here synchronously.
	 * TODO: remove this once the root cause of the ordering requirement
	 * is found/fixed.
 */
	intel_tc_port_flush_work(dig_port);
}

/*
 * Check whether the platform has a modular FIA, probed via the MODULAR_FIA
 * bit of FIA1's DFLEXDPSP register while TC-cold is blocked.
 */
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;
	u32 val;

	if (!INTEL_INFO(i915)->display.has_modular_fia)
		return false;

	mutex_lock(&dig_port->tc_lock);
	wakeref = tc_cold_block(dig_port, &domain);
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, domain, wakeref);
	mutex_unlock(&dig_port->tc_lock);

	/* All-ones indicates the FIA read aborted (PHY in TC-cold). */
	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	return val & MODULAR_FIA_MASK;
}

/* Map the port to its FIA instance and its index within that FIA. */
static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (tc_has_modular_fia(i915, dig_port)) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}

/* One-time init of the TypeC-specific state of a digital port. */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_mode = TC_PORT_DISCONNECTED;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}