// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

/* Human-readable name of a TC port mode, for debug/error messages only. */
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	/* Out-of-range values are reported once, then mapped to a safe name. */
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_TBT_ALT;

	return names[mode];
}

/*
 * Determine which FIA (Flexible IO Adapter) instance services this TC port
 * and the port's index within that instance, caching both in @dig_port
 * (tc_phy_fia / tc_phy_fia_idx) for later register accesses.
 */
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
			struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);
	u32 modular_fia;

	if (INTEL_INFO(i915)->display.has_modular_fia) {
		modular_fia = intel_uncore_read(&i915->uncore,
						PORT_TX_DFLEXDPSP(FIA1));
		modular_fia &= MODULAR_FIA_MASK;
	} else {
		modular_fia = 0;
	}

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (modular_fia) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}

/*
 * Read the DP lane-assignment bits for this port from the FIA DFLEXDPSP
 * register, returned shifted down to bit 0. An all-ones readback means the
 * PHY is in TCCOLD (register inaccessible) and is flagged with a WARN.
 */
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	WARN_ON(lane_mask == 0xffffffff);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

/*
 * Read the Type-C pin-assignment bits for this port from the FIA DFLEXPA1
 * register, returned shifted down to bit 0. An all-ones readback means the
 * PHY is in TCCOLD and is flagged with a WARN.
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	WARN_ON(pin_mask == 0xffffffff);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

/*
 * Return the maximum lane count usable on this port. Only DP-alt mode is
 * limited by the FIA lane assignment; all other modes get the full 4 lanes.
 * The display-core power domain is held around the register read.
 */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	/* Map the assigned-lane bitmask to a lane count. */
	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		/* fall-through */
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}

/*
 * Program the FIA DFLEXDPMLE1 register with the set of main-link lanes the
 * display engine will drive on this port. Lane reversal selects the mirrored
 * lane set and is only expected in legacy mode (WARNed otherwise).
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

/*
 * Cross-check the VBT-derived legacy-port flag against the observed live
 * status: a legacy port should only see legacy HPD, a non-legacy port only
 * DP-alt/TBT-alt HPD. On mismatch, flip the flag and trust the hardware.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to fix if all live bits fall within the expected set. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
		  dig_port->tc_port_name, live_status_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

/*
 * Return a bitmask of TC_PORT_* modes for which a sink is currently live on
 * this port: TBT-alt and DP-alt state from the FIA DFLEXDPSP register,
 * legacy hotplug state from the south-display SDEISR. Returns 0 when the
 * PHY is in TCCOLD (register reads back all-ones).
 */
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
			      dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!WARN_ON(hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

/*
 * Check whether the PHY has completed its mode handshake (DFLEXDPPMS).
 * A TCCOLD (all-ones) readback is treated as not complete.
 */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
			      dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}

/*
 * Put the PHY into (@enable == true) or take it out of safe mode via
 * DFLEXDPCSSS. Returns false if the PHY is in TCCOLD and the bit could not
 * be written. When entering safe mode, also waits (up to 10ms) for the
 * status-complete flag to clear; a timeout is only logged, not fatal.
 */
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
				     bool enable)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
			      dig_port->tc_port_name,
			      enableddisabled(enable));

		return false;
	}

	/* The register bit has inverted sense: NOT_SAFE set == safe mode off. */
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (!enable)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
		DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
			      dig_port->tc_port_name);

	return true;
}

/*
 * Report whether the PHY is currently in safe mode. TCCOLD (all-ones
 * readback) is conservatively reported as safe mode.
 */
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
			      dig_port->tc_port_name);
		return true;
	}

	return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	int max_lanes;

	/* Any failure below falls back to TBT-alt mode (tunnelled path). */
	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY not ready\n",
			      dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
	    !WARN_ON(dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		WARN_ON(max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
			      dig_port->tc_port_name);
		goto out_set_safe_mode;
	}

	if (max_lanes < required_lanes) {
		DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
			      dig_port->tc_port_name,
			      max_lanes, required_lanes);
		goto out_set_safe_mode;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_set_safe_mode:
	icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
		/* Nothing to do, we never disconnect from legacy mode */
		break;
	case TC_PORT_DP_ALT:
		icl_tc_phy_set_safe_mode(dig_port, true);
		dig_port->tc_mode = TC_PORT_TBT_ALT;
		break;
	case TC_PORT_TBT_ALT:
		/* Nothing to do, we stay in TBT-alt mode */
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}

/*
 * Report whether the PHY state is consistent with the cached tc_mode, i.e.
 * whether the port can be considered connected in its current mode.
 */
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
			      dig_port->tc_port_name);
		/* Without a completed handshake only TBT-alt is usable. */
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
			      dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

/*
 * Derive the port's current mode from hardware state, used during state
 * sanitization at driver load/resume. Safe mode or an incomplete handshake
 * means TBT-alt; otherwise the live status (or the legacy flag as fallback)
 * decides between legacy and DP-alt.
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
	enum tc_port_mode mode;

	if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* Live-status bit positions match the tc_port_mode values. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

/*
 * Compute the mode this port *should* be in: the highest live-status mode if
 * any sink is live, otherwise legacy (if flagged and the PHY handshake is
 * complete) or TBT-alt as the default.
 */
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return icl_tc_phy_status_complete(dig_port) &&
	       dig_port->tc_legacy_port ? TC_PORT_LEGACY :
					  TC_PORT_TBT_ALT;
}

/*
 * Tear down the current mode and re-run the connect flow. Called with the
 * port's tc_lock held and no active links; the AUX power domain must already
 * be off (flushed and WARN-checked) before the PHY ownership changes.
 */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	WARN_ON(intel_display_power_is_enabled(i915,
					       intel_aux_power_domain(dig_port)));

	icl_tc_phy_disconnect(dig_port);
	icl_tc_phy_connect(dig_port, required_lanes);

	DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(old_tc_mode),
		      tc_port_mode_name(dig_port->tc_mode));
}

/* Seed the link refcount from pre-existing (BIOS-enabled) active links. */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	WARN_ON(dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

/*
 * Bring the software TC state in sync with hardware at driver load/resume:
 * read back the current mode, account for links already enabled by firmware,
 * and (re)connect legacy ports that have no active link yet.
 */
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct intel_encoder *encoder = &dig_port->base;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
				      dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		goto out;
	}

	if (dig_port->tc_legacy_port)
		icl_tc_phy_connect(dig_port, 1);

out:
	DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}

/* True when the hardware-derived target mode differs from the cached mode. */
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
	bool is_connected;

	intel_tc_port_lock(dig_port);
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);
	intel_tc_port_unlock(dig_port);

	return is_connected;
}

/*
 * Take the port's tc_lock and a display-core power wakeref (stored in
 * dig_port->tc_lock_wakeref, released by intel_tc_port_unlock()). If no link
 * holds a reference and the mode is stale, reset it for @required_lanes.
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

	mutex_lock(&dig_port->tc_lock);

	if (!dig_port->tc_link_refcount &&
	    intel_tc_port_needs_reset(dig_port))
		intel_tc_port_reset_mode(dig_port, required_lanes);

	WARN_ON(dig_port->tc_lock_wakeref);
	dig_port->tc_lock_wakeref = wakeref;
}

/* Lock the port for access that needs at most 1 lane (the common case). */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

/* Drop tc_lock and asynchronously release the wakeref taken at lock time. */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

	mutex_unlock(&dig_port->tc_lock);

	intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
				      wakeref);
}

/*
 * True if the port mode is pinned, either by the lock being held or by an
 * active link reference. NOTE(review): tc_link_refcount is read without the
 * lock here — presumably acceptable for the assert-style callers; verify.
 */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

/*
 * Pin the current port mode for an active link needing @required_lanes,
 * resetting the mode first if required. Paired with intel_tc_port_put_link().
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

/* Release a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	mutex_lock(&dig_port->tc_lock);
	dig_port->tc_link_refcount--;
	mutex_unlock(&dig_port->tc_lock);
}

/*
 * One-time init of the TC state for a digital port: name, lock, legacy flag
 * (from VBT via @is_legacy, later cross-checked against live status) and the
 * FIA instance/index parameters. Bails with a WARN on non-TC ports.
 */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (WARN_ON(tc_port == PORT_TC_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}