1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_ddi.h"
9 #include "intel_de.h"
10 #include "intel_display.h"
11 #include "intel_display_power_map.h"
12 #include "intel_display_types.h"
13 #include "intel_dkl_phy_regs.h"
14 #include "intel_dp_mst.h"
15 #include "intel_mg_phy_regs.h"
16 #include "intel_tc.h"
17 
/* Connection state of a TypeC port/PHY. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* no sink connected / PHY not owned */
	TC_PORT_TBT_ALT,	/* Thunderbolt alt mode, PHY owned by TBT subsystem */
	TC_PORT_DP_ALT,		/* DisplayPort alt mode, PHY owned by display */
	TC_PORT_LEGACY,		/* static/legacy (non-TypeC) connection */
};
24 
25 struct intel_tc_port;
26 
/*
 * Per-platform TC PHY vtable. All hooks take the port's intel_tc_port and
 * are called with the port's TypeC lock held, unless noted otherwise.
 */
struct intel_tc_phy_ops {
	/* Power domain that must be on to keep the PHY out of TC-cold. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with a live HPD status. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY status-complete flag: display may take ownership. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Whether display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out mode/wakeref state from HW during driver init/resume. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect flow; returns false if the PHY can't be connected. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	/* Disconnect flow, undoing connect(). */
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time per-port init (e.g. FIA parameter setup). */
	void (*init)(struct intel_tc_port *tc);
};
37 
/* Per-port TypeC state, owned by the corresponding intel_digital_port. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;	/* back-pointer to the owning port */

	const struct intel_tc_phy_ops *phy_ops;	/* platform-specific PHY hooks */

	struct mutex lock;	/* protects the TypeC port mode */
	/* Wakeref blocking TC-cold while the port is connected. */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Domain lock_wakeref was taken for, to cross-check on release. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;	/* deferred PHY disconnect */
	int link_refcount;	/* active users keeping the link connected */
	bool legacy_port:1;	/* VBT-declared legacy (non-TypeC) connector */
	char port_name[8];	/* human-readable name used in log messages */
	enum tc_port_mode mode;		/* current mode */
	enum tc_port_mode init_mode;	/* mode read out at driver init */
	enum phy_fia phy_fia;		/* FIA instance serving this port */
	u8 phy_fia_idx;			/* index of this port within phy_fia */
};
57 
58 static enum intel_display_power_domain
59 tc_phy_cold_off_domain(struct intel_tc_port *);
60 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
61 static bool tc_phy_is_ready(struct intel_tc_port *tc);
62 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
63 
64 static const char *tc_port_mode_name(enum tc_port_mode mode)
65 {
66 	static const char * const names[] = {
67 		[TC_PORT_DISCONNECTED] = "disconnected",
68 		[TC_PORT_TBT_ALT] = "tbt-alt",
69 		[TC_PORT_DP_ALT] = "dp-alt",
70 		[TC_PORT_LEGACY] = "legacy",
71 	};
72 
73 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
74 		mode = TC_PORT_DISCONNECTED;
75 
76 	return names[mode];
77 }
78 
/* Return the TypeC state of @dig_port (NULL for non-TC ports). */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
83 
/* Return the i915 device a TC port belongs to. */
static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}
88 
89 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
90 				  enum tc_port_mode mode)
91 {
92 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
93 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
94 	struct intel_tc_port *tc = to_tc_port(dig_port);
95 
96 	return intel_phy_is_tc(i915, phy) && tc->mode == mode;
97 }
98 
/* Return true if @dig_port is a TC port in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
103 
/* Return true if @dig_port is a TC port in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
108 
/* Return true if @dig_port is a TC port in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
113 
114 /*
115  * The display power domains used for TC ports depending on the
116  * platform and TC mode (legacy, DP-alt, TBT):
117  *
118  * POWER_DOMAIN_DISPLAY_CORE:
119  * --------------------------
120  * ADLP/all modes:
121  *   - TCSS/IOM access for PHY ready state.
122  * ADLP+/all modes:
123  *   - DE/north-,south-HPD ISR access for HPD live state.
124  *
125  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
126  * -----------------------------------
127  * ICL+/all modes:
128  *   - DE/DDI_BUF access for port enabled state.
129  * ADLP/all modes:
130  *   - DE/DDI_BUF access for PHY owned state.
131  *
132  * POWER_DOMAIN_AUX_USBC<TC port index>:
133  * -------------------------------------
134  * ICL/legacy mode:
135  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
136  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
137  *     main lanes.
138  * ADLP/legacy, DP-alt modes:
139  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
140  *     main lanes.
141  *
142  * POWER_DOMAIN_TC_COLD_OFF:
143  * -------------------------
144  * TGL/legacy, DP-alt modes:
145  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
146  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
147  *     main lanes.
148  *
149  * ICL, TGL, ADLP/TBT mode:
150  *   - TCSS/IOM,FIA access for HPD live state
151  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
152  *     AUX and main lanes.
153  */
/*
 * Return true if blocking TC-cold for this port requires the port's AUX
 * power domain (rather than POWER_DOMAIN_TC_COLD_OFF); see the power
 * domain description above.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
162 
/*
 * Block the PHY's TC-cold power state, returning the wakeref and storing
 * the power domain it was taken for in @domain. The caller must release
 * the wakeref with __tc_cold_unblock() passing the same domain.
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(i915, *domain);
}
172 
/*
 * Block TC-cold, tracking (in debug builds) which power domain the wakeref
 * was taken for so tc_cold_unblock() can verify it releases the same one.
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}
185 
/* Release a TC-cold blocking wakeref taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}
194 
/*
 * Release a TC-cold blocking wakeref taken by tc_cold_block(). In debug
 * builds, warn if the cold-off domain changed since the wakeref was taken
 * (which would mean putting a different domain than was got).
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
205 
206 static void
207 assert_display_core_power_enabled(struct intel_tc_port *tc)
208 {
209 	struct drm_i915_private *i915 = tc_to_i915(tc);
210 
211 	drm_WARN_ON(&i915->drm,
212 		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
213 }
214 
215 static void
216 assert_tc_cold_blocked(struct intel_tc_port *tc)
217 {
218 	struct drm_i915_private *i915 = tc_to_i915(tc);
219 	bool enabled;
220 
221 	enabled = intel_display_power_is_enabled(i915,
222 						 tc_phy_cold_off_domain(tc));
223 	drm_WARN_ON(&i915->drm, !enabled);
224 }
225 
/* Return the DDI-lanes power domain corresponding to this TC port. */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);

	/* The TC1..TCn domains are laid out contiguously after TC1. */
	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
234 
235 static void
236 assert_tc_port_power_enabled(struct intel_tc_port *tc)
237 {
238 	struct drm_i915_private *i915 = tc_to_i915(tc);
239 
240 	drm_WARN_ON(&i915->drm,
241 		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
242 }
243 
244 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
245 {
246 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
247 	struct intel_tc_port *tc = to_tc_port(dig_port);
248 	u32 lane_mask;
249 
250 	lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
251 
252 	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
253 	assert_tc_cold_blocked(tc);
254 
255 	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
256 	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
257 }
258 
259 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
260 {
261 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
262 	struct intel_tc_port *tc = to_tc_port(dig_port);
263 	u32 pin_mask;
264 
265 	pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
266 
267 	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
268 	assert_tc_cold_blocked(tc);
269 
270 	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
271 	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
272 }
273 
/*
 * Return the maximum lane count usable on @dig_port. Only DP-alt mode is
 * limited by the FIA lane assignment; all other modes can use 4 lanes.
 */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(tc);

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	/* Map the assigned-lane bitmask to a usable lane count. */
	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
307 
/*
 * Program the FIA with the number of main lanes (@required_lanes) the
 * display will use on @dig_port, taking lane reversal into account.
 * The caller must keep the PHY's TC-cold state blocked.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	/* Lane reversal is only expected on legacy-mode ports. */
	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	/* Reversed ports use the high lanes instead of the low ones. */
	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
344 
/*
 * Sanitize the VBT-provided legacy-port flag against the live HPD status:
 * a legacy port should only report a legacy HPD, a TypeC port only a
 * DP-alt/TBT-alt HPD. On a mismatch trust the hardware and flip the flag.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only a single unambiguous live mode can contradict the flag. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to do if the live status agrees with the flag. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
372 
373 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
374 {
375 	struct drm_i915_private *i915 = tc_to_i915(tc);
376 	enum port port = tc->dig_port->base.port;
377 	enum tc_port tc_port = intel_port_to_tc(i915, port);
378 
379 	/*
380 	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
381 	 * than two TC ports, there are multiple instances of Modular FIA.
382 	 */
383 	if (modular_fia) {
384 		tc->phy_fia = tc_port / 2;
385 		tc->phy_fia_idx = tc_port % 2;
386 	} else {
387 		tc->phy_fia = FIA1;
388 		tc->phy_fia_idx = tc_port;
389 	}
390 }
391 
392 /*
393  * ICL TC PHY handlers
394  * -------------------
395  */
/*
 * ICL: legacy ports block TC-cold via their AUX power domain, all other
 * modes via the dedicated TC_COLD_OFF domain (see the description above).
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
407 
/*
 * ICL: read the live HPD status from the FIA (TBT/DP-alt) and the south
 * display HPD ISR (legacy), returning a mask of BIT(TC_PORT_*) modes.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* The FIA/SDEISR registers require the cold-off domain to be on. */
	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	/* All-ones means the PHY is in TC-cold, i.e. nothing is connected. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
440 
441 /*
442  * Return the PHY status complete flag indicating that display can acquire the
443  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
444  * is connected and it's ready to switch the ownership to display. The flag
445  * will be left cleared when a TBT-alt sink is connected, where the PHY is
446  * owned by the TBT subsystem and so switching the ownership to display is not
447  * required.
448  */
/* ICL: read the PHY-ready flag from the FIA; see the comment above. */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones means the PHY is in TC-cold and the flag can't be read. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
466 
/*
 * ICL: take (@take == true) or release display ownership of the PHY by
 * programming the safe-mode disable bit in the FIA. Returns false if the
 * FIA is inaccessible due to the PHY being in TC-cold.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
492 
/* ICL: return whether display owns the PHY (safe-mode disabled in FIA). */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* All-ones means the PHY is in TC-cold, so not owned by display. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
510 
/*
 * ICL: read out the current PHY mode from hardware, taking a TC-cold
 * blocking wakeref that is kept for as long as the port stays connected.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	/* Temporarily block TC-cold for the duration of the readout. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	/* Connected ports keep a long-term wakeref in tc->lock_wakeref. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
524 
525 /*
526  * This function implements the first part of the Connect Flow described by our
527  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
528  * lanes, EDID, etc) is done as needed in the typical places.
529  *
530  * Unlike the other ports, type-C ports are not available to use as soon as we
531  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
532  * display, USB, etc. As a result, handshaking through FIA is required around
533  * connect and disconnect to cleanly transfer ownership with the controller and
534  * set the type-C power state.
535  */
/*
 * Verify that a port in legacy or DP-alt mode can actually provide
 * @required_lanes: legacy ports always have 4 lanes, DP-alt ports are
 * re-checked against the live HPD status and the FIA lane assignment.
 * Returns false if the sink disconnected or too few lanes are assigned.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
571 
/*
 * ICL: connect the PHY in the current tc->mode (see the Connect Flow
 * comment above): block TC-cold and, for legacy/DP-alt modes, take PHY
 * ownership and verify the lane count. Returns false on failure, undoing
 * any partial state via the goto-cleanup labels.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode the TBT subsystem owns the PHY; nothing more to do. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	/* For legacy mode a failure here is unexpected, hence the WARN. */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}


	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
604 
605 /*
606  * See the comment at the connect function. This implements the Disconnect
607  * Flow.
608  */
/*
 * ICL: disconnect the PHY, releasing ownership (legacy/DP-alt only) and
 * the TC-cold blocking wakeref taken at connect time.
 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
623 
/* ICL: single (non-modular) FIA serving all ports. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
628 
/* ICL TC PHY vtable. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
639 
640 /*
641  * TGL TC PHY handlers
642  * -------------------
643  */
/* TGL: TC-cold is blocked via the dedicated domain for all modes. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
649 
/*
 * TGL: detect from FIA1 whether the SOC uses modular FIA instances and
 * load the port's FIA parameters accordingly.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	/* All-ones would mean the FIA was inaccessible despite the wakeref. */
	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
663 
/* TGL TC PHY vtable: ICL behavior except for the cold-off domain and init. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
674 
675 /*
676  * ADLP TC PHY handlers
677  * --------------------
678  */
/*
 * ADLP: legacy and DP-alt modes block TC-cold via the port's AUX power
 * domain; only TBT-alt mode uses the dedicated TC_COLD_OFF domain.
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
690 
/*
 * ADLP: read the live HPD status from the north (DP-alt/TBT-alt) and
 * south (legacy) display HPD ISRs, returning a mask of BIT(TC_PORT_*)
 * modes. Only the display-core domain is needed for the ISR access.
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
718 
719 /*
720  * Return the PHY status complete flag indicating that display can acquire the
721  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
722  * the ownership to display, regardless of what sink is connected (TBT-alt,
723  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
724  * subsystem and so switching the ownership to display is not required.
725  */
/* ADLP: read the PHY-ready flag from TCSS_DDI_STATUS; see comment above. */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
	u32 val;

	/* On ADLP the TCSS registers only need the display-core domain. */
	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	/* All-ones means the PHY is in TC-cold and the flag can't be read. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
744 
/*
 * ADLP: take (@take == true) or release display ownership of the PHY via
 * the ownership bit in DDI_BUF_CTL. Always succeeds, unlike on ICL where
 * the FIA may be inaccessible.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
758 
759 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
760 {
761 	struct drm_i915_private *i915 = tc_to_i915(tc);
762 	enum port port = tc->dig_port->base.port;
763 	u32 val;
764 
765 	assert_tc_port_power_enabled(tc);
766 
767 	val = intel_de_read(i915, DDI_BUF_CTL(port));
768 	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
769 }
770 
/*
 * ADLP: read out the current PHY mode from hardware, holding the port's
 * DDI-lanes power domain for the readout and taking a long-term TC-cold
 * blocking wakeref if the port turns out to be connected.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	/* Connected ports keep a long-term wakeref in tc->lock_wakeref. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
786 
/*
 * ADLP: connect the PHY in the current tc->mode: for legacy/DP-alt modes
 * take PHY ownership, wait for readiness, block TC-cold and verify the
 * lane count; for TBT-alt mode only block TC-cold. Returns false on
 * failure, undoing any partial state via the goto-cleanup labels.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	/* Ownership/readiness access requires the DDI-lanes domain. */
	port_wakeref = intel_display_power_get(i915, port_power_domain);

	/* For legacy mode a failure here is unexpected, hence the WARNs. */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}
833 
/*
 * ADLP: disconnect the PHY, releasing the TC-cold blocking wakeref and,
 * for legacy/DP-alt modes, PHY ownership. The DDI-lanes domain is held
 * around the ownership release.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
858 
/* ADLP: always uses modular FIA instances. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
863 
/* ADLP TC PHY vtable. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
874 
875 /*
876  * Generic TC PHY handlers
877  * -----------------------
878  */
/* Dispatch to the platform hook: domain blocking this PHY's TC-cold state. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
884 
/*
 * Dispatch to the platform hook: mask of BIT(TC_PORT_*) modes with a live
 * HPD status, sanity-checked to contain at most one mode.
 */
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 mask;

	mask = tc->phy_ops->hpd_live_status(tc);

	/* The sink can be connected only in a single mode. */
	drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);

	return mask;
}
897 
/* Dispatch to the platform hook: PHY status-complete flag. */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
902 
/* Dispatch to the platform hook: whether display owns the PHY. */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
907 
/* Dispatch to the platform hook: read out the PHY state from hardware. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
912 
913 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
914 				      bool phy_is_ready, bool phy_is_owned)
915 {
916 	struct drm_i915_private *i915 = tc_to_i915(tc);
917 
918 	drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
919 
920 	return phy_is_ready && phy_is_owned;
921 }
922 
/*
 * Return whether the PHY state is consistent with @port_pll_type: a
 * ready-and-owned PHY should use the MG/non-TBT PLL, anything else the
 * default/TBT PLL. Logs the decision with the inputs for debugging.
 */
static bool tc_phy_is_connected(struct intel_tc_port *tc,
				enum icl_port_dpll_id port_pll_type)
{
	struct intel_encoder *encoder = &tc->dig_port->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	bool phy_is_ready = tc_phy_is_ready(tc);
	bool phy_is_owned = tc_phy_is_owned(tc);
	bool is_connected;

	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
	else
		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
		    tc->port_name,
		    str_yes_no(is_connected),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");

	return is_connected;
}
947 
/* Wait up to 100ms for the PHY to become ready, logging on timeout. */
static void tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	if (wait_for(tc_phy_is_ready(tc), 100))
		drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);
}
956 
957 static enum tc_port_mode
958 hpd_mask_to_tc_mode(u32 live_status_mask)
959 {
960 	if (live_status_mask)
961 		return fls(live_status_mask) - 1;
962 
963 	return TC_PORT_DISCONNECTED;
964 }
965 
966 static enum tc_port_mode
967 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
968 {
969 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
970 
971 	return hpd_mask_to_tc_mode(live_status_mask);
972 }
973 
/*
 * Resolve the port mode given that display owns the PHY: trust a live
 * legacy/DP-alt HPD; otherwise (no HPD, or an unexpected TBT HPD) fall
 * back to the mode implied by the VBT legacy-port flag.
 */
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
			       enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		return live_mode;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_TBT_ALT:
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_LEGACY;
		else
			return TC_PORT_DP_ALT;
	}
}
993 
/*
 * Resolve the port mode given that display does not own the PHY: a live
 * legacy HPD can't be acted on without ownership, so report disconnected;
 * DP-alt/TBT HPDs map to TBT-alt (the PHY is TBT-owned); with no HPD,
 * legacy ports are disconnected and TypeC ports default to TBT-alt.
 */
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
				   enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
		return TC_PORT_DISCONNECTED;
	case TC_PORT_DP_ALT:
	case TC_PORT_TBT_ALT:
		return TC_PORT_TBT_ALT;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_DISCONNECTED;
		else
			return TC_PORT_TBT_ALT;
	}
}
1014 
/*
 * Determine the PHY's current mode from its ready/owned state and the
 * live HPD status. Used during HW state readout; the caller must keep
 * TC-cold blocked.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY with a live TBT HPD is inconsistent. */
		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1052 
1053 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1054 {
1055 	if (tc->legacy_port)
1056 		return TC_PORT_LEGACY;
1057 
1058 	return TC_PORT_TBT_ALT;
1059 }
1060 
1061 static enum tc_port_mode
1062 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1063 {
1064 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1065 
1066 	if (mode != TC_PORT_DISCONNECTED)
1067 		return mode;
1068 
1069 	return default_tc_mode(tc);
1070 }
1071 
1072 static enum tc_port_mode
1073 tc_phy_get_target_mode(struct intel_tc_port *tc)
1074 {
1075 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1076 
1077 	return hpd_mask_to_target_mode(tc, live_status_mask);
1078 }
1079 
/*
 * Connect the PHY in the mode matching its current HPD live status,
 * retrying once in the port's default mode if that fails.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		/* Fall back to the default mode and try once more. */
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(&i915->drm, !connected);
}
1098 
1099 static void tc_phy_disconnect(struct intel_tc_port *tc)
1100 {
1101 	if (tc->mode != TC_PORT_DISCONNECTED) {
1102 		tc->phy_ops->disconnect(tc);
1103 		tc->mode = TC_PORT_DISCONNECTED;
1104 	}
1105 }
1106 
/* Let the platform specific PHY code initialize its state, under the port lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1113 
/*
 * Disconnect the PHY and - unless @force_disconnect is set - reconnect it
 * with @required_lanes in the mode matching the current HPD live status.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/* Flush any pending async power domain disabling first. */
	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/*
		 * NOTE(review): presumably an enabled AUX power domain would
		 * interfere with the mode reset below, hence the warning if
		 * it's still powered after the flush above - confirm.
		 */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1140 
1141 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1142 {
1143 	return tc_phy_get_target_mode(tc) != tc->mode;
1144 }
1145 
1146 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1147 				      int required_lanes, bool force_disconnect)
1148 {
1149 	if (force_disconnect ||
1150 	    intel_tc_port_needs_reset(tc))
1151 		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1152 }
1153 
1154 static void __intel_tc_port_get_link(struct intel_tc_port *tc)
1155 {
1156 	tc->link_refcount++;
1157 }
1158 
1159 static void __intel_tc_port_put_link(struct intel_tc_port *tc)
1160 {
1161 	tc->link_refcount--;
1162 }
1163 
1164 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1165 {
1166 	struct drm_i915_private *i915 = tc_to_i915(tc);
1167 	struct intel_digital_port *dig_port = tc->dig_port;
1168 
1169 	assert_tc_port_power_enabled(tc);
1170 
1171 	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1172 	       DDI_BUF_CTL_ENABLE;
1173 }
1174 
1175 /**
1176  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1177  * @dig_port: digital port
1178  *
1179  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1180  * will be locked until intel_tc_port_sanitize_mode() is called.
1181  */
1182 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1183 {
1184 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1185 	struct intel_tc_port *tc = to_tc_port(dig_port);
1186 	bool update_mode = false;
1187 
1188 	mutex_lock(&tc->lock);
1189 
1190 	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
1191 	drm_WARN_ON(&i915->drm, tc->lock_wakeref);
1192 	drm_WARN_ON(&i915->drm, tc->link_refcount);
1193 
1194 	tc_phy_get_hw_state(tc);
1195 	/*
1196 	 * Save the initial mode for the state check in
1197 	 * intel_tc_port_sanitize_mode().
1198 	 */
1199 	tc->init_mode = tc->mode;
1200 
1201 	/*
1202 	 * The PHY needs to be connected for AUX to work during HW readout and
1203 	 * MST topology resume, but the PHY mode can only be changed if the
1204 	 * port is disabled.
1205 	 *
1206 	 * An exception is the case where BIOS leaves the PHY incorrectly
1207 	 * disconnected on an enabled legacy port. Work around that by
1208 	 * connecting the PHY even though the port is enabled. This doesn't
1209 	 * cause a problem as the PHY ownership state is ignored by the
1210 	 * IOM/TCSS firmware (only display can own the PHY in that case).
1211 	 */
1212 	if (!tc_port_is_enabled(tc)) {
1213 		update_mode = true;
1214 	} else if (tc->mode == TC_PORT_DISCONNECTED) {
1215 		drm_WARN_ON(&i915->drm, !tc->legacy_port);
1216 		drm_err(&i915->drm,
1217 			"Port %s: PHY disconnected on enabled port, connecting it\n",
1218 			tc->port_name);
1219 		update_mode = true;
1220 	}
1221 
1222 	if (update_mode)
1223 		intel_tc_port_update_mode(tc, 1, false);
1224 
1225 	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1226 	__intel_tc_port_get_link(tc);
1227 
1228 	mutex_unlock(&tc->lock);
1229 }
1230 
1231 static bool tc_port_has_active_links(struct intel_tc_port *tc,
1232 				     const struct intel_crtc_state *crtc_state)
1233 {
1234 	struct drm_i915_private *i915 = tc_to_i915(tc);
1235 	struct intel_digital_port *dig_port = tc->dig_port;
1236 	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
1237 	int active_links = 0;
1238 
1239 	if (dig_port->dp.is_mst) {
1240 		/* TODO: get the PLL type for MST, once HW readout is done for it. */
1241 		active_links = intel_dp_mst_encoder_active_links(dig_port);
1242 	} else if (crtc_state && crtc_state->hw.active) {
1243 		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
1244 		active_links = 1;
1245 	}
1246 
1247 	if (active_links && !tc_phy_is_connected(tc, pll_type))
1248 		drm_err(&i915->drm,
1249 			"Port %s: PHY disconnected with %d active link(s)\n",
1250 			tc->port_name, active_links);
1251 
1252 	return active_links;
1253 }
1254 
1255 /**
1256  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1257  * @dig_port: digital port
1258  * @crtc_state: atomic state of CRTC connected to @dig_port
1259  *
1260  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1261  * loading and system resume:
1262  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1263  * the encoder is disabled.
1264  * If the encoder is disabled make sure the PHY is disconnected.
1265  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1266  */
1267 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1268 				 const struct intel_crtc_state *crtc_state)
1269 {
1270 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1271 	struct intel_tc_port *tc = to_tc_port(dig_port);
1272 
1273 	mutex_lock(&tc->lock);
1274 
1275 	drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
1276 	if (!tc_port_has_active_links(tc, crtc_state)) {
1277 		/*
1278 		 * TBT-alt is the default mode in any case the PHY ownership is not
1279 		 * held (regardless of the sink's connected live state), so
1280 		 * we'll just switch to disconnected mode from it here without
1281 		 * a note.
1282 		 */
1283 		if (tc->init_mode != TC_PORT_TBT_ALT &&
1284 		    tc->init_mode != TC_PORT_DISCONNECTED)
1285 			drm_dbg_kms(&i915->drm,
1286 				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1287 				    tc->port_name,
1288 				    tc_port_mode_name(tc->init_mode));
1289 		tc_phy_disconnect(tc);
1290 		__intel_tc_port_put_link(tc);
1291 	}
1292 
1293 	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
1294 		    tc->port_name,
1295 		    tc_port_mode_name(tc->mode));
1296 
1297 	mutex_unlock(&tc->lock);
1298 }
1299 
1300 /*
1301  * The type-C ports are different because even when they are connected, they may
1302  * not be available/usable by the graphics driver: see the comment on
1303  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1304  * concept of "usable" and make everything check for "connected and usable" we
1305  * define a port as "connected" when it is not only connected, but also when it
1306  * is usable by the rest of the driver. That maintains the old assumption that
1307  * connected ports are usable, and avoids exposing to the users objects they
1308  * can't really use.
1309  */
1310 bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
1311 {
1312 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1313 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1314 	struct intel_tc_port *tc = to_tc_port(dig_port);
1315 	u32 mask = ~0;
1316 
1317 	drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
1318 
1319 	if (tc->mode != TC_PORT_DISCONNECTED)
1320 		mask = BIT(tc->mode);
1321 
1322 	return tc_phy_hpd_live_status(tc) & mask;
1323 }
1324 
1325 bool intel_tc_port_connected(struct intel_encoder *encoder)
1326 {
1327 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1328 	struct intel_tc_port *tc = to_tc_port(dig_port);
1329 	bool is_connected;
1330 
1331 	mutex_lock(&tc->lock);
1332 	is_connected = intel_tc_port_connected_locked(encoder);
1333 	mutex_unlock(&tc->lock);
1334 
1335 	return is_connected;
1336 }
1337 
/*
 * Lock the port, first updating its mode to match the current HPD live
 * status if no active links pin the mode.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	mutex_lock(&tc->lock);

	/* The PHY stays connected while locked, no need for the delayed disconnect. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* The PHY must be connected now, and owned except in TBT-alt mode. */
	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(tc));
}
1355 
/* Lock @dig_port's TypeC state, connecting the PHY with one lane if needed. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, 1);
}
1360 
1361 /*
1362  * Disconnect the given digital port from its TypeC PHY (handing back the
1363  * control of the PHY to the TypeC subsystem). This will happen in a delayed
1364  * manner after each aux transactions and modeset disables.
1365  */
1366 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1367 {
1368 	struct intel_tc_port *tc =
1369 		container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1370 
1371 	mutex_lock(&tc->lock);
1372 
1373 	if (!tc->link_refcount)
1374 		intel_tc_port_update_mode(tc, 1, true);
1375 
1376 	mutex_unlock(&tc->lock);
1377 }
1378 
1379 /**
1380  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1381  * @dig_port: digital port
1382  *
1383  * Flush the delayed work disconnecting an idle PHY.
1384  */
1385 void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1386 {
1387 	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1388 }
1389 
/* Unlock the port, scheduling a delayed PHY disconnect if the port is idle. */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	/*
	 * Keep the PHY connected for a while, letting back-to-back AUX
	 * transfers and modesets reuse the connected state.
	 */
	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1400 
1401 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1402 {
1403 	struct intel_tc_port *tc = to_tc_port(dig_port);
1404 
1405 	return mutex_is_locked(&tc->lock) ||
1406 	       tc->link_refcount;
1407 }
1408 
/*
 * Get a link reference on @dig_port, pinning its TypeC mode until
 * intel_tc_port_put_link() is called.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc_port = to_tc_port(dig_port);

	__intel_tc_port_lock(tc_port, required_lanes);
	__intel_tc_port_get_link(tc_port);
	intel_tc_port_unlock(dig_port);
}
1418 
/* Put a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc_port = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc_port);
	intel_tc_port_unlock(dig_port);
}
1427 
/*
 * Allocate and initialize @dig_port's TypeC port state, selecting the
 * platform specific PHY ops and reading out the initial port mode.
 *
 * Returns 0 on success, -EINVAL for a non-TypeC port or -ENOMEM on
 * allocation failure.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Pick the PHY ops matching the platform's TypeC subsystem version. */
	if (DISPLAY_VER(i915) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	snprintf(tc->port_name, sizeof(tc->port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	/* The lock must be initialized before tc_phy_init() takes it below. */
	mutex_init(&tc->lock);
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
1467 
/* Counterpart of intel_tc_port_init(), freeing @dig_port's TypeC state. */
void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
{
	/* Make sure the delayed PHY disconnect work isn't running during free. */
	intel_tc_port_flush_work(dig_port);

	kfree(dig_port->tc);
	dig_port->tc = NULL;
}
1475