1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "g4x_dp.h"
7 #include "i915_drv.h"
8 #include "intel_de.h"
9 #include "intel_display_types.h"
10 #include "intel_dp.h"
11 #include "intel_dpll.h"
12 #include "intel_lvds.h"
13 #include "intel_pps.h"
14 
15 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
16 				      enum pipe pipe);
17 
18 static void pps_init_delays(struct intel_dp *intel_dp);
19 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
20 
21 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
22 {
23 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
24 	intel_wakeref_t wakeref;
25 
26 	/*
27 	 * See intel_pps_reset_all() why we need a power domain reference here.
28 	 */
29 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
30 	mutex_lock(&dev_priv->pps_mutex);
31 
32 	return wakeref;
33 }
34 
35 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
36 				 intel_wakeref_t wakeref)
37 {
38 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
39 
40 	mutex_unlock(&dev_priv->pps_mutex);
41 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
42 
43 	return 0;
44 }
45 
46 static void
47 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
48 {
49 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
50 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
51 	enum pipe pipe = intel_dp->pps.pps_pipe;
52 	bool pll_enabled, release_cl_override = false;
53 	enum dpio_phy phy = DPIO_PHY(pipe);
54 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
55 	u32 DP;
56 
57 	if (drm_WARN(&dev_priv->drm,
58 		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
59 		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
60 		     pipe_name(pipe), dig_port->base.base.base.id,
61 		     dig_port->base.base.name))
62 		return;
63 
64 	drm_dbg_kms(&dev_priv->drm,
65 		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
66 		    pipe_name(pipe), dig_port->base.base.base.id,
67 		    dig_port->base.base.name);
68 
69 	/* Preserve the BIOS-computed detected bit. This is
70 	 * supposed to be read-only.
71 	 */
72 	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
73 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
74 	DP |= DP_PORT_WIDTH(1);
75 	DP |= DP_LINK_TRAIN_PAT_1;
76 
77 	if (IS_CHERRYVIEW(dev_priv))
78 		DP |= DP_PIPE_SEL_CHV(pipe);
79 	else
80 		DP |= DP_PIPE_SEL(pipe);
81 
82 	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
83 
84 	/*
85 	 * The DPLL for the pipe must be enabled for this to work.
86 	 * So enable temporarily it if it's not already enabled.
87 	 */
88 	if (!pll_enabled) {
89 		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
90 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
91 
92 		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
93 			drm_err(&dev_priv->drm,
94 				"Failed to force on pll for pipe %c!\n",
95 				pipe_name(pipe));
96 			return;
97 		}
98 	}
99 
100 	/*
101 	 * Similar magic as in intel_dp_enable_port().
102 	 * We _must_ do this port enable + disable trick
103 	 * to make this power sequencer lock onto the port.
104 	 * Otherwise even VDD force bit won't work.
105 	 */
106 	intel_de_write(dev_priv, intel_dp->output_reg, DP);
107 	intel_de_posting_read(dev_priv, intel_dp->output_reg);
108 
109 	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
110 	intel_de_posting_read(dev_priv, intel_dp->output_reg);
111 
112 	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
113 	intel_de_posting_read(dev_priv, intel_dp->output_reg);
114 
115 	if (!pll_enabled) {
116 		vlv_force_pll_off(dev_priv, pipe);
117 
118 		if (release_cl_override)
119 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
120 	}
121 }
122 
123 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
124 {
125 	struct intel_encoder *encoder;
126 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
127 
128 	/*
129 	 * We don't have power sequencer currently.
130 	 * Pick one that's not used by other ports.
131 	 */
132 	for_each_intel_dp(&dev_priv->drm, encoder) {
133 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
134 
135 		if (encoder->type == INTEL_OUTPUT_EDP) {
136 			drm_WARN_ON(&dev_priv->drm,
137 				    intel_dp->pps.active_pipe != INVALID_PIPE &&
138 				    intel_dp->pps.active_pipe !=
139 				    intel_dp->pps.pps_pipe);
140 
141 			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
142 				pipes &= ~(1 << intel_dp->pps.pps_pipe);
143 		} else {
144 			drm_WARN_ON(&dev_priv->drm,
145 				    intel_dp->pps.pps_pipe != INVALID_PIPE);
146 
147 			if (intel_dp->pps.active_pipe != INVALID_PIPE)
148 				pipes &= ~(1 << intel_dp->pps.active_pipe);
149 		}
150 	}
151 
152 	if (pipes == 0)
153 		return INVALID_PIPE;
154 
155 	return ffs(pipes) - 1;
156 }
157 
/*
 * Return the pipe whose power sequencer drives this eDP port, binding a
 * free one (and kicking it so it locks onto the port) if none is bound
 * yet. VLV/CHV only. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	/* if the port is active, it must already be on its own sequencer */
	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);

	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
		return intel_dp->pps.pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	/* reclaim the sequencer from whichever port held it before */
	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps.pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps.pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps.pps_pipe;
}
206 
207 static int
208 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
209 {
210 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
211 	int backlight_controller = dev_priv->vbt.backlight.controller;
212 
213 	lockdep_assert_held(&dev_priv->pps_mutex);
214 
215 	/* We should never land here with regular DP ports */
216 	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
217 
218 	if (!intel_dp->pps.pps_reset)
219 		return backlight_controller;
220 
221 	intel_dp->pps.pps_reset = false;
222 
223 	/*
224 	 * Only the HW needs to be reprogrammed, the SW state is fixed and
225 	 * has been setup during connector init.
226 	 */
227 	pps_init_registers(intel_dp, false);
228 
229 	return backlight_controller;
230 }
231 
232 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
233 			       enum pipe pipe);
234 
235 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
236 			       enum pipe pipe)
237 {
238 	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
239 }
240 
241 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
242 				enum pipe pipe)
243 {
244 	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
245 }
246 
/* vlv_pipe_check that accepts any pipe; used as the last-resort fallback. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
252 
253 static enum pipe
254 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
255 		     enum port port,
256 		     vlv_pipe_check pipe_check)
257 {
258 	enum pipe pipe;
259 
260 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
261 		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
262 			PANEL_PORT_SELECT_MASK;
263 
264 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
265 			continue;
266 
267 		if (!pipe_check(dev_priv, pipe))
268 			continue;
269 
270 		return pipe;
271 	}
272 
273 	return INVALID_PIPE;
274 }
275 
276 static void
277 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
278 {
279 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
280 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
281 	enum port port = dig_port->base.port;
282 
283 	lockdep_assert_held(&dev_priv->pps_mutex);
284 
285 	/* try to find a pipe with this port selected */
286 	/* first pick one where the panel is on */
287 	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
288 						      vlv_pipe_has_pp_on);
289 	/* didn't find one? pick one where vdd is on */
290 	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
291 		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
292 							      vlv_pipe_has_vdd_on);
293 	/* didn't find one? pick one with just the correct port */
294 	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
295 		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
296 							      vlv_pipe_any);
297 
298 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
299 	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
300 		drm_dbg_kms(&dev_priv->drm,
301 			    "no initial power sequencer for [ENCODER:%d:%s]\n",
302 			    dig_port->base.base.base.id,
303 			    dig_port->base.base.name);
304 		return;
305 	}
306 
307 	drm_dbg_kms(&dev_priv->drm,
308 		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
309 		    dig_port->base.base.base.id,
310 		    dig_port->base.base.name,
311 		    pipe_name(intel_dp->pps.pps_pipe));
312 }
313 
/*
 * Invalidate cached PPS state for every eDP port: on display ver 9+
 * just flag the registers for reprogramming, on VLV/CHV forget the
 * pipe binding entirely so it gets re-picked. LP platforms only.
 */
void intel_pps_reset_all(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
		return;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for (each_intel_dp -- see loop below) no port may be active here */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->pps.active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (DISPLAY_VER(dev_priv) >= 9)
			intel_dp->pps.pps_reset = true;
		else
			intel_dp->pps.pps_pipe = INVALID_PIPE;
	}
}
349 
/* Resolved per-panel PPS register offsets (see intel_pps_get_registers()). */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL */
	i915_reg_t pp_stat;	/* PP_STATUS */
	i915_reg_t pp_on;	/* PP_ON_DELAYS */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS */
	i915_reg_t pp_div;	/* PP_DIVISOR, or INVALID_MMIO_REG where absent */
};
357 
/*
 * Fill @regs with the PPS register offsets for this panel. The instance
 * index comes from the VBT backlight controller on BXT/GLK, from the
 * bound power sequencer pipe on VLV/CHV, and is 0 elsewhere. Note the
 * index lookups can reprogram the sequencer as a side effect, so
 * pps_mutex must be held.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
383 
384 static i915_reg_t
385 _pp_ctrl_reg(struct intel_dp *intel_dp)
386 {
387 	struct pps_registers regs;
388 
389 	intel_pps_get_registers(intel_dp, &regs);
390 
391 	return regs.pp_ctrl;
392 }
393 
394 static i915_reg_t
395 _pp_stat_reg(struct intel_dp *intel_dp)
396 {
397 	struct pps_registers regs;
398 
399 	intel_pps_get_registers(intel_dp, &regs);
400 
401 	return regs.pp_stat;
402 }
403 
404 static bool edp_have_panel_power(struct intel_dp *intel_dp)
405 {
406 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
407 
408 	lockdep_assert_held(&dev_priv->pps_mutex);
409 
410 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
411 	    intel_dp->pps.pps_pipe == INVALID_PIPE)
412 		return false;
413 
414 	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
415 }
416 
417 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
418 {
419 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
420 
421 	lockdep_assert_held(&dev_priv->pps_mutex);
422 
423 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
424 	    intel_dp->pps.pps_pipe == INVALID_PIPE)
425 		return false;
426 
427 	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
428 }
429 
430 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
431 {
432 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
433 
434 	if (!intel_dp_is_edp(intel_dp))
435 		return;
436 
437 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
438 		drm_WARN(&dev_priv->drm, 1,
439 			 "eDP powered off while attempting aux channel communication.\n");
440 		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
441 			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
442 			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
443 	}
444 }
445 
/*
 * PP_STATUS mask/value pairs for wait_panel_status(). The "0" terms are
 * placeholders that keep the bit columns aligned across the three states.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
454 
455 static void intel_pps_verify_state(struct intel_dp *intel_dp);
456 
/*
 * Poll PP_STATUS until (status & mask) == value, logging an error if
 * the 5000 ms limit expires. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* best effort: log but continue on timeout */
	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
486 
487 static void wait_panel_on(struct intel_dp *intel_dp)
488 {
489 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
490 
491 	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
492 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
493 }
494 
495 static void wait_panel_off(struct intel_dp *intel_dp)
496 {
497 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
498 
499 	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
500 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
501 }
502 
/* Wait out the remainder of the panel power cycle delay (t11_t12). */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
524 
525 void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
526 {
527 	intel_wakeref_t wakeref;
528 
529 	if (!intel_dp_is_edp(intel_dp))
530 		return;
531 
532 	with_intel_pps_lock(intel_dp, wakeref)
533 		wait_panel_power_cycle(intel_dp);
534 }
535 
536 static void wait_backlight_on(struct intel_dp *intel_dp)
537 {
538 	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
539 				       intel_dp->pps.backlight_on_delay);
540 }
541 
542 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
543 {
544 	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
545 				       intel_dp->pps.backlight_off_delay);
546 }
547 
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	/*
	 * On non-DDI platforms the returned value must carry the unlock
	 * key for subsequent PP_CONTROL writes to take effect; warn and
	 * patch it in if the register read back without it.
	 */
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
567 
568 /*
569  * Must be paired with intel_pps_vdd_off_unlocked().
570  * Must hold pps_mutex around the whole on/off sequence.
571  * Can be nested with intel_pps_vdd_{on,off}() calls.
572  */
573 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
574 {
575 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
576 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
577 	u32 pp;
578 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
579 	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
580 
581 	lockdep_assert_held(&dev_priv->pps_mutex);
582 
583 	if (!intel_dp_is_edp(intel_dp))
584 		return false;
585 
586 	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
587 	intel_dp->pps.want_panel_vdd = true;
588 
589 	if (edp_have_panel_vdd(intel_dp))
590 		return need_to_disable;
591 
592 	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
593 	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
594 							    intel_aux_power_domain(dig_port));
595 
596 	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
597 		    dig_port->base.base.base.id,
598 		    dig_port->base.base.name);
599 
600 	if (!edp_have_panel_power(intel_dp))
601 		wait_panel_power_cycle(intel_dp);
602 
603 	pp = ilk_get_pp_control(intel_dp);
604 	pp |= EDP_FORCE_VDD;
605 
606 	pp_stat_reg = _pp_stat_reg(intel_dp);
607 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
608 
609 	intel_de_write(dev_priv, pp_ctrl_reg, pp);
610 	intel_de_posting_read(dev_priv, pp_ctrl_reg);
611 	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
612 		    intel_de_read(dev_priv, pp_stat_reg),
613 		    intel_de_read(dev_priv, pp_ctrl_reg));
614 	/*
615 	 * If the panel wasn't on, delay before accessing aux channel
616 	 */
617 	if (!edp_have_panel_power(intel_dp)) {
618 		drm_dbg_kms(&dev_priv->drm,
619 			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
620 			    dig_port->base.base.base.id,
621 			    dig_port->base.base.name);
622 		msleep(intel_dp->pps.panel_power_up_delay);
623 	}
624 
625 	return need_to_disable;
626 }
627 
628 /*
629  * Must be paired with intel_pps_off().
630  * Nested calls to these functions are not allowed since
631  * we drop the lock. Caller must use some higher level
632  * locking to prevent nested calls from other threads.
633  */
634 void intel_pps_vdd_on(struct intel_dp *intel_dp)
635 {
636 	intel_wakeref_t wakeref;
637 	bool vdd;
638 
639 	if (!intel_dp_is_edp(intel_dp))
640 		return;
641 
642 	vdd = false;
643 	with_intel_pps_lock(intel_dp, wakeref)
644 		vdd = intel_pps_vdd_on_unlocked(intel_dp);
645 	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
646 			dp_to_dig_port(intel_dp)->base.base.base.id,
647 			dp_to_dig_port(intel_dp)->base.base.name);
648 }
649 
/*
 * Immediately drop the VDD force bit (if set) and release the AUX
 * power reference taken when VDD was enabled. Must only be called
 * once want_panel_vdd has been cleared. Caller must hold pps_mutex.
 */
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* panel power was off too: this starts the power cycle window */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
690 
691 void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
692 {
693 	intel_wakeref_t wakeref;
694 
695 	if (!intel_dp_is_edp(intel_dp))
696 		return;
697 
698 	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
699 	/*
700 	 * vdd might still be enabled due to the delayed vdd off.
701 	 * Make sure vdd is actually turned off here.
702 	 */
703 	with_intel_pps_lock(intel_dp, wakeref)
704 		intel_pps_vdd_off_sync_unlocked(intel_dp);
705 }
706 
/*
 * Deferred VDD-off handler scheduled by edp_panel_vdd_schedule_off():
 * drops VDD unless it was re-requested (want_panel_vdd) in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_pps *pps = container_of(to_delayed_work(__work),
					     struct intel_pps, panel_vdd_work);
	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->pps.want_panel_vdd)
			intel_pps_vdd_off_sync_unlocked(intel_dp);
	}
}
719 
720 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
721 {
722 	unsigned long delay;
723 
724 	/*
725 	 * Queue the timer to fire a long time from now (relative to the power
726 	 * down delay) to keep the panel power up across a sequence of
727 	 * operations.
728 	 */
729 	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
730 	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
731 }
732 
733 /*
734  * Must be paired with edp_panel_vdd_on().
735  * Must hold pps_mutex around the whole on/off sequence.
736  * Can be nested with intel_pps_vdd_{on,off}() calls.
737  */
738 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
739 {
740 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
741 
742 	lockdep_assert_held(&dev_priv->pps_mutex);
743 
744 	if (!intel_dp_is_edp(intel_dp))
745 		return;
746 
747 	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
748 			dp_to_dig_port(intel_dp)->base.base.base.id,
749 			dp_to_dig_port(intel_dp)->base.base.name);
750 
751 	intel_dp->pps.want_panel_vdd = false;
752 
753 	if (sync)
754 		intel_pps_vdd_off_sync_unlocked(intel_dp);
755 	else
756 		edp_panel_vdd_schedule_off(intel_dp);
757 }
758 
/*
 * Power the panel on via the PPS and wait for the power-on sequence
 * to complete. Caller must hold pps_mutex.
 */
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	/* honour the remaining power cycle delay since the last power off */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_IRONLAKE(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_IRONLAKE(dev_priv))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* timestamp consumed by wait_backlight_on() */
	intel_dp->pps.last_power_on = jiffies;

	if (IS_IRONLAKE(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
807 
808 void intel_pps_on(struct intel_dp *intel_dp)
809 {
810 	intel_wakeref_t wakeref;
811 
812 	if (!intel_dp_is_edp(intel_dp))
813 		return;
814 
815 	with_intel_pps_lock(intel_dp, wakeref)
816 		intel_pps_on_unlocked(intel_dp);
817 }
818 
/*
 * Power the panel off via the PPS. The caller must have VDD requested
 * on; panel power and the VDD force bit are dropped together since some
 * panels misbehave otherwise, and the AUX power reference taken when
 * VDD was enabled is released here. Caller must hold pps_mutex.
 */
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD force is being cleared in the same write */
	intel_dp->pps.want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* timestamp consumed by wait_panel_power_cycle() */
	intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
859 
860 void intel_pps_off(struct intel_dp *intel_dp)
861 {
862 	intel_wakeref_t wakeref;
863 
864 	if (!intel_dp_is_edp(intel_dp))
865 		return;
866 
867 	with_intel_pps_lock(intel_dp, wakeref)
868 		intel_pps_off_unlocked(intel_dp);
869 }
870 
/* Enable backlight in the panel power control. Takes pps_mutex itself. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
896 
/*
 * Disable backlight in the panel power control, then wait out the
 * backlight-off delay before returning. Takes pps_mutex itself.
 */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	/* timestamp consumed by edp_wait_backlight_off() */
	intel_dp->pps.last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
920 
921 /*
922  * Hook for controlling the panel power control backlight through the bl_power
923  * sysfs attribute. Take care to handle multiple calls.
924  */
925 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
926 {
927 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
928 	struct intel_dp *intel_dp = intel_attached_dp(connector);
929 	intel_wakeref_t wakeref;
930 	bool is_enabled;
931 
932 	is_enabled = false;
933 	with_intel_pps_lock(intel_dp, wakeref)
934 		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
935 	if (is_enabled == enable)
936 		return;
937 
938 	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
939 		    enable ? "enable" : "disable");
940 
941 	if (enable)
942 		intel_pps_backlight_on(intel_dp);
943 	else
944 		intel_pps_backlight_off(intel_dp);
945 }
946 
/*
 * Logically disconnect this eDP port from the power sequencer it owns:
 * turn VDD off, clear the PPS port select and forget the binding.
 * The port must not be active on the sequencer's pipe.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps.pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	/* turn VDD off while we still have control of the sequencer */
	intel_pps_vdd_off_sync_unlocked(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps.pps_pipe = INVALID_PIPE;
}
979 
/*
 * Take @pipe's power sequencer away from whichever DP/eDP port
 * currently owns it (making sure VDD is off there first). Warns if it
 * is being stolen from a port still active on that pipe.
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps.pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
1007 
/*
 * Bind the power sequencer of the crtc's pipe to this encoder
 * (VLV/CHV): detach any sequencer this port used before, steal the
 * target pipe's sequencer from any other port, then (for eDP only)
 * claim it and program its delays/registers. Caller holds pps_mutex
 * (asserted below).
 */
void vlv_pps_init(struct intel_encoder *encoder,
		  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);

	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
	    intel_dp->pps.pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->pps.active_pipe = crtc->pipe;

	/* Non-eDP ports only track the active pipe; no sequencer init. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps.pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);
}
1052 
1053 static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
1054 {
1055 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1056 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1057 
1058 	lockdep_assert_held(&dev_priv->pps_mutex);
1059 
1060 	if (!edp_have_panel_vdd(intel_dp))
1061 		return;
1062 
1063 	/*
1064 	 * The VDD bit needs a power domain reference, so if the bit is
1065 	 * already enabled when we boot or resume, grab this reference and
1066 	 * schedule a vdd off, so we don't hold on to the reference
1067 	 * indefinitely.
1068 	 */
1069 	drm_dbg_kms(&dev_priv->drm,
1070 		    "VDD left on by BIOS, adjusting state tracking\n");
1071 	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1072 	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1073 							    intel_aux_power_domain(dig_port));
1074 
1075 	edp_panel_vdd_schedule_off(intel_dp);
1076 }
1077 
1078 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1079 {
1080 	intel_wakeref_t wakeref;
1081 	bool have_power = false;
1082 
1083 	with_intel_pps_lock(intel_dp, wakeref) {
1084 		have_power = edp_have_panel_power(intel_dp) ||
1085 			     edp_have_panel_vdd(intel_dp);
1086 	}
1087 
1088 	return have_power;
1089 }
1090 
1091 static void pps_init_timestamps(struct intel_dp *intel_dp)
1092 {
1093 	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1094 	intel_dp->pps.last_power_on = jiffies;
1095 	intel_dp->pps.last_backlight_off = jiffies;
1096 }
1097 
1098 static void
1099 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1100 {
1101 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1102 	u32 pp_on, pp_off, pp_ctl;
1103 	struct pps_registers regs;
1104 
1105 	intel_pps_get_registers(intel_dp, &regs);
1106 
1107 	pp_ctl = ilk_get_pp_control(intel_dp);
1108 
1109 	/* Ensure PPS is unlocked */
1110 	if (!HAS_DDI(dev_priv))
1111 		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1112 
1113 	pp_on = intel_de_read(dev_priv, regs.pp_on);
1114 	pp_off = intel_de_read(dev_priv, regs.pp_off);
1115 
1116 	/* Pull timing values out of registers */
1117 	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1118 	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1119 	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1120 	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1121 
1122 	if (i915_mmio_reg_valid(regs.pp_div)) {
1123 		u32 pp_div;
1124 
1125 		pp_div = intel_de_read(dev_priv, regs.pp_div);
1126 
1127 		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1128 	} else {
1129 		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1130 	}
1131 }
1132 
1133 static void
1134 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1135 		     const struct edp_power_seq *seq)
1136 {
1137 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1138 
1139 	drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1140 		    state_name,
1141 		    seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1142 }
1143 
1144 static void
1145 intel_pps_verify_state(struct intel_dp *intel_dp)
1146 {
1147 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1148 	struct edp_power_seq hw;
1149 	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1150 
1151 	intel_pps_readout_hw_state(intel_dp, &hw);
1152 
1153 	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1154 	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1155 		drm_err(&i915->drm, "PPS state mismatch\n");
1156 		intel_pps_dump_state(intel_dp, "sw", sw);
1157 		intel_pps_dump_state(intel_dp, "hw", &hw);
1158 	}
1159 }
1160 
/*
 * Compute the final panel power sequence delays as the max of the
 * current hardware state and the VBT values, falling back to the eDP
 * spec limits when both are zero, and cache the rounded-up millisecond
 * values the driver uses for its manual waits. Only runs once; caller
 * holds pps_mutex.
 */
static void pps_init_delays(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state(intel_dp, "cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay (a 1300ms
	 * minimum is enforced below) avoids this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state(intel_dp, "vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hw units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
	intel_dp->pps.backlight_on_delay = get_delay(t8);
	intel_dp->pps.backlight_off_delay = get_delay(t9);
	intel_dp->pps.panel_power_down_delay = get_delay(t10);
	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->pps.panel_power_up_delay,
		    intel_dp->pps.panel_power_down_delay,
		    intel_dp->pps.panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->pps.backlight_on_delay,
		    intel_dp->pps.backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
1255 
/*
 * Program the panel power sequencer registers (delays, port select,
 * reference divider / power cycle delay) from the cached sw delays.
 * With @force_disable_vdd, first clear any BIOS-enabled VDD so the
 * power domain tracking stays consistent. Caller holds pps_mutex.
 */
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	/* rawclk in kHz; used for the PP reference divider below */
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	/* Encode the cached delays into the PP_ON/PP_OFF register fields. */
	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 * t11_t12 is converted from 100us units to 100ms units, rounding up.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		/* No PP_DIVISOR: the power cycle delay lives in PP_CONTROL. */
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
1350 
/*
 * (Re)initialize the power sequencer state for this encoder, e.g. on
 * driver load or resume. No-op for non-eDP ports.
 */
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		/*
		 * Reinit the power sequencer also on the resume path, in case
		 * BIOS did something nasty with it.
		 */
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			vlv_initial_power_sequencer_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);

		/* Account for any VDD left enabled behind our back. */
		intel_pps_vdd_sanitize(intel_dp);
	}
}
1373 
/*
 * Per-encoder PPS init: set up the delayed VDD off work, seed the
 * power/backlight timestamps and program the power sequencer.
 */
void intel_pps_init(struct intel_dp *intel_dp)
{
	/*
	 * The vdd work must be initialized before intel_pps_encoder_reset(),
	 * which can end up scheduling it via intel_pps_vdd_sanitize().
	 */
	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);

	pps_init_timestamps(intel_dp);

	intel_pps_encoder_reset(intel_dp);
}
1382 
1383 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1384 {
1385 	int pps_num;
1386 	int pps_idx;
1387 
1388 	if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1389 		return;
1390 	/*
1391 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1392 	 * everywhere where registers can be write protected.
1393 	 */
1394 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1395 		pps_num = 2;
1396 	else
1397 		pps_num = 1;
1398 
1399 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1400 		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1401 
1402 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1403 		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1404 	}
1405 }
1406 
1407 void intel_pps_setup(struct drm_i915_private *i915)
1408 {
1409 	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1410 		i915->pps_mmio_base = PCH_PPS_BASE;
1411 	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1412 		i915->pps_mmio_base = VLV_PPS_BASE;
1413 	else
1414 		i915->pps_mmio_base = PPS_BASE;
1415 }
1416 
/*
 * State sanity check for pre-DDI platforms: warn if the panel driven
 * by @pipe is powered on while its PPS registers are still locked.
 */
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms have no PPS register lock; nothing to assert. */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/*
		 * PCH: single PPS instance; figure out which pipe drives
		 * the panel from the port select bits.
		 */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH: single PPS; only LVDS panels are expected here. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked if the panel is off or the unlock magic is programmed. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}
1474