1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "g4x_dp.h"
7 #include "i915_drv.h"
8 #include "intel_display_types.h"
9 #include "intel_dp.h"
10 #include "intel_dpll.h"
11 #include "intel_pps.h"
12 
13 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
14 				      enum pipe pipe);
15 
16 static void pps_init_delays(struct intel_dp *intel_dp);
17 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
18 
/*
 * Acquire the PPS lock: take a display-core power reference first, then
 * pps_mutex. The ordering matters -- see the comment in
 * intel_pps_reset_all() for why the power reference must be taken while
 * _not_ holding pps_mutex. Returns the wakeref that must later be passed
 * to intel_pps_unlock().
 */
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_pps_reset_all() why we need a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
32 
/*
 * Release the PPS lock taken by intel_pps_lock(): drop pps_mutex, then
 * put back the display-core power reference. Always returns 0 so callers
 * can write "wakeref = intel_pps_unlock(...)" to clear their local copy.
 */
intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
				 intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return 0;
}
43 
/*
 * Make the power sequencer assigned to this port "lock" onto the port by
 * briefly enabling and then disabling the DP port with a minimal one-lane
 * configuration. The pipe's DPLL must be running for this to take effect,
 * so it is forced on (and back off) around the kick if needed.
 * Refuses to run (with a WARN) if the port is currently enabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Pipe select encoding differs between CHV and VLV. */
	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So temporarily enable it if it's not already enabled.
	 */
	if (!pll_enabled) {
		/* On CHV the common lane may need powering up first. */
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* Undo the temporary PLL/lane force-on from above. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
120 
121 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
122 {
123 	struct intel_encoder *encoder;
124 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
125 
126 	/*
127 	 * We don't have power sequencer currently.
128 	 * Pick one that's not used by other ports.
129 	 */
130 	for_each_intel_dp(&dev_priv->drm, encoder) {
131 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
132 
133 		if (encoder->type == INTEL_OUTPUT_EDP) {
134 			drm_WARN_ON(&dev_priv->drm,
135 				    intel_dp->pps.active_pipe != INVALID_PIPE &&
136 				    intel_dp->pps.active_pipe !=
137 				    intel_dp->pps.pps_pipe);
138 
139 			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
140 				pipes &= ~(1 << intel_dp->pps.pps_pipe);
141 		} else {
142 			drm_WARN_ON(&dev_priv->drm,
143 				    intel_dp->pps.pps_pipe != INVALID_PIPE);
144 
145 			if (intel_dp->pps.active_pipe != INVALID_PIPE)
146 				pipes &= ~(1 << intel_dp->pps.active_pipe);
147 		}
148 	}
149 
150 	if (pipes == 0)
151 		return INVALID_PIPE;
152 
153 	return ffs(pipes) - 1;
154 }
155 
/*
 * Return the pipe whose power sequencer serves this eDP port, assigning
 * (and if necessary stealing) a free one on first use. A newly assigned
 * sequencer is initialized and kicked so that it locks onto the port.
 * Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);

	/* Fast path: a sequencer is already assigned to this port. */
	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
		return intel_dp->pps.pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	/* Detach the sequencer from whichever port held it before. */
	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps.pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps.pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps.pps_pipe;
}
204 
205 static int
206 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
207 {
208 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
209 	int backlight_controller = dev_priv->vbt.backlight.controller;
210 
211 	lockdep_assert_held(&dev_priv->pps_mutex);
212 
213 	/* We should never land here with regular DP ports */
214 	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
215 
216 	if (!intel_dp->pps.pps_reset)
217 		return backlight_controller;
218 
219 	intel_dp->pps.pps_reset = false;
220 
221 	/*
222 	 * Only the HW needs to be reprogrammed, the SW state is fixed and
223 	 * has been setup during connector init.
224 	 */
225 	pps_init_registers(intel_dp, false);
226 
227 	return backlight_controller;
228 }
229 
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
232 
233 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
234 			       enum pipe pipe)
235 {
236 	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
237 }
238 
239 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
240 				enum pipe pipe)
241 {
242 	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
243 }
244 
/* Catch-all pipe_check: accepts every pipe. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
250 
251 static enum pipe
252 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
253 		     enum port port,
254 		     vlv_pipe_check pipe_check)
255 {
256 	enum pipe pipe;
257 
258 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
259 		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
260 			PANEL_PORT_SELECT_MASK;
261 
262 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
263 			continue;
264 
265 		if (!pipe_check(dev_priv, pipe))
266 			continue;
267 
268 		return pipe;
269 	}
270 
271 	return INVALID_PIPE;
272 }
273 
/*
 * Recover the power sequencer assignment the BIOS (or a previous driver
 * instance) left behind for this port, preferring a sequencer that is
 * actively powering the panel, then one forcing VDD, then any with the
 * correct port select. Leaves pps_pipe INVALID_PIPE if none matches;
 * vlv_power_sequencer_pipe() will assign one later on demand.
 * Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						      vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							      vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							      vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps.pps_pipe));
}
311 
/*
 * Invalidate every eDP port's power sequencer state after the relevant
 * power well has been turned off: on GEN9 LP mark the HW registers as
 * needing reprogramming, on VLV/CHV forget the pipe assignment entirely.
 * Only meaningful on VLV/CHV/GEN9 LP (WARNs otherwise).
 */
void intel_pps_reset_all(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* No port should be active while the power wells are down. */
		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->pps.active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps.pps_reset = true;
		else
			intel_dp->pps.pps_pipe = INVALID_PIPE;
	}
}
347 
/* Register set of one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL */
	i915_reg_t pp_stat;	/* PP_STATUS */
	i915_reg_t pp_on;	/* PP_ON_DELAYS */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS */
	i915_reg_t pp_div;	/* PP_DIVISOR; INVALID_MMIO_REG where absent */
};
355 
/*
 * Fill @regs with the register set of the power sequencer instance that
 * serves this panel: the VBT backlight controller on GEN9 LP, the
 * assigned pipe's sequencer on VLV/CHV, and instance 0 elsewhere.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
380 
/* Resolve the PP_CONTROL register for this panel's power sequencer. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}
390 
/* Resolve the PP_STATUS register for this panel's power sequencer. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}
400 
/*
 * Does the sequencer report panel power on (PP_ON in PP_STATUS)?
 * On VLV/CHV a port with no sequencer assigned reports no power,
 * since there is no register instance to consult.
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps.pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
413 
/*
 * Is VDD currently forced on (EDP_FORCE_VDD in PP_CONTROL)?
 * On VLV/CHV a port with no sequencer assigned reports VDD off.
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps.pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
426 
/*
 * Sanity check before aux channel use: warn (and dump the PPS registers)
 * if the eDP panel has neither panel power nor forced VDD, since aux
 * transactions cannot work in that state. No-op for non-eDP ports.
 */
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}
442 
/*
 * Mask/value pairs for wait_panel_status(): each pair describes the
 * PP_STATUS bit pattern that means the panel is fully on, fully off,
 * or done with its power cycle, respectively. The "0" placeholders keep
 * the columns aligned across the three definitions.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
451 
452 static void intel_pps_verify_state(struct intel_dp *intel_dp);
453 
/*
 * Poll PP_STATUS until (status & mask) == value, with a 5000 ms timeout.
 * Timeout only logs an error; it does not propagate failure.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
483 
484 static void wait_panel_on(struct intel_dp *intel_dp)
485 {
486 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
487 
488 	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
489 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
490 }
491 
492 static void wait_panel_off(struct intel_dp *intel_dp)
493 {
494 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
495 
496 	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
497 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
498 }
499 
/*
 * Enforce the panel's minimum power-off time (T11+T12) before powering it
 * back on: sleep for whatever remains of panel_power_cycle_delay since the
 * last power off, then wait for the sequencer to reach its off-idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
521 
/*
 * Locked wrapper around wait_panel_power_cycle(). No-op for non-eDP ports.
 */
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);
}
532 
/* Honor the panel's power-on to backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
				       intel_dp->pps.backlight_on_delay);
}
538 
/* Honor the panel's backlight-off to power-down delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
				       intel_dp->pps.backlight_off_delay);
}
544 
545 /* Read the current pp_control value, unlocking the register if it
546  * is locked
547  */
548 
549 static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
550 {
551 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
552 	u32 control;
553 
554 	lockdep_assert_held(&dev_priv->pps_mutex);
555 
556 	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
557 	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
558 			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
559 		control &= ~PANEL_UNLOCK_MASK;
560 		control |= PANEL_UNLOCK_REGS;
561 	}
562 	return control;
563 }
564 
/*
 * Force panel VDD on so the aux channel can be used without fully powering
 * the panel. Cancels any pending delayed VDD-off, takes an aux power
 * reference on the 0->1 transition, and sleeps for the panel power-up
 * delay if the panel wasn't already powered.
 *
 * Returns true if VDD was not previously wanted, i.e. the caller owns the
 * matching "off" and must call intel_pps_vdd_off_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->pps.want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
	intel_dp->pps.want_panel_vdd = true;

	/* VDD already forced on by HW/a previous call: nothing more to do. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
							    intel_aux_power_domain(dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Respect the mandatory power-off time before re-applying power. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		msleep(intel_dp->pps.panel_power_up_delay);
	}

	return need_to_disable;
}
624 
/*
 * Locked wrapper that forces panel VDD on and warns if VDD had already
 * been requested (this entry point must not be nested -- use the
 * *_unlocked variants for nesting).
 * NOTE(review): the "paired with intel_pps_off()" wording looks stale;
 * the natural pair is the VDD-off path (delayed work / vdd_off_sync) --
 * confirm against callers.
 *
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_intel_pps_lock(intel_dp, wakeref)
		vdd = intel_pps_vdd_on_unlocked(intel_dp);
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}
646 
/*
 * Immediately drop the VDD force bit (if set) and release the aux power
 * reference taken when it was turned on. Records the power-off timestamp
 * when this also means the panel lost power. WARNs if someone still
 * wants VDD. Caller must hold pps_mutex.
 */
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* Dropping VDD with panel power off starts the T11/T12 clock. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
687 
/*
 * Synchronously turn VDD off: flush the delayed vdd-off worker, then
 * drop VDD under the PPS lock. No-op for non-eDP ports.
 */
void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
}
703 
/*
 * Delayed-work callback scheduled by edp_panel_vdd_schedule_off():
 * drops VDD, but only if nobody has re-requested it in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_pps *pps = container_of(to_delayed_work(__work),
					     struct intel_pps, panel_vdd_work);
	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->pps.want_panel_vdd)
			intel_pps_vdd_off_sync_unlocked(intel_dp);
	}
}
716 
717 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
718 {
719 	unsigned long delay;
720 
721 	/*
722 	 * Queue the timer to fire a long time from now (relative to the power
723 	 * down delay) to keep the panel power up across a sequence of
724 	 * operations.
725 	 */
726 	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
727 	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
728 }
729 
/*
 * Release one VDD request taken via intel_pps_vdd_on_unlocked().
 * Clears want_panel_vdd and either drops VDD immediately (@sync) or
 * schedules the delayed worker to do it later.
 * Must be paired with intel_pps_vdd_on_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->pps.want_panel_vdd = false;

	if (sync)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
755 
/*
 * Turn panel power fully on via the sequencer: wait out the mandatory
 * power cycle, set PANEL_POWER_ON, and block until the sequencer reports
 * on-idle. Applies the Ironlake workaround of clearing PANEL_POWER_RESET
 * around the sequence. WARNs and bails if power is already on.
 * Caller must hold pps_mutex. No-op for non-eDP ports.
 */
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_IRONLAKE(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_IRONLAKE(dev_priv))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the T8 backlight-on delay (see wait_backlight_on()). */
	intel_dp->pps.last_power_on = jiffies;

	if (IS_IRONLAKE(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
804 
/* Locked wrapper around intel_pps_on_unlocked(). No-op for non-eDP. */
void intel_pps_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_on_unlocked(intel_dp);
}
815 
/*
 * Turn panel power fully off: clear power, reset, VDD force and backlight
 * enable in one write, wait for the off state, record the power-off
 * timestamp, and release the aux power reference taken when VDD was
 * enabled. WARNs if VDD wasn't requested, since the panel-off sequence
 * requires it. Caller must hold pps_mutex. No-op for non-eDP ports.
 */
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->pps.want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Start of the T11/T12 off time enforced by wait_panel_power_cycle(). */
	intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
856 
/* Locked wrapper around intel_pps_off_unlocked(). No-op for non-eDP. */
void intel_pps_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_off_unlocked(intel_dp);
}
867 
/*
 * Enable backlight in the panel power control (EDP_BLC_ENABLE), after
 * honoring the panel's power-on to backlight-on delay.
 */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
893 
/*
 * Disable backlight in the panel power control (EDP_BLC_ENABLE), then
 * record the timestamp and honor the backlight-off to power-down delay.
 * No-op for non-eDP ports.
 */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	intel_dp->pps.last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
917 
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls: the current HW state
 * is read first and the request is dropped if it is already in effect.
 */
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_intel_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	/* Already in the requested state: nothing to do. */
	if (is_enabled == enable)
		return;

	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		intel_pps_backlight_on(intel_dp);
	else
		intel_pps_backlight_off(intel_dp);
}
943 
/*
 * Disconnect this port from its assigned power sequencer: turn VDD off,
 * clear the sequencer's port select, and forget the pipe assignment.
 * WARNs if the port is still active or the pipe is not A/B.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps.pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	/* Drop VDD before the sequencer is logically disconnected. */
	intel_pps_vdd_off_sync_unlocked(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps.pps_pipe = INVALID_PIPE;
}
976 
/*
 * Detach @pipe's power sequencer from whichever port currently owns it,
 * so it can be reassigned. WARNs if the sequencer belongs to a port
 * that is actively driving that pipe. Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps.pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
1004 
/*
 * vlv_pps_init - bind a power sequencer to the pipe being enabled (VLV/CHV)
 * @encoder: DP/eDP encoder being enabled
 * @crtc_state: state of the crtc the encoder is enabled on
 *
 * Detaches any power sequencer this port previously used, steals the
 * target pipe's sequencer from any other port, then claims it. For eDP
 * the sequencer delays and registers are (re)initialized as well; pure
 * DP ports only track active_pipe. Caller holds pps_mutex.
 */
void vlv_pps_init(struct intel_encoder *encoder,
		  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* The port must not already be marked active at this point */
	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);

	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
	    intel_dp->pps.pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->pps.active_pipe = crtc->pipe;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps.pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);
}
1049 
/*
 * Take over tracking of a VDD force left enabled by the BIOS.
 *
 * If VDD is already on at boot/resume, grab the AUX power domain
 * reference the VDD bit requires and schedule a delayed vdd off, so
 * the reference is not held indefinitely. Caller holds pps_mutex.
 */
static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is off already */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	/* There must not be a stale wakeref from a previous grab */
	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
							    intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
1074 
1075 bool intel_pps_have_power(struct intel_dp *intel_dp)
1076 {
1077 	intel_wakeref_t wakeref;
1078 	bool have_power = false;
1079 
1080 	with_intel_pps_lock(intel_dp, wakeref) {
1081 		have_power = edp_have_panel_power(intel_dp) &&
1082 						  edp_have_panel_vdd(intel_dp);
1083 	}
1084 
1085 	return have_power;
1086 }
1087 
1088 static void pps_init_timestamps(struct intel_dp *intel_dp)
1089 {
1090 	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1091 	intel_dp->pps.last_power_on = jiffies;
1092 	intel_dp->pps.last_backlight_off = jiffies;
1093 }
1094 
/*
 * Read the current panel power sequencing delays from the PPS registers
 * into @seq. All fields end up in 100 usec units; the power cycle delay
 * (t11_t12) is stored by the hw in 100 msec units and is scaled up here
 * (* 1000). On BXT-style hw the cycle delay lives in PP_CONTROL instead
 * of a separate PP_DIVISOR register.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		/* hw units of 100 msec -> common 100 usec units */
		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
1129 
/* Log one set of PPS delays (fields in 100 usec units) tagged with @state_name. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
1137 
/*
 * Compare the sw-tracked pps_delays against what the hw registers
 * currently hold, and log an error with both states on any mismatch.
 */
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
1153 
/*
 * Compute the final panel power sequencing delays as the max of the
 * current hw values and the VBT, falling back to the eDP 1.3 spec
 * limits when both are zero. All fields are in 100 usec units (with
 * the t11_t12 conversion quirks noted inline). Runs at most once per
 * intel_dp: a non-zero t11_t12 marks the delays as initialized.
 * Caller holds pps_mutex.
 */
static void pps_init_delays(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 *
	 * NOTE(review): the text above says 800ms but the value below
	 * enforces 1300 * 10 (i.e. 1300ms in 100us units) — confirm
	 * against the quirk's git history.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Cache the delays in msec (rounded up from 100us units) for sw waits */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
	intel_dp->pps.backlight_on_delay = get_delay(t8);
	intel_dp->pps.backlight_off_delay = get_delay(t9);
	intel_dp->pps.panel_power_down_delay = get_delay(t10);
	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->pps.panel_power_up_delay,
		    intel_dp->pps.panel_power_down_delay,
		    intel_dp->pps.panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->pps.backlight_on_delay,
		    intel_dp->pps.backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
1248 
/*
 * Program the PPS delay/divisor registers and the port select from the
 * previously computed pps_delays. With @force_disable_vdd, the VDD
 * force bit is cleared first (see the comment below for why). Caller
 * holds pps_mutex.
 */
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	/* rawclk in kHz; used for the Bspec reference divider formula below */
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	/* Delay fields are in 100 usec units, matching the hw encoding */
	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 * The power cycle delay is scaled back from 100us to 100ms units here.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		/* BXT-style hw keeps the power cycle delay in PP_CONTROL */
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
1343 
/*
 * intel_pps_encoder_reset - (re)initialize the PPS state for an eDP encoder
 *
 * Used on both init and resume paths. Re-reads/programs the power
 * sequencer (in case the BIOS changed it) and takes over any VDD the
 * BIOS left enabled. No-op for non-eDP ports.
 */
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		/*
		 * Reinit the power sequencer also on the resume path, in case
		 * BIOS did something nasty with it.
		 */
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			vlv_initial_power_sequencer_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);

		intel_pps_vdd_sanitize(intel_dp);
	}
}
1366 
/*
 * intel_pps_init - one-time PPS init for an intel_dp
 *
 * Sets up the delayed vdd-off work and initial timestamps before
 * intel_pps_encoder_reset(), which may schedule that work.
 */
void intel_pps_init(struct intel_dp *intel_dp)
{
	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);

	pps_init_timestamps(intel_dp);

	intel_pps_encoder_reset(intel_dp);
}
1375 
1376 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1377 {
1378 	int pps_num;
1379 	int pps_idx;
1380 
1381 	if (HAS_DDI(dev_priv))
1382 		return;
1383 	/*
1384 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1385 	 * everywhere where registers can be write protected.
1386 	 */
1387 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1388 		pps_num = 2;
1389 	else
1390 		pps_num = 1;
1391 
1392 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1393 		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1394 
1395 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1396 		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1397 	}
1398 }
1399 
1400 void intel_pps_setup(struct drm_i915_private *i915)
1401 {
1402 	if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
1403 		i915->pps_mmio_base = PCH_PPS_BASE;
1404 	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1405 		i915->pps_mmio_base = VLV_PPS_BASE;
1406 	else
1407 		i915->pps_mmio_base = PPS_BASE;
1408 }
1409