1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "g4x_dp.h"
7 #include "i915_drv.h"
8 #include "i915_reg.h"
9 #include "intel_de.h"
10 #include "intel_display_power_well.h"
11 #include "intel_display_types.h"
12 #include "intel_dp.h"
13 #include "intel_dpio_phy.h"
14 #include "intel_dpll.h"
15 #include "intel_lvds.h"
16 #include "intel_pps.h"
17 #include "intel_quirks.h"
18 
/* Forward declarations for helpers defined later in this file. */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);

static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
24 
25 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
26 {
27 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
28 	intel_wakeref_t wakeref;
29 
30 	/*
31 	 * See intel_pps_reset_all() why we need a power domain reference here.
32 	 */
33 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
34 	mutex_lock(&dev_priv->display.pps.mutex);
35 
36 	return wakeref;
37 }
38 
39 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
40 				 intel_wakeref_t wakeref)
41 {
42 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
43 
44 	mutex_unlock(&dev_priv->display.pps.mutex);
45 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
46 
47 	return 0;
48 }
49 
/*
 * "Kick" the power sequencer so that it locks onto this port: perform a
 * dummy port enable + disable with the sequencer's pipe selected. Until
 * this has been done, even the VDD force bit has no effect. Caller must
 * hold pps_mutex, and the port must currently be disabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	/* Refuse to touch the port register while the encoder is active. */
	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	/* Minimal link config: lowest vswing/pre-emphasis, one lane. */
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		/* On CHV the PHY channel may need to be powered up first. */
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		/*
		 * NOTE(review): this error path returns with the CHV CL
		 * override still asserted — confirm that is intentional.
		 */
		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
126 
127 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
128 {
129 	struct intel_encoder *encoder;
130 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
131 
132 	/*
133 	 * We don't have power sequencer currently.
134 	 * Pick one that's not used by other ports.
135 	 */
136 	for_each_intel_dp(&dev_priv->drm, encoder) {
137 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
138 
139 		if (encoder->type == INTEL_OUTPUT_EDP) {
140 			drm_WARN_ON(&dev_priv->drm,
141 				    intel_dp->pps.active_pipe != INVALID_PIPE &&
142 				    intel_dp->pps.active_pipe !=
143 				    intel_dp->pps.pps_pipe);
144 
145 			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
146 				pipes &= ~(1 << intel_dp->pps.pps_pipe);
147 		} else {
148 			drm_WARN_ON(&dev_priv->drm,
149 				    intel_dp->pps.pps_pipe != INVALID_PIPE);
150 
151 			if (intel_dp->pps.active_pipe != INVALID_PIPE)
152 				pipes &= ~(1 << intel_dp->pps.active_pipe);
153 		}
154 	}
155 
156 	if (pipes == 0)
157 		return INVALID_PIPE;
158 
159 	return ffs(pipes) - 1;
160 }
161 
/*
 * Return the pipe whose power sequencer drives this eDP port,
 * allocating, initializing and kicking a free one if the port doesn't
 * have one yet. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);

	/* Already assigned a sequencer? Keep using it. */
	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
		return intel_dp->pps.pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps.pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps.pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps.pps_pipe;
}
210 
211 static int
212 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
213 {
214 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
215 	struct intel_connector *connector = intel_dp->attached_connector;
216 	int backlight_controller = connector->panel.vbt.backlight.controller;
217 
218 	lockdep_assert_held(&dev_priv->display.pps.mutex);
219 
220 	/* We should never land here with regular DP ports */
221 	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
222 
223 	if (!intel_dp->pps.pps_reset)
224 		return backlight_controller;
225 
226 	intel_dp->pps.pps_reset = false;
227 
228 	/*
229 	 * Only the HW needs to be reprogrammed, the SW state is fixed and
230 	 * has been setup during connector init.
231 	 */
232 	pps_init_registers(intel_dp, false);
233 
234 	return backlight_controller;
235 }
236 
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
239 
240 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
241 			       enum pipe pipe)
242 {
243 	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
244 }
245 
246 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
247 				enum pipe pipe)
248 {
249 	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
250 }
251 
/* Accept any pipe; used when neither power nor VDD narrowed the search. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
257 
258 static enum pipe
259 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
260 		     enum port port,
261 		     vlv_pipe_check pipe_check)
262 {
263 	enum pipe pipe;
264 
265 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
266 		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
267 			PANEL_PORT_SELECT_MASK;
268 
269 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
270 			continue;
271 
272 		if (!pipe_check(dev_priv, pipe))
273 			continue;
274 
275 		return pipe;
276 	}
277 
278 	return INVALID_PIPE;
279 }
280 
/*
 * Determine which power sequencer (if any) the BIOS left attached to
 * this port, preferring one that's actually powering the panel. Leaves
 * pps_pipe == INVALID_PIPE when none matches; vlv_power_sequencer_pipe()
 * will then pick one on demand. Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						      vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							      vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							      vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps.pps_pipe));
}
318 
/*
 * Mark every eDP port's power sequencer state as needing
 * reprogramming, typically after the display power well was turned off
 * and the PPS hardware lost its state. LP platforms only.
 */
void intel_pps_reset_all(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
		return;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->pps.active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		/* BXT+/GLK use fixed PPS indices; VLV/CHV re-pick a pipe. */
		if (DISPLAY_VER(dev_priv) >= 9)
			intel_dp->pps.pps_reset = true;
		else
			intel_dp->pps.pps_pipe = INVALID_PIPE;
	}
}
354 
/* Resolved register offsets for one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL */
	i915_reg_t pp_stat;	/* PP_STATUS */
	i915_reg_t pp_on;	/* PP_ON_DELAYS */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS */
	i915_reg_t pp_div;	/* PP_DIVISOR; INVALID_MMIO_REG where absent */
};
362 
/*
 * Fill @regs with the register offsets of the power sequencer instance
 * this port uses: the VBT backlight controller index on BXT/GLK, the
 * assigned pipe on VLV/CHV, instance 0 everywhere else.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
388 
389 static i915_reg_t
390 _pp_ctrl_reg(struct intel_dp *intel_dp)
391 {
392 	struct pps_registers regs;
393 
394 	intel_pps_get_registers(intel_dp, &regs);
395 
396 	return regs.pp_ctrl;
397 }
398 
399 static i915_reg_t
400 _pp_stat_reg(struct intel_dp *intel_dp)
401 {
402 	struct pps_registers regs;
403 
404 	intel_pps_get_registers(intel_dp, &regs);
405 
406 	return regs.pp_stat;
407 }
408 
409 static bool edp_have_panel_power(struct intel_dp *intel_dp)
410 {
411 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
412 
413 	lockdep_assert_held(&dev_priv->display.pps.mutex);
414 
415 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
416 	    intel_dp->pps.pps_pipe == INVALID_PIPE)
417 		return false;
418 
419 	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
420 }
421 
422 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
423 {
424 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
425 
426 	lockdep_assert_held(&dev_priv->display.pps.mutex);
427 
428 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
429 	    intel_dp->pps.pps_pipe == INVALID_PIPE)
430 		return false;
431 
432 	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
433 }
434 
435 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
436 {
437 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
438 
439 	if (!intel_dp_is_edp(intel_dp))
440 		return;
441 
442 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
443 		drm_WARN(&dev_priv->drm, 1,
444 			 "eDP powered off while attempting aux channel communication.\n");
445 		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
446 			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
447 			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
448 	}
449 }
450 
/*
 * PP_STATUS mask/value pairs for wait_panel_status(), describing the
 * settled "on", "off" and "power cycle done" states. The literal 0
 * columns keep the PP_CYCLE_DELAY_ACTIVE / sequence-state fields
 * visually aligned where they are don't-care or expected clear.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
459 
460 static void intel_pps_verify_state(struct intel_dp *intel_dp);
461 
462 static void wait_panel_status(struct intel_dp *intel_dp,
463 				       u32 mask,
464 				       u32 value)
465 {
466 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
467 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
468 
469 	lockdep_assert_held(&dev_priv->display.pps.mutex);
470 
471 	intel_pps_verify_state(intel_dp);
472 
473 	pp_stat_reg = _pp_stat_reg(intel_dp);
474 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
475 
476 	drm_dbg_kms(&dev_priv->drm,
477 		    "mask %08x value %08x status %08x control %08x\n",
478 		    mask, value,
479 		    intel_de_read(dev_priv, pp_stat_reg),
480 		    intel_de_read(dev_priv, pp_ctrl_reg));
481 
482 	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
483 				       mask, value, 5000))
484 		drm_err(&dev_priv->drm,
485 			"Panel status timeout: status %08x control %08x\n",
486 			intel_de_read(dev_priv, pp_stat_reg),
487 			intel_de_read(dev_priv, pp_ctrl_reg));
488 
489 	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
490 }
491 
492 static void wait_panel_on(struct intel_dp *intel_dp)
493 {
494 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
495 
496 	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
497 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
498 }
499 
500 static void wait_panel_off(struct intel_dp *intel_dp)
501 {
502 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
503 
504 	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
505 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
506 }
507 
508 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
509 {
510 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
511 	ktime_t panel_power_on_time;
512 	s64 panel_power_off_duration;
513 
514 	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
515 
516 	/* take the difference of current time and panel power off time
517 	 * and then make panel wait for t11_t12 if needed. */
518 	panel_power_on_time = ktime_get_boottime();
519 	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
520 
521 	/* When we disable the VDD override bit last we have to do the manual
522 	 * wait. */
523 	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
524 		wait_remaining_ms_from_jiffies(jiffies,
525 				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
526 
527 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
528 }
529 
/* Locked wrapper: honour t11_t12 before the next panel power on. */
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);
}
540 
/* Honour the panel-power-on -> backlight-on delay (t8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
				       intel_dp->pps.backlight_on_delay);
}
546 
/* Honour the backlight-off -> panel-power-off delay (t9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
				       intel_dp->pps.backlight_off_delay);
}
552 
553 /* Read the current pp_control value, unlocking the register if it
554  * is locked
555  */
556 
557 static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
558 {
559 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
560 	u32 control;
561 
562 	lockdep_assert_held(&dev_priv->display.pps.mutex);
563 
564 	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
565 	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
566 			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
567 		control &= ~PANEL_UNLOCK_MASK;
568 		control |= PANEL_UNLOCK_REGS;
569 	}
570 	return control;
571 }
572 
573 /*
574  * Must be paired with intel_pps_vdd_off_unlocked().
575  * Must hold pps_mutex around the whole on/off sequence.
576  * Can be nested with intel_pps_vdd_{on,off}() calls.
577  */
578 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
579 {
580 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
581 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
582 	u32 pp;
583 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
584 	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
585 
586 	lockdep_assert_held(&dev_priv->display.pps.mutex);
587 
588 	if (!intel_dp_is_edp(intel_dp))
589 		return false;
590 
591 	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
592 	intel_dp->pps.want_panel_vdd = true;
593 
594 	if (edp_have_panel_vdd(intel_dp))
595 		return need_to_disable;
596 
597 	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
598 	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
599 							    intel_aux_power_domain(dig_port));
600 
601 	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
602 		    dig_port->base.base.base.id,
603 		    dig_port->base.base.name);
604 
605 	if (!edp_have_panel_power(intel_dp))
606 		wait_panel_power_cycle(intel_dp);
607 
608 	pp = ilk_get_pp_control(intel_dp);
609 	pp |= EDP_FORCE_VDD;
610 
611 	pp_stat_reg = _pp_stat_reg(intel_dp);
612 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
613 
614 	intel_de_write(dev_priv, pp_ctrl_reg, pp);
615 	intel_de_posting_read(dev_priv, pp_ctrl_reg);
616 	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
617 		    intel_de_read(dev_priv, pp_stat_reg),
618 		    intel_de_read(dev_priv, pp_ctrl_reg));
619 	/*
620 	 * If the panel wasn't on, delay before accessing aux channel
621 	 */
622 	if (!edp_have_panel_power(intel_dp)) {
623 		drm_dbg_kms(&dev_priv->drm,
624 			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
625 			    dig_port->base.base.base.id,
626 			    dig_port->base.base.name);
627 		msleep(intel_dp->pps.panel_power_up_delay);
628 	}
629 
630 	return need_to_disable;
631 }
632 
633 /*
634  * Must be paired with intel_pps_off().
635  * Nested calls to these functions are not allowed since
636  * we drop the lock. Caller must use some higher level
637  * locking to prevent nested calls from other threads.
638  */
639 void intel_pps_vdd_on(struct intel_dp *intel_dp)
640 {
641 	intel_wakeref_t wakeref;
642 	bool vdd;
643 
644 	if (!intel_dp_is_edp(intel_dp))
645 		return;
646 
647 	vdd = false;
648 	with_intel_pps_lock(intel_dp, wakeref)
649 		vdd = intel_pps_vdd_on_unlocked(intel_dp);
650 	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
651 			dp_to_dig_port(intel_dp)->base.base.base.id,
652 			dp_to_dig_port(intel_dp)->base.base.name);
653 }
654 
/*
 * Immediately clear the VDD force bit in hardware and drop the aux
 * power domain reference taken when it was set. No-op if VDD isn't
 * forced on. Caller must hold pps_mutex and must not want VDD anymore.
 */
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* Dropping VDD with panel power off starts the t11_t12 clock. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
695 
/*
 * Synchronously make sure VDD is off: cancel the delayed vdd off work
 * and clear the force bit under the pps lock.
 */
void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
}
711 
/* Delayed work: drop VDD unless someone re-requested it in the meantime. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_pps *pps = container_of(to_delayed_work(__work),
					     struct intel_pps, panel_vdd_work);
	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->pps.want_panel_vdd)
			intel_pps_vdd_off_sync_unlocked(intel_dp);
	}
}
724 
/* Schedule the delayed VDD off work, unless PPS init is still running. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * We may not yet know the real power sequencing delays,
	 * so keep VDD enabled until we're done with init.
	 */
	if (intel_dp->pps.initializing)
		return;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
}
744 
745 /*
746  * Must be paired with edp_panel_vdd_on().
747  * Must hold pps_mutex around the whole on/off sequence.
748  * Can be nested with intel_pps_vdd_{on,off}() calls.
749  */
750 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
751 {
752 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
753 
754 	lockdep_assert_held(&dev_priv->display.pps.mutex);
755 
756 	if (!intel_dp_is_edp(intel_dp))
757 		return;
758 
759 	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
760 			dp_to_dig_port(intel_dp)->base.base.base.id,
761 			dp_to_dig_port(intel_dp)->base.base.name);
762 
763 	intel_dp->pps.want_panel_vdd = false;
764 
765 	if (sync)
766 		intel_pps_vdd_off_sync_unlocked(intel_dp);
767 	else
768 		edp_panel_vdd_schedule_off(intel_dp);
769 }
770 
/*
 * Turn panel power on: honour t11_t12 first, program PANEL_POWER_ON,
 * then wait for the sequencer to report the panel fully on. Caller
 * must hold pps_mutex. No-op (with warning) if power is already on.
 */
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_IRONLAKE(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_IRONLAKE(dev_priv))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the t8 backlight-on delay. */
	intel_dp->pps.last_power_on = jiffies;

	if (IS_IRONLAKE(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
819 
/* Locked wrapper around intel_pps_on_unlocked(). */
void intel_pps_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_on_unlocked(intel_dp);
}
830 
/*
 * Turn panel power off, clearing the VDD force and backlight enable
 * bits in the same write, then wait for the sequencer to settle and
 * drop the aux power reference taken when VDD was enabled. Caller must
 * hold pps_mutex and is expected to have VDD forced on.
 */
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->pps.want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Timestamp for the t11_t12 power cycle delay. */
	intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
871 
/* Locked wrapper around intel_pps_off_unlocked(). */
void intel_pps_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_off_unlocked(intel_dp);
}
882 
883 /* Enable backlight in the panel power control. */
884 void intel_pps_backlight_on(struct intel_dp *intel_dp)
885 {
886 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
887 	intel_wakeref_t wakeref;
888 
889 	/*
890 	 * If we enable the backlight right away following a panel power
891 	 * on, we may see slight flicker as the panel syncs with the eDP
892 	 * link.  So delay a bit to make sure the image is solid before
893 	 * allowing it to appear.
894 	 */
895 	wait_backlight_on(intel_dp);
896 
897 	with_intel_pps_lock(intel_dp, wakeref) {
898 		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
899 		u32 pp;
900 
901 		pp = ilk_get_pp_control(intel_dp);
902 		pp |= EDP_BLC_ENABLE;
903 
904 		intel_de_write(dev_priv, pp_ctrl_reg, pp);
905 		intel_de_posting_read(dev_priv, pp_ctrl_reg);
906 	}
907 }
908 
909 /* Disable backlight in the panel power control. */
910 void intel_pps_backlight_off(struct intel_dp *intel_dp)
911 {
912 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
913 	intel_wakeref_t wakeref;
914 
915 	if (!intel_dp_is_edp(intel_dp))
916 		return;
917 
918 	with_intel_pps_lock(intel_dp, wakeref) {
919 		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
920 		u32 pp;
921 
922 		pp = ilk_get_pp_control(intel_dp);
923 		pp &= ~EDP_BLC_ENABLE;
924 
925 		intel_de_write(dev_priv, pp_ctrl_reg, pp);
926 		intel_de_posting_read(dev_priv, pp_ctrl_reg);
927 	}
928 
929 	intel_dp->pps.last_backlight_off = jiffies;
930 	edp_wait_backlight_off(intel_dp);
931 }
932 
933 /*
934  * Hook for controlling the panel power control backlight through the bl_power
935  * sysfs attribute. Take care to handle multiple calls.
936  */
937 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
938 {
939 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
940 	struct intel_dp *intel_dp = intel_attached_dp(connector);
941 	intel_wakeref_t wakeref;
942 	bool is_enabled;
943 
944 	is_enabled = false;
945 	with_intel_pps_lock(intel_dp, wakeref)
946 		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
947 	if (is_enabled == enable)
948 		return;
949 
950 	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
951 		    enable ? "enable" : "disable");
952 
953 	if (enable)
954 		intel_pps_backlight_on(intel_dp);
955 	else
956 		intel_pps_backlight_off(intel_dp);
957 }
958 
/*
 * Logically disconnect this port from its power sequencer: turn VDD
 * off, clear the sequencer's port select, and forget the assignment.
 * The port must not be active (active_pipe == INVALID_PIPE).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps.pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	intel_pps_vdd_off_sync_unlocked(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps.pps_pipe = INVALID_PIPE;
}
991 
/*
 * Take the power sequencer for @pipe away from whichever DP encoder is
 * currently using it: any encoder whose pps_pipe matches @pipe gets
 * detached (which also syncs its vdd off), so the caller can claim the
 * sequencer for another port. Must be called with the pps mutex held.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/*
		 * Stealing the sequencer from an encoder that is still
		 * actively using the pipe would be a driver bug.
		 */
		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps.pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
1019 
/*
 * vlv_pps_init - assign and initialize a VLV/CHV per-pipe power sequencer
 * @encoder: DP encoder being enabled
 * @crtc_state: crtc state for the modeset
 *
 * Claims the power sequencer of @crtc_state's pipe for @encoder, first
 * detaching any other sequencer this port was using and stealing the
 * target sequencer from any other port. For eDP the sequencer delays
 * and registers are then (re)programmed for this port.
 *
 * Must be called with the pps mutex held.
 */
void vlv_pps_init(struct intel_encoder *encoder,
		  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);

	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
	    intel_dp->pps.pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->pps.active_pipe = crtc->pipe;

	/* Only eDP actually uses the panel power sequencer. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps.pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);
}
1064 
/*
 * Adopt a VDD force bit left enabled by the BIOS: grab the AUX power
 * domain reference that our VDD state tracking expects to accompany it.
 * Must be called with the pps mutex held.
 */
static void pps_vdd_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	/* Nothing to adopt if VDD is not currently forced on. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
							    intel_aux_power_domain(dig_port));
}
1087 
1088 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1089 {
1090 	intel_wakeref_t wakeref;
1091 	bool have_power = false;
1092 
1093 	with_intel_pps_lock(intel_dp, wakeref) {
1094 		have_power = edp_have_panel_power(intel_dp) ||
1095 			     edp_have_panel_vdd(intel_dp);
1096 	}
1097 
1098 	return have_power;
1099 }
1100 
1101 static void pps_init_timestamps(struct intel_dp *intel_dp)
1102 {
1103 	/*
1104 	 * Initialize panel power off time to 0, assuming panel power could have
1105 	 * been toggled between kernel boot and now only by a previously loaded
1106 	 * and removed i915, which has already ensured sufficient power off
1107 	 * delay at module remove.
1108 	 */
1109 	intel_dp->pps.panel_power_off_time = 0;
1110 	intel_dp->pps.last_power_on = jiffies;
1111 	intel_dp->pps.last_backlight_off = jiffies;
1112 }
1113 
/*
 * Read the panel power sequencer delays currently programmed in the
 * hardware into @seq, normalized to the hw's 100 usec units (t11_t12
 * lives in a coarser-granularity register field, hence the * 1000
 * conversion below).
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	/* t11_t12 is in PP_DIVISOR where it exists, otherwise in PP_CONTROL (BXT+). */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
1148 
1149 static void
1150 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1151 		     const struct edp_power_seq *seq)
1152 {
1153 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1154 
1155 	drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1156 		    state_name,
1157 		    seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1158 }
1159 
1160 static void
1161 intel_pps_verify_state(struct intel_dp *intel_dp)
1162 {
1163 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1164 	struct edp_power_seq hw;
1165 	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1166 
1167 	intel_pps_readout_hw_state(intel_dp, &hw);
1168 
1169 	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1170 	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1171 		drm_err(&i915->drm, "PPS state mismatch\n");
1172 		intel_pps_dump_state(intel_dp, "sw", sw);
1173 		intel_pps_dump_state(intel_dp, "hw", &hw);
1174 	}
1175 }
1176 
1177 static bool pps_delays_valid(struct edp_power_seq *delays)
1178 {
1179 	return delays->t1_t3 || delays->t8 || delays->t9 ||
1180 		delays->t10 || delays->t11_t12;
1181 }
1182 
/*
 * Get the PPS delays as programmed by the BIOS. The hardware state is
 * read out only once and cached in bios_pps_delays, so later reinits
 * still report the original BIOS values rather than whatever we have
 * programmed since. Must be called with the pps mutex held.
 */
static void pps_init_delays_bios(struct intel_dp *intel_dp,
				 struct edp_power_seq *bios)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
		intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);

	*bios = intel_dp->pps.bios_pps_delays;

	intel_pps_dump_state(intel_dp, "bios", bios);
}
1197 
1198 static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1199 				struct edp_power_seq *vbt)
1200 {
1201 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1202 	struct intel_connector *connector = intel_dp->attached_connector;
1203 
1204 	*vbt = connector->panel.vbt.edp.pps;
1205 
1206 	if (!pps_delays_valid(vbt))
1207 		return;
1208 
1209 	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
1210 	 * of 500ms appears to be too short. Ocassionally the panel
1211 	 * just fails to power back on. Increasing the delay to 800ms
1212 	 * seems sufficient to avoid this problem.
1213 	 */
1214 	if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
1215 		vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
1216 		drm_dbg_kms(&dev_priv->drm,
1217 			    "Increasing T12 panel delay as per the quirk to %d\n",
1218 			    vbt->t11_t12);
1219 	}
1220 
1221 	/* T11_T12 delay is special and actually in units of 100ms, but zero
1222 	 * based in the hw (so we need to add 100 ms). But the sw vbt
1223 	 * table multiplies it with 1000 to make it in units of 100usec,
1224 	 * too. */
1225 	vbt->t11_t12 += 100 * 10;
1226 
1227 	intel_pps_dump_state(intel_dp, "vbt", vbt);
1228 }
1229 
/*
 * Fill *spec with the upper-limit delays from the eDP 1.3 spec, used as
 * a fallback when neither BIOS nor VBT provide a value. Must be called
 * with the pps mutex held.
 */
static void pps_init_delays_spec(struct intel_dp *intel_dp,
				 struct edp_power_seq *spec)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec->t1_t3 = 210 * 10;
	spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec->t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec->t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state(intel_dp, "spec", spec);
}
1251 
/*
 * Compute the final panel power sequencer delays and cache them in
 * intel_dp->pps.pps_delays. Per-field precedence is max(BIOS, VBT),
 * falling back to the eDP spec limits when both are zero. Also derives
 * the millisecond delay values used for the driver's manual waits.
 * Must be called with the pps mutex held; does nothing if the delays
 * were already computed.
 */
static void pps_init_delays(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	/* already initialized? */
	if (pps_delays_valid(final))
		return;

	pps_init_delays_bios(intel_dp, &cur);
	pps_init_delays_vbt(intel_dp, &vbt);
	pps_init_delays_spec(intel_dp, &spec);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the hw's 100 usec units to milliseconds. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
	intel_dp->pps.backlight_on_delay = get_delay(t8);
	intel_dp->pps.backlight_off_delay = get_delay(t9);
	intel_dp->pps.panel_power_down_delay = get_delay(t10);
	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->pps.panel_power_up_delay,
		    intel_dp->pps.panel_power_down_delay,
		    intel_dp->pps.panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->pps.backlight_on_delay,
		    intel_dp->pps.backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
1314 
/*
 * Program the panel power sequencer registers (delays, port select and
 * power cycle divisor) from the cached software pps_delays state.
 *
 * @force_disable_vdd: clear EDP_FORCE_VDD before programming; needed when
 * taking over a power sequencer the BIOS may have left with VDD enabled.
 *
 * Must be called with the pps mutex held.
 */
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&dev_priv->display.pps.mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	/* All delays below are in the hw's 100 usec units. */
	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 * Platforms without a dedicated PP_DIVISOR register keep the power
	 * cycle delay in PP_CONTROL instead (BXT_POWER_CYCLE_DELAY_MASK).
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
1409 
1410 void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1411 {
1412 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1413 	intel_wakeref_t wakeref;
1414 
1415 	if (!intel_dp_is_edp(intel_dp))
1416 		return;
1417 
1418 	with_intel_pps_lock(intel_dp, wakeref) {
1419 		/*
1420 		 * Reinit the power sequencer also on the resume path, in case
1421 		 * BIOS did something nasty with it.
1422 		 */
1423 		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1424 			vlv_initial_power_sequencer_setup(intel_dp);
1425 
1426 		pps_init_delays(intel_dp);
1427 		pps_init_registers(intel_dp, false);
1428 		pps_vdd_init(intel_dp);
1429 
1430 		if (edp_have_panel_vdd(intel_dp))
1431 			edp_panel_vdd_schedule_off(intel_dp);
1432 	}
1433 }
1434 
1435 void intel_pps_init(struct intel_dp *intel_dp)
1436 {
1437 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1438 	intel_wakeref_t wakeref;
1439 
1440 	intel_dp->pps.initializing = true;
1441 	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1442 
1443 	pps_init_timestamps(intel_dp);
1444 
1445 	with_intel_pps_lock(intel_dp, wakeref) {
1446 		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1447 			vlv_initial_power_sequencer_setup(intel_dp);
1448 
1449 		pps_init_delays(intel_dp);
1450 		pps_init_registers(intel_dp, false);
1451 		pps_vdd_init(intel_dp);
1452 	}
1453 }
1454 
1455 void intel_pps_init_late(struct intel_dp *intel_dp)
1456 {
1457 	intel_wakeref_t wakeref;
1458 
1459 	with_intel_pps_lock(intel_dp, wakeref) {
1460 		/* Reinit delays after per-panel info has been parsed from VBT */
1461 		memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1462 		pps_init_delays(intel_dp);
1463 		pps_init_registers(intel_dp, false);
1464 
1465 		intel_dp->pps.initializing = false;
1466 
1467 		if (edp_have_panel_vdd(intel_dp))
1468 			edp_panel_vdd_schedule_off(intel_dp);
1469 	}
1470 }
1471 
1472 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1473 {
1474 	int pps_num;
1475 	int pps_idx;
1476 
1477 	if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1478 		return;
1479 	/*
1480 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1481 	 * everywhere where registers can be write protected.
1482 	 */
1483 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1484 		pps_num = 2;
1485 	else
1486 		pps_num = 1;
1487 
1488 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1489 		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1490 
1491 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1492 		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1493 	}
1494 }
1495 
1496 void intel_pps_setup(struct drm_i915_private *i915)
1497 {
1498 	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1499 		i915->display.pps.mmio_base = PCH_PPS_BASE;
1500 	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1501 		i915->display.pps.mmio_base = VLV_PPS_BASE;
1502 	else
1503 		i915->display.pps.mmio_base = PPS_BASE;
1504 }
1505 
/*
 * assert_pps_unlocked - WARN if the PPS registers are write protected
 * @dev_priv: i915 device
 * @pipe: pipe to check
 *
 * Complains when the panel driven from @pipe is powered on while the
 * panel power sequencer registers are still locked (missing the unlock
 * key). Not applicable on DDI platforms.
 */
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Figure out which pipe drives the panel from the PPS port select. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Otherwise the panel must be on LVDS; warn on anything else. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	/* Unlocked means panel power off, or the unlock key is present. */
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}
1563