xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_pmdemand.c (revision 9f771739a04919226081a107167596de75108fbb)
1*4c4cc7acSMika Kahola // SPDX-License-Identifier: MIT
2*4c4cc7acSMika Kahola /*
3*4c4cc7acSMika Kahola  * Copyright © 2023 Intel Corporation
4*4c4cc7acSMika Kahola  */
5*4c4cc7acSMika Kahola 
6*4c4cc7acSMika Kahola #include <linux/bitops.h>
7*4c4cc7acSMika Kahola 
8*4c4cc7acSMika Kahola #include "i915_drv.h"
9*4c4cc7acSMika Kahola #include "i915_reg.h"
10*4c4cc7acSMika Kahola #include "intel_atomic.h"
11*4c4cc7acSMika Kahola #include "intel_bw.h"
12*4c4cc7acSMika Kahola #include "intel_cdclk.h"
13*4c4cc7acSMika Kahola #include "intel_de.h"
14*4c4cc7acSMika Kahola #include "intel_display_trace.h"
15*4c4cc7acSMika Kahola #include "intel_pmdemand.h"
16*4c4cc7acSMika Kahola #include "skl_watermark.h"
17*4c4cc7acSMika Kahola 
18*4c4cc7acSMika Kahola static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj * obj)19*4c4cc7acSMika Kahola intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
20*4c4cc7acSMika Kahola {
21*4c4cc7acSMika Kahola 	struct intel_pmdemand_state *pmdemand_state;
22*4c4cc7acSMika Kahola 
23*4c4cc7acSMika Kahola 	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
24*4c4cc7acSMika Kahola 	if (!pmdemand_state)
25*4c4cc7acSMika Kahola 		return NULL;
26*4c4cc7acSMika Kahola 
27*4c4cc7acSMika Kahola 	return &pmdemand_state->base;
28*4c4cc7acSMika Kahola }
29*4c4cc7acSMika Kahola 
/* Free a pmdemand global state previously allocated or duplicated. */
static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}
35*4c4cc7acSMika Kahola 
/* Hooks wiring pmdemand state into the atomic global-state framework. */
static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};
40*4c4cc7acSMika Kahola 
41*4c4cc7acSMika Kahola static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state * state)42*4c4cc7acSMika Kahola intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
43*4c4cc7acSMika Kahola {
44*4c4cc7acSMika Kahola 	struct drm_i915_private *i915 = to_i915(state->base.dev);
45*4c4cc7acSMika Kahola 	struct intel_global_state *pmdemand_state =
46*4c4cc7acSMika Kahola 		intel_atomic_get_global_obj_state(state,
47*4c4cc7acSMika Kahola 						  &i915->display.pmdemand.obj);
48*4c4cc7acSMika Kahola 
49*4c4cc7acSMika Kahola 	if (IS_ERR(pmdemand_state))
50*4c4cc7acSMika Kahola 		return ERR_CAST(pmdemand_state);
51*4c4cc7acSMika Kahola 
52*4c4cc7acSMika Kahola 	return to_intel_pmdemand_state(pmdemand_state);
53*4c4cc7acSMika Kahola }
54*4c4cc7acSMika Kahola 
55*4c4cc7acSMika Kahola static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state * state)56*4c4cc7acSMika Kahola intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
57*4c4cc7acSMika Kahola {
58*4c4cc7acSMika Kahola 	struct drm_i915_private *i915 = to_i915(state->base.dev);
59*4c4cc7acSMika Kahola 	struct intel_global_state *pmdemand_state =
60*4c4cc7acSMika Kahola 		intel_atomic_get_old_global_obj_state(state,
61*4c4cc7acSMika Kahola 						      &i915->display.pmdemand.obj);
62*4c4cc7acSMika Kahola 
63*4c4cc7acSMika Kahola 	if (!pmdemand_state)
64*4c4cc7acSMika Kahola 		return NULL;
65*4c4cc7acSMika Kahola 
66*4c4cc7acSMika Kahola 	return to_intel_pmdemand_state(pmdemand_state);
67*4c4cc7acSMika Kahola }
68*4c4cc7acSMika Kahola 
69*4c4cc7acSMika Kahola static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state * state)70*4c4cc7acSMika Kahola intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
71*4c4cc7acSMika Kahola {
72*4c4cc7acSMika Kahola 	struct drm_i915_private *i915 = to_i915(state->base.dev);
73*4c4cc7acSMika Kahola 	struct intel_global_state *pmdemand_state =
74*4c4cc7acSMika Kahola 		intel_atomic_get_new_global_obj_state(state,
75*4c4cc7acSMika Kahola 						      &i915->display.pmdemand.obj);
76*4c4cc7acSMika Kahola 
77*4c4cc7acSMika Kahola 	if (!pmdemand_state)
78*4c4cc7acSMika Kahola 		return NULL;
79*4c4cc7acSMika Kahola 
80*4c4cc7acSMika Kahola 	return to_intel_pmdemand_state(pmdemand_state);
81*4c4cc7acSMika Kahola }
82*4c4cc7acSMika Kahola 
/*
 * Allocate the initial pmdemand state and register it as a global atomic
 * object, then apply the Wa_14016740474 workaround on the affected MTL
 * display steppings. Returns 0 on success, -ENOMEM on allocation failure.
 */
int intel_pmdemand_init(struct drm_i915_private *i915)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0))
		/* Wa_14016740474: disable the pmdemand response timeout */
		intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}
101*4c4cc7acSMika Kahola 
intel_pmdemand_init_early(struct drm_i915_private * i915)102*4c4cc7acSMika Kahola void intel_pmdemand_init_early(struct drm_i915_private *i915)
103*4c4cc7acSMika Kahola {
104*4c4cc7acSMika Kahola 	mutex_init(&i915->display.pmdemand.lock);
105*4c4cc7acSMika Kahola 	init_waitqueue_head(&i915->display.pmdemand.waitqueue);
106*4c4cc7acSMika Kahola }
107*4c4cc7acSMika Kahola 
108*4c4cc7acSMika Kahola void
intel_pmdemand_update_phys_mask(struct drm_i915_private * i915,struct intel_encoder * encoder,struct intel_pmdemand_state * pmdemand_state,bool set_bit)109*4c4cc7acSMika Kahola intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
110*4c4cc7acSMika Kahola 				struct intel_encoder *encoder,
111*4c4cc7acSMika Kahola 				struct intel_pmdemand_state *pmdemand_state,
112*4c4cc7acSMika Kahola 				bool set_bit)
113*4c4cc7acSMika Kahola {
114*4c4cc7acSMika Kahola 	enum phy phy;
115*4c4cc7acSMika Kahola 
116*4c4cc7acSMika Kahola 	if (DISPLAY_VER(i915) < 14)
117*4c4cc7acSMika Kahola 		return;
118*4c4cc7acSMika Kahola 
119*4c4cc7acSMika Kahola 	if (!encoder)
120*4c4cc7acSMika Kahola 		return;
121*4c4cc7acSMika Kahola 
122*4c4cc7acSMika Kahola 	phy = intel_port_to_phy(i915, encoder->port);
123*4c4cc7acSMika Kahola 	if (intel_phy_is_tc(i915, phy))
124*4c4cc7acSMika Kahola 		return;
125*4c4cc7acSMika Kahola 
126*4c4cc7acSMika Kahola 	if (set_bit)
127*4c4cc7acSMika Kahola 		pmdemand_state->active_combo_phys_mask |= BIT(phy);
128*4c4cc7acSMika Kahola 	else
129*4c4cc7acSMika Kahola 		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
130*4c4cc7acSMika Kahola }
131*4c4cc7acSMika Kahola 
132*4c4cc7acSMika Kahola void
intel_pmdemand_update_port_clock(struct drm_i915_private * i915,struct intel_pmdemand_state * pmdemand_state,enum pipe pipe,int port_clock)133*4c4cc7acSMika Kahola intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
134*4c4cc7acSMika Kahola 				 struct intel_pmdemand_state *pmdemand_state,
135*4c4cc7acSMika Kahola 				 enum pipe pipe, int port_clock)
136*4c4cc7acSMika Kahola {
137*4c4cc7acSMika Kahola 	if (DISPLAY_VER(i915) < 14)
138*4c4cc7acSMika Kahola 		return;
139*4c4cc7acSMika Kahola 
140*4c4cc7acSMika Kahola 	pmdemand_state->ddi_clocks[pipe] = port_clock;
141*4c4cc7acSMika Kahola }
142*4c4cc7acSMika Kahola 
143*4c4cc7acSMika Kahola static void
intel_pmdemand_update_max_ddiclk(struct drm_i915_private * i915,struct intel_atomic_state * state,struct intel_pmdemand_state * pmdemand_state)144*4c4cc7acSMika Kahola intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
145*4c4cc7acSMika Kahola 				 struct intel_atomic_state *state,
146*4c4cc7acSMika Kahola 				 struct intel_pmdemand_state *pmdemand_state)
147*4c4cc7acSMika Kahola {
148*4c4cc7acSMika Kahola 	int max_ddiclk = 0;
149*4c4cc7acSMika Kahola 	const struct intel_crtc_state *new_crtc_state;
150*4c4cc7acSMika Kahola 	struct intel_crtc *crtc;
151*4c4cc7acSMika Kahola 	int i;
152*4c4cc7acSMika Kahola 
153*4c4cc7acSMika Kahola 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
154*4c4cc7acSMika Kahola 		intel_pmdemand_update_port_clock(i915, pmdemand_state,
155*4c4cc7acSMika Kahola 						 crtc->pipe,
156*4c4cc7acSMika Kahola 						 new_crtc_state->port_clock);
157*4c4cc7acSMika Kahola 
158*4c4cc7acSMika Kahola 	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
159*4c4cc7acSMika Kahola 		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);
160*4c4cc7acSMika Kahola 
161*4c4cc7acSMika Kahola 	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
162*4c4cc7acSMika Kahola }
163*4c4cc7acSMika Kahola 
/*
 * Update the combo PHY mask for a single connector state: when setting the
 * bit the new CRTC state is consulted, when clearing it the old one. The
 * PHY is only (un)accounted if that CRTC state is/was active.
 *
 * NOTE(review): assumes a connector that needs a modeset always has its
 * CRTC in @state, so the crtc_state lookup cannot return NULL here --
 * confirm against the callers.
 */
static void
intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	/* Connector not bound to any CRTC in this state: nothing to track. */
	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	/* An inactive CRTC contributes no PHY demand. */
	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
					set_bit);
}
189*4c4cc7acSMika Kahola 
/*
 * Recompute the number of active non-Type-C (combo) PHYs for this commit:
 * for every connector undergoing a modeset, drop the PHY bit contributed by
 * its old state and add the one for its new state, then clamp the resulting
 * population count to 7, the maximum the hardware field can hold.
 */
static void
intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/*
		 * First clear the active phys in the old connector state;
		 * clearing must precede setting in case both states use the
		 * same PHY.
		 */
		intel_pmdemand_update_connector_phys(i915, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in new connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}
220*4c4cc7acSMika Kahola 
221*4c4cc7acSMika Kahola static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private * i915,struct intel_encoder * encoder)222*4c4cc7acSMika Kahola intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
223*4c4cc7acSMika Kahola 				  struct intel_encoder *encoder)
224*4c4cc7acSMika Kahola {
225*4c4cc7acSMika Kahola 	enum phy phy;
226*4c4cc7acSMika Kahola 
227*4c4cc7acSMika Kahola 	if (!encoder)
228*4c4cc7acSMika Kahola 		return false;
229*4c4cc7acSMika Kahola 
230*4c4cc7acSMika Kahola 	phy = intel_port_to_phy(i915, encoder->port);
231*4c4cc7acSMika Kahola 
232*4c4cc7acSMika Kahola 	return intel_phy_is_tc(i915, phy);
233*4c4cc7acSMika Kahola }
234*4c4cc7acSMika Kahola 
235*4c4cc7acSMika Kahola static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state * state)236*4c4cc7acSMika Kahola intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
237*4c4cc7acSMika Kahola {
238*4c4cc7acSMika Kahola 	struct drm_i915_private *i915 = to_i915(state->base.dev);
239*4c4cc7acSMika Kahola 	struct drm_connector_state *old_conn_state;
240*4c4cc7acSMika Kahola 	struct drm_connector_state *new_conn_state;
241*4c4cc7acSMika Kahola 	struct drm_connector *connector;
242*4c4cc7acSMika Kahola 	int i;
243*4c4cc7acSMika Kahola 
244*4c4cc7acSMika Kahola 	for_each_oldnew_connector_in_state(&state->base, connector,
245*4c4cc7acSMika Kahola 					   old_conn_state, new_conn_state, i) {
246*4c4cc7acSMika Kahola 		struct intel_encoder *old_encoder =
247*4c4cc7acSMika Kahola 			to_intel_encoder(old_conn_state->best_encoder);
248*4c4cc7acSMika Kahola 		struct intel_encoder *new_encoder =
249*4c4cc7acSMika Kahola 			to_intel_encoder(new_conn_state->best_encoder);
250*4c4cc7acSMika Kahola 
251*4c4cc7acSMika Kahola 		if (!intel_connector_needs_modeset(state, connector))
252*4c4cc7acSMika Kahola 			continue;
253*4c4cc7acSMika Kahola 
254*4c4cc7acSMika Kahola 		if (old_encoder == new_encoder ||
255*4c4cc7acSMika Kahola 		    (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
256*4c4cc7acSMika Kahola 		     intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
257*4c4cc7acSMika Kahola 			continue;
258*4c4cc7acSMika Kahola 
259*4c4cc7acSMika Kahola 		return true;
260*4c4cc7acSMika Kahola 	}
261*4c4cc7acSMika Kahola 
262*4c4cc7acSMika Kahola 	return false;
263*4c4cc7acSMika Kahola }
264*4c4cc7acSMika Kahola 
/*
 * Decide whether this atomic commit can affect any pmdemand parameter:
 * QGV peak bandwidth, active pipes / enabled dbuf slices, CDCLK frequency
 * or voltage level, any CRTC port clock, or the set of combo PHYs in use.
 * A NULL "new" global state means that object was not touched by the
 * commit, so the corresponding check is skipped.
 */
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	/* QGV point peak bandwidth change? */
	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	/* Active pipes or enabled dbuf slices change? */
	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    (new_dbuf_state->active_pipes !=
	     old_dbuf_state->active_pipes ||
	     new_dbuf_state->enabled_slices !=
	     old_dbuf_state->enabled_slices))
		return true;

	/* CDCLK frequency or voltage level change? */
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	/* Any CRTC changing its port clock? */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}
305*4c4cc7acSMika Kahola 
/*
 * Atomic-check hook: if this commit can change any pmdemand parameter,
 * pull in the bw/dbuf/cdclk global states and recompute the full parameter
 * set in the new pmdemand state. Locks (or, for modesets, serializes) the
 * pmdemand global state so the commit phase can program it consistently.
 *
 * Returns 0 on success or a negative error code from the global-state
 * lookups / locking helpers.
 */
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	/* pmdemand only exists on display version 14+ hardware. */
	if (DISPLAY_VER(i915) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* firmware will calculate the qclk_gv_index, requirement is set to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	/* Both hardware fields saturate at 3. */
	new_pmdemand_state->params.active_pipes =
		min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	new_pmdemand_state->params.active_dbufs =
		min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts with 1 because of CDCLK PLL.
	 * TODO: Missing to account genlock filter when it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Setting scalers to max as it can not be calculated during flips and
	 * fastsets without taking global states locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}
372*4c4cc7acSMika Kahola 
intel_pmdemand_check_prev_transaction(struct drm_i915_private * i915)373*4c4cc7acSMika Kahola static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
374*4c4cc7acSMika Kahola {
375*4c4cc7acSMika Kahola 	return !(intel_de_wait_for_clear(i915,
376*4c4cc7acSMika Kahola 					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
377*4c4cc7acSMika Kahola 					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
378*4c4cc7acSMika Kahola 		 intel_de_wait_for_clear(i915,
379*4c4cc7acSMika Kahola 					 GEN12_DCPR_STATUS_1,
380*4c4cc7acSMika Kahola 					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
381*4c4cc7acSMika Kahola }
382*4c4cc7acSMika Kahola 
/*
 * Read back the current pmdemand parameters from the two request registers
 * into @pmdemand_state (used to seed the software state from hardware).
 * If a previous transaction is still pending the read-back would be
 * unreliable, so the params are zeroed instead. No-op before display
 * version 14. Runs under the pmdemand lock.
 */
void
intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(i915) < 14)
		return;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915))) {
		/* Hardware busy: fall back to all-zero parameters. */
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	/* Parameters held in request register 0 ("Set 1") */
	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_pipes =
		REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
	pmdemand_state->params.active_dbufs =
		REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	/* Parameters held in request register 1 ("Set 2") */
	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
	pmdemand_state->params.scalers =
		REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}
429*4c4cc7acSMika Kahola 
intel_pmdemand_req_complete(struct drm_i915_private * i915)430*4c4cc7acSMika Kahola static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
431*4c4cc7acSMika Kahola {
432*4c4cc7acSMika Kahola 	return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
433*4c4cc7acSMika Kahola 		 XELPDP_PMDEMAND_REQ_ENABLE);
434*4c4cc7acSMika Kahola }
435*4c4cc7acSMika Kahola 
intel_pmdemand_wait(struct drm_i915_private * i915)436*4c4cc7acSMika Kahola static void intel_pmdemand_wait(struct drm_i915_private *i915)
437*4c4cc7acSMika Kahola {
438*4c4cc7acSMika Kahola 	if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
439*4c4cc7acSMika Kahola 				intel_pmdemand_req_complete(i915),
440*4c4cc7acSMika Kahola 				msecs_to_jiffies_timeout(10)))
441*4c4cc7acSMika Kahola 		drm_err(&i915->drm,
442*4c4cc7acSMika Kahola 			"timed out waiting for Punit PM Demand Response\n");
443*4c4cc7acSMika Kahola }
444*4c4cc7acSMika Kahola 
/*
 * Required to be programmed during Display Init Sequences.
 *
 * Program only the dbuf-slice count (clamped to 3) into the pmdemand
 * request registers, kick the request and wait for completion. Skipped
 * (with a WARN) if a previous transaction is still pending. Runs under
 * the pmdemand lock.
 */
void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	/* Kick the request and wait for the hardware to ack it. */
	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}
467*4c4cc7acSMika Kahola 
/*
 * Merge the old/new software pmdemand parameters (and, when not serialized,
 * the values currently in the registers) into *reg1/*reg2, taking the
 * maximum per field. Pass @old as NULL for the post-plane pass so only the
 * new values (and, unserialized, the register values) are considered.
 */
static void
intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happens in two steps. Pre plane and
	 * post plane updates. During the pre plane, as DE might still be
	 * handling with some old operations, to avoid unexpected performance
	 * issues, program the pmdemand parameters with higher of old and new
	 * values. And then after once settled, use the new parameter values
	 * as part of the post plane update.
	 *
	 * If the pmdemand params update happens without modeset allowed, this
	 * means we can't serialize the updates. So that implies possibility of
	 * some parallel atomic commits affecting the pmdemand parameters. In
	 * that case, we need to consider the current values from the register
	 * as well. So in pre-plane case, we need to check the max of old, new
	 * and current register value if not serialized. In post plane update
	 * we need to consider max of new and current register value if not
	 * serialized
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Fields living in request register 0 ("Set 1") */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
	update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Fields living in request register 1 ("Set 2") */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

#undef update_reg
}
516*4c4cc7acSMika Kahola 
517*4c4cc7acSMika Kahola static void
intel_pmdemand_program_params(struct drm_i915_private * i915,const struct intel_pmdemand_state * new,const struct intel_pmdemand_state * old,bool serialized)518*4c4cc7acSMika Kahola intel_pmdemand_program_params(struct drm_i915_private *i915,
519*4c4cc7acSMika Kahola 			      const struct intel_pmdemand_state *new,
520*4c4cc7acSMika Kahola 			      const struct intel_pmdemand_state *old,
521*4c4cc7acSMika Kahola 			      bool serialized)
522*4c4cc7acSMika Kahola {
523*4c4cc7acSMika Kahola 	bool changed = false;
524*4c4cc7acSMika Kahola 	u32 reg1, mod_reg1;
525*4c4cc7acSMika Kahola 	u32 reg2, mod_reg2;
526*4c4cc7acSMika Kahola 
527*4c4cc7acSMika Kahola 	mutex_lock(&i915->display.pmdemand.lock);
528*4c4cc7acSMika Kahola 	if (drm_WARN_ON(&i915->drm,
529*4c4cc7acSMika Kahola 			!intel_pmdemand_check_prev_transaction(i915)))
530*4c4cc7acSMika Kahola 		goto unlock;
531*4c4cc7acSMika Kahola 
532*4c4cc7acSMika Kahola 	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
533*4c4cc7acSMika Kahola 	mod_reg1 = reg1;
534*4c4cc7acSMika Kahola 
535*4c4cc7acSMika Kahola 	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
536*4c4cc7acSMika Kahola 	mod_reg2 = reg2;
537*4c4cc7acSMika Kahola 
538*4c4cc7acSMika Kahola 	intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
539*4c4cc7acSMika Kahola 				     serialized);
540*4c4cc7acSMika Kahola 
541*4c4cc7acSMika Kahola 	if (reg1 != mod_reg1) {
542*4c4cc7acSMika Kahola 		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
543*4c4cc7acSMika Kahola 			       mod_reg1);
544*4c4cc7acSMika Kahola 		changed = true;
545*4c4cc7acSMika Kahola 	}
546*4c4cc7acSMika Kahola 
547*4c4cc7acSMika Kahola 	if (reg2 != mod_reg2) {
548*4c4cc7acSMika Kahola 		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
549*4c4cc7acSMika Kahola 			       mod_reg2);
550*4c4cc7acSMika Kahola 		changed = true;
551*4c4cc7acSMika Kahola 	}
552*4c4cc7acSMika Kahola 
553*4c4cc7acSMika Kahola 	/* Initiate pm demand request only if register values are changed */
554*4c4cc7acSMika Kahola 	if (!changed)
555*4c4cc7acSMika Kahola 		goto unlock;
556*4c4cc7acSMika Kahola 
557*4c4cc7acSMika Kahola 	drm_dbg_kms(&i915->drm,
558*4c4cc7acSMika Kahola 		    "initate pmdemand request values: (0x%x 0x%x)\n",
559*4c4cc7acSMika Kahola 		    mod_reg1, mod_reg2);
560*4c4cc7acSMika Kahola 
561*4c4cc7acSMika Kahola 	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
562*4c4cc7acSMika Kahola 		     XELPDP_PMDEMAND_REQ_ENABLE);
563*4c4cc7acSMika Kahola 
564*4c4cc7acSMika Kahola 	intel_pmdemand_wait(i915);
565*4c4cc7acSMika Kahola 
566*4c4cc7acSMika Kahola unlock:
567*4c4cc7acSMika Kahola 	mutex_unlock(&i915->display.pmdemand.lock);
568*4c4cc7acSMika Kahola }
569*4c4cc7acSMika Kahola 
570*4c4cc7acSMika Kahola static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state * new,const struct intel_pmdemand_state * old)571*4c4cc7acSMika Kahola intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
572*4c4cc7acSMika Kahola 			     const struct intel_pmdemand_state *old)
573*4c4cc7acSMika Kahola {
574*4c4cc7acSMika Kahola 	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
575*4c4cc7acSMika Kahola }
576*4c4cc7acSMika Kahola 
intel_pmdemand_pre_plane_update(struct intel_atomic_state * state)577*4c4cc7acSMika Kahola void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
578*4c4cc7acSMika Kahola {
579*4c4cc7acSMika Kahola 	struct drm_i915_private *i915 = to_i915(state->base.dev);
580*4c4cc7acSMika Kahola 	const struct intel_pmdemand_state *new_pmdemand_state =
581*4c4cc7acSMika Kahola 		intel_atomic_get_new_pmdemand_state(state);
582*4c4cc7acSMika Kahola 	const struct intel_pmdemand_state *old_pmdemand_state =
583*4c4cc7acSMika Kahola 		intel_atomic_get_old_pmdemand_state(state);
584*4c4cc7acSMika Kahola 
585*4c4cc7acSMika Kahola 	if (DISPLAY_VER(i915) < 14)
586*4c4cc7acSMika Kahola 		return;
587*4c4cc7acSMika Kahola 
588*4c4cc7acSMika Kahola 	if (!new_pmdemand_state ||
589*4c4cc7acSMika Kahola 	    !intel_pmdemand_state_changed(new_pmdemand_state,
590*4c4cc7acSMika Kahola 					  old_pmdemand_state))
591*4c4cc7acSMika Kahola 		return;
592*4c4cc7acSMika Kahola 
593*4c4cc7acSMika Kahola 	WARN_ON(!new_pmdemand_state->base.changed);
594*4c4cc7acSMika Kahola 
595*4c4cc7acSMika Kahola 	intel_pmdemand_program_params(i915, new_pmdemand_state,
596*4c4cc7acSMika Kahola 				      old_pmdemand_state,
597*4c4cc7acSMika Kahola 				      intel_atomic_global_state_is_serialized(state));
598*4c4cc7acSMika Kahola }
599*4c4cc7acSMika Kahola 
intel_pmdemand_post_plane_update(struct intel_atomic_state * state)600*4c4cc7acSMika Kahola void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
601*4c4cc7acSMika Kahola {
602*4c4cc7acSMika Kahola 	struct drm_i915_private *i915 = to_i915(state->base.dev);
603*4c4cc7acSMika Kahola 	const struct intel_pmdemand_state *new_pmdemand_state =
604*4c4cc7acSMika Kahola 		intel_atomic_get_new_pmdemand_state(state);
605*4c4cc7acSMika Kahola 	const struct intel_pmdemand_state *old_pmdemand_state =
606*4c4cc7acSMika Kahola 		intel_atomic_get_old_pmdemand_state(state);
607*4c4cc7acSMika Kahola 
608*4c4cc7acSMika Kahola 	if (DISPLAY_VER(i915) < 14)
609*4c4cc7acSMika Kahola 		return;
610*4c4cc7acSMika Kahola 
611*4c4cc7acSMika Kahola 	if (!new_pmdemand_state ||
612*4c4cc7acSMika Kahola 	    !intel_pmdemand_state_changed(new_pmdemand_state,
613*4c4cc7acSMika Kahola 					  old_pmdemand_state))
614*4c4cc7acSMika Kahola 		return;
615*4c4cc7acSMika Kahola 
616*4c4cc7acSMika Kahola 	WARN_ON(!new_pmdemand_state->base.changed);
617*4c4cc7acSMika Kahola 
618*4c4cc7acSMika Kahola 	intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
619*4c4cc7acSMika Kahola 				      intel_atomic_global_state_is_serialized(state));
620*4c4cc7acSMika Kahola }
621