// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

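/*
 * Memory subsystem info read back from pcode: the per-point DRAM timings
 * above (tRP, tRDPRE, tRC, tRAS, tRCD), the number of QGV points and
 * channels, the DRAM burst length (t_bl) and the DRAM type.
 */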
struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 num_channels;
	u8 t_bl;
	enum intel_dram_type dram_type;
};

static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_info *qi)
{
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	if (IS_GEN(dev_priv, 12)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 5:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else if (IS_GEN(dev_priv, 11)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 1:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 2:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
	}

	qi->num_channels = (val & 0xf0) >> 4;
	qi->num_points = (val & 0xf00) >> 8;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;

	return 0;
}

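/*
 * Read a single QGV point from pcode: dclk, tRP and tRCD come back in the
 * first dword, tRDPRE and tRAS in the second; tRC is derived as tRP + tRAS.
 */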
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_POINTS_RESTRICTED_MASK,
				ICL_PCODE_POINTS_RESTRICTED,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	int i, ret;

	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
	if (ret)
		return ret;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	return 0;
}

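/*
 * Bandwidth for a given dclk, scaled by num/den. dclk comes from pcode in
 * units of 16.666 MHz, hence the 100/6 factor. The result is presumably in
 * MB/s, since icl_get_bw_info() compares it against deprogbwlimit * 1000
 * (GB/s -> MB/s). E.g. dclk = 144 corresponds to 2400 MHz, and
 * icl_calc_bw(144, 16, 1) = 16 * 144 * 100 / 6 = 38400, i.e. ~38.4 GB/s.
 */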
static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

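/*
 * Per-platform system agent parameters: display engine burst length,
 * programmable display engine bandwidth limit (GB/s) and the number of
 * display RT IDs.
 */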
struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
};

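/*
 * Fill dev_priv->max_bw[]: for each plane-count bucket, the derated
 * bandwidth available at every QGV point. The result is capped at 60% of
 * the peak display engine bandwidth and further derated to 90% of the
 * per-point value, per the factors below.
 */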
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits\n");
		return ret;
	}
	num_channels = qi.num_channels;

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	/*
	 * If SAGV is disabled in BIOS, we always get just one QGV point,
	 * and we can't send PCode commands to restrict it as that would
	 * fail and be pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->sagv_status = I915_SAGV_ENABLED;

	return 0;
}

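/*
 * Look up the derated bandwidth for the given number of active planes and
 * QGV point, as precomputed by icl_get_bw_info().
 */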
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}

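/*
 * Read the memory subsystem parameters once at init time and fill in the
 * per-platform bandwidth tables; a no-op on platforms without display or
 * without bandwidth limits.
 */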
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv))
		icl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}

static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}

void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}

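/*
 * Accessors for the global bandwidth state. The old/new variants only look
 * up a state already added to the atomic commit, while
 * intel_atomic_get_bw_state() adds the global object to the commit and may
 * therefore return an error pointer.
 */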
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

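/*
 * Work out how much bandwidth each dbuf slice carries in the new state and
 * derive the minimum CDCLK from the total (see the 64 * CDCLK note below).
 */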
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int max_bw = 0;
	int slice_id;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		enum plane_id plane_id;
		struct intel_dbuf_bw *crtc_bw;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];

		memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));

		if (!crtc_state->hw.active)
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *plane_alloc =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			const struct skl_ddb_entry *uv_plane_alloc =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
			unsigned int data_rate = crtc_state->data_rate[plane_id];
			unsigned int dbuf_mask = 0;

			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);

			/*
			 * FIXME: To calculate this more properly we probably
			 * need to split the per-plane data_rate into data_rate_y
			 * and data_rate_uv for multiplanar formats so that it
			 * doesn't get accounted twice if the planes happen to
			 * reside on different slices.
			 * However for pre-icl this works anyway because we
			 * have only a single slice, and for icl+ the uv plane
			 * has a non-zero data rate.
			 * So in the worst case these calculations are a bit
			 * pessimistic, which shouldn't pose any significant
			 * problem anyway.
			 */
			for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
				crtc_bw->used_bw[slice_id] += data_rate;
		}
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_dbuf_bw *crtc_bw;

		crtc_bw = &new_bw_state->dbuf_bw[pipe];

		for_each_dbuf_slice(slice_id) {
			/*
			 * Current experimental observations show that contrary
			 * to BSpec we get underruns once we exceed 64 * CDCLK
			 * for slices in total.
			 * As a temporary measure, in order not to keep CDCLK
			 * bumped up all the time, we calculate CDCLK according
			 * to this formula for the overall bw consumed by slices.
			 */
			max_bw += crtc_bw->used_bw[slice_id];
		}
	}

	new_bw_state->min_cdclk = max_bw / 64;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

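/*
 * Variant that doesn't look at dbuf slices: the bandwidth state's min_cdclk
 * simply tracks the maximum per-pipe min_cdclk from the CDCLK state.
 */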
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int min_cdclk = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_new_cdclk_state(state);
		if (!cdclk_state)
			return 0;

		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
	}

	new_bw_state->min_cdclk = min_cdclk;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

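/*
 * Atomic check for memory bandwidth: recompute the per-pipe data rates and
 * plane counts, find which QGV points can sustain the total data rate, and
 * store the complement as qgv_points_mask for pcode. If SAGV can't be
 * enabled, only the single highest-bandwidth point is kept.
 */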
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	unsigned int data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i, ret;
	u32 allowed_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
	u32 mask = (1 << num_qgv_points) - 1;

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		drm_dbg_kms(&dev_priv->drm,
			    "pipe %c data rate %u num active planes %u\n",
			    pipe_name(crtc->pipe),
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	if (!new_bw_state)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as they are calculated earlier
		 * in intel_atomic_check.
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			allowed_points |= BIT(i);
		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed point
	 * left, so if there is none, simply reject the configuration for
	 * obvious reasons.
	 */
	if (allowed_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * If we can't enable SAGV due to the increased memory latency it
	 * may cause, leave only the single point with the highest bandwidth.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		allowed_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}
	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = ~allowed_points & mask;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	/*
	 * If the actual mask has changed we need to make sure that
	 * the commits are serialized (in case this is a nomodeset,
	 * nonblocking commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
				     &state->base, &intel_bw_funcs);

	return 0;
}