1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_atomic_state_helper.h>
7 
8 #include "i915_drv.h"
9 #include "i915_reg.h"
10 #include "i915_utils.h"
11 #include "intel_atomic.h"
12 #include "intel_bw.h"
13 #include "intel_cdclk.h"
14 #include "intel_display_core.h"
15 #include "intel_display_types.h"
16 #include "skl_watermark.h"
17 #include "intel_mchbar_regs.h"
18 #include "intel_pcode.h"
19 
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	/* DRAM clock (MHz) and DRAM timing parameters for one QGV point */
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};
24 
/* One PSF GV (Geyserville) point */
struct intel_psf_gv_point {
	u8 clk; /* clock in multiples of 16.6666 MHz */
};
28 
/* All QGV/PSF points plus the DRAM parameters needed for bw derivation */
struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
	u8 num_points;		/* valid entries in points[] */
	u8 num_psf_points;	/* valid entries in psf_points[] */
	u8 t_bl;		/* burst length (see the ct calculation) */
	u8 max_numchannels;	/* max DRAM channels used for the bw math */
	u8 channel_width;	/* channel width in bits */
	u8 deinterleave;	/* deinterleave factor */
};
39 
/*
 * Read the DCLK and DRAM timing parameters from the DG1 MCHBAR
 * registers instead of the pcode mailbox.
 *
 * Note: @point is not used here — the same registers are read
 * regardless of the requested point.
 *
 * Returns 0 on success, -EINVAL if the decoded DCLK is zero.
 */
static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_point *sp,
					  int point)
{
	u32 dclk_ratio, dclk_reference;
	u32 val;

	val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
	if (val & DG1_QCLK_REFERENCE)
		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
	else
		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
	/* Convert 16.666 MHz units to MHz, rounding to nearest (+500) */
	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);

	/* Gear 2 doubles the effective DCLK */
	val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	if (val & DG1_GEAR_TYPE)
		sp->dclk *= 2;

	if (sp->dclk == 0)
		return -EINVAL;

	val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);

	val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);

	/* Row cycle time: precharge + row active time */
	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
74 
/*
 * Query pcode for the DCLK and DRAM timing parameters of QGV point
 * @point (pre-MTL, non-DG1 platforms). The mailbox replies pack:
 *   val:  dclk[15:0], t_rp[23:16], t_rcd[31:24]
 *   val2: t_rdpre[7:0], t_ras[15:8]
 *
 * Returns 0 on success or the pcode mailbox error.
 */
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	u16 dclk;
	int ret;

	ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			     &val, &val2);
	if (ret)
		return ret;

	dclk = val & 0xffff;
	/*
	 * dclk is in 16.666 MHz units; display version 12+ rounds to
	 * nearest (+500), version 11 rounds up.
	 */
	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	/* Row cycle time: precharge + row active time */
	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
101 
/*
 * Read all PSF GV point clocks from pcode. The single reply word
 * packs one u8 clock (in 16.666 MHz units) per PSF point, lowest
 * point in the least significant byte.
 *
 * Returns 0 on success or the pcode mailbox error.
 */
static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
					    struct intel_psf_gv_point *points)
{
	u32 val = 0;
	int ret;
	int i;

	ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
		points[i].clk = val & 0xff;
		val >>= 8;
	}

	return 0;
}
121 
/*
 * Build the pcode request mask covering every QGV and PSF GV point
 * that pcode advertised, formatted for the SAGV/QGV restrict mailbox.
 */
static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
	unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
	u16 qgv_points = 0, psf_points = 0;

	/*
	 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
	 * it with failure if we try masking any unadvertised points.
	 * So need to operate only with those returned from PCode.
	 */
	if (num_qgv_points > 0)
		qgv_points = GENMASK(num_qgv_points - 1, 0);

	if (num_psf_gv_points > 0)
		psf_points = GENMASK(num_psf_gv_points - 1, 0);

	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
141 
/*
 * @points_mask lists the QGV points being disabled, so the remaining
 * enabled set is ~points_mask limited to the advertised QGV points.
 * A power-of-2 remainder means exactly one point stays enabled, i.e.
 * SAGV is effectively disabled; anything else counts as enabled.
 */
static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
{
	return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
			      ICL_PCODE_REQ_QGV_PT_MASK);
}
147 
/*
 * Ask pcode to restrict the QGV/PSF points listed in @points_mask and
 * update the cached SAGV status to match the resulting enabled set.
 *
 * Returns 0 on success or the (negative) pcode error.
 */
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
		return ret;
	}

	dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
		I915_SAGV_ENABLED : I915_SAGV_DISABLED;

	return 0;
}
170 
/*
 * MTL+ exposes the QGV point parameters via MMIO registers instead of
 * a pcode mailbox. Always returns 0; keeps the same signature as the
 * other readout variants for the dispatch in intel_read_qgv_point_info().
 */
static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
				   struct intel_qgv_point *sp, int point)
{
	u32 val, val2;
	u16 dclk;

	val = intel_uncore_read(&dev_priv->uncore,
				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
	val2 = intel_uncore_read(&dev_priv->uncore,
				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	/* dclk in 16.666 MHz units, converted to MHz rounding up */
	sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);

	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);

	/* Row cycle time: precharge + row active time */
	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
193 
/*
 * Dispatch to the platform specific QGV point readout: MTL+ uses MMIO
 * registers, DG1 uses MCHBAR, everything else the pcode mailbox.
 */
static int
intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
			  struct intel_qgv_point *sp,
			  int point)
{
	if (DISPLAY_VER(dev_priv) >= 14)
		return mtl_read_qgv_point_info(dev_priv, sp, point);

	if (IS_DG1(dev_priv))
		return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);

	return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
}
206 
/*
 * Fill @qi with the QGV/PSF point tables and the per-platform DRAM
 * parameters (burst length, max channels, channel width, deinterleave
 * factor) used by the bandwidth calculations.
 *
 * Returns 0 on success or a negative error if the QGV point readout
 * fails. A PSF point readout failure is not fatal: the PSF points are
 * simply dropped from the calculations.
 */
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi,
			      bool is_y_tile)
{
	const struct dram_info *dram_info = &dev_priv->dram_info;
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;
	qi->num_psf_points = dram_info->num_psf_gv_points;

	/* DRAM parameters per memory type, per display version */
	if (DISPLAY_VER(dev_priv) >= 14) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_LPDDR4:
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = 4;
			break;
		default:
			MISSING_CASE(dram_info->type);
			return -EINVAL;
		}
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		/* On version 12-13 some parameters depend on Y-tile usage */
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = is_y_tile ? 8 : 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = is_y_tile ? 16 : 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_LPDDR4:
			/* RKL LPDDR4 uses its own parameters */
			if (IS_ROCKETLAKE(dev_priv)) {
				qi->t_bl = 8;
				qi->max_numchannels = 4;
				qi->channel_width = 32;
				qi->deinterleave = 2;
				break;
			}
			fallthrough;
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = is_y_tile ? 2 : 4;
			break;
		default:
			/* Unknown memory type: conservative defaults */
			qi->t_bl = 16;
			qi->max_numchannels = 1;
			break;
		}
	} else if (DISPLAY_VER(dev_priv) == 11) {
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
		qi->max_numchannels = 1;
	}

	/* Clamp to the table size to avoid overrunning qi->points[] */
	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = intel_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	if (qi->num_psf_points > 0) {
		/* Best effort: drop PSF points if the readout fails */
		ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
		if (ret) {
			drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}

		for (i = 0; i < qi->num_psf_points; i++)
			drm_dbg_kms(&dev_priv->drm,
				    "PSF GV %d: CLK=%d \n",
				    i, qi->psf_points[i].clk);
	}

	return 0;
}
313 
/*
 * Convert a PSF GV clock to bandwidth.
 *
 * @clk counts in units of 16.666 MHz (i.e. 100/6 MHz); per BSpec the
 * PSF GV bandwidth is BW = 64 * clk * 16.666 MHz.
 */
static int adl_calc_psf_bw(int clk)
{
	int scaled = 64 * clk * 100;

	return DIV_ROUND_CLOSEST(scaled, 6);
}
323 
324 static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
325 {
326 	u16 dclk = 0;
327 	int i;
328 
329 	for (i = 0; i < qi->num_points; i++)
330 		dclk = max(dclk, qi->points[i].dclk);
331 
332 	return dclk;
333 }
334 
/* Per-platform system agent parameters used to derive the bw limits */
struct intel_sa_info {
	u16 displayrtids;	/* display request tracker ids — TODO confirm expansion */
	/* deburst: burst factor; deprogbwlimit: DE bw limit (GB/s); derating: percent */
	u8 deburst, deprogbwlimit, derating;
};
339 
/* DISPLAY_VER 11 (see intel_bw_init_hw()) */
static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

/* DISPLAY_VER 12 (see intel_bw_init_hw()) */
static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

/* Rocket Lake (see intel_bw_init_hw()) */
static const struct intel_sa_info rkl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

/* Alder Lake S (see intel_bw_init_hw()) */
static const struct intel_sa_info adls_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

/* Alder Lake P (see intel_bw_init_hw()) */
static const struct intel_sa_info adlp_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
};

/* DISPLAY_VER 14+ (see intel_bw_init_hw()) */
static const struct intel_sa_info mtl_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
};
381 
/*
 * Compute the derated bandwidth limit for each plane-group / QGV-point
 * combination (display version 11, see intel_bw_init_hw()) and cache
 * it in dev_priv->display.bw.max[]. Also initializes the SAGV status.
 *
 * Returns 0 on success or a negative error if the QGV readout failed
 * (in which case no bandwidth limits are applied).
 */
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw;
	int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	dclk_max = icl_sagv_max_dclk(&qi);
	/* 60% of dclk_max * 16, capped by deprogbwlimit (GB/s scaled by 1000) */
	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
		int clpchgroup;
		int j;

		/* cachelines per chgroup; doubles for every successive group */
		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			/* Apply the derating percentage, capped at maxdebw */
			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}
	}
	/*
	 * In case if SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as it will fail and pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->display.sagv.status = I915_SAGV_ENABLED;

	return 0;
}
450 
/*
 * Compute the derated bandwidth limit for each plane-group / QGV-point
 * combination and the PSF GV bandwidths (display version 12+, see
 * intel_bw_init_hw()) and cache them in dev_priv->display.bw.max[].
 * Also initializes the SAGV status.
 *
 * Returns 0 on success or a negative error if the QGV readout failed
 * (in which case no bandwidth limits are applied).
 */
static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	const struct dram_info *dram_info = &dev_priv->dram_info;
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw, peakbw;
	int clperchgroup;
	int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	/* Pre-MTL LPDDR4/5 channel count is doubled for the bw math */
	if (DISPLAY_VER(dev_priv) < 14 &&
	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
		num_channels *= 2;

	/* Fall back to a channel-count based deinterleave if unset */
	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);

	if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
		drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
	if (qi.max_numchannels != 0)
		num_channels = min_t(u8, num_channels, qi.max_numchannels);

	dclk_max = icl_sagv_max_dclk(&qi);

	/* Peak bw = channels * bytes per channel-clock * max dclk */
	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */

	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	/*
	 * clperchgroup = 4kpagespermempage * clperchperblock,
	 * clperchperblock = 8 / num_channels * interleave
	 */
	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
		struct intel_bw_info *bi_next;
		int clpchgroup;
		int j;

		/* cachelines per chgroup; doubles for every successive group */
		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;

		/* The next group's plane limit is derived from this group */
		if (i < num_groups - 1) {
			bi_next = &dev_priv->display.bw.max[i + 1];

			if (clpchgroup < clperchgroup)
				bi_next->num_planes = (ipqdepth - clpchgroup) /
						       clpchgroup + 1;
			else
				bi_next->num_planes = 0;
		}

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			/* Apply the derating percentage, capped at maxdebw */
			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		for (j = 0; j < qi.num_psf_points; j++) {
			const struct intel_psf_gv_point *sp = &qi.psf_points[j];

			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
				    i, j, bi->num_planes, bi->psf_bw[j]);
		}
	}

	/*
	 * In case if SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as it will fail and pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->display.sagv.status = I915_SAGV_ENABLED;

	return 0;
}
563 
564 static void dg2_get_bw_info(struct drm_i915_private *i915)
565 {
566 	unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
567 	int num_groups = ARRAY_SIZE(i915->display.bw.max);
568 	int i;
569 
570 	/*
571 	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
572 	 * that doesn't depend on the number of planes enabled. So fill all the
573 	 * plane group with constant bw information for uniformity with other
574 	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
575 	 * whereas DG2-G11 platforms have 38 GB/s.
576 	 */
577 	for (i = 0; i < num_groups; i++) {
578 		struct intel_bw_info *bi = &i915->display.bw.max[i];
579 
580 		bi->num_planes = 1;
581 		/* Need only one dummy QGV point per group */
582 		bi->num_qgv_points = 1;
583 		bi->deratedbw[0] = deratedbw;
584 	}
585 
586 	i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
587 }
588 
589 static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
590 			       int num_planes, int qgv_point)
591 {
592 	int i;
593 
594 	/*
595 	 * Let's return max bw for 0 planes
596 	 */
597 	num_planes = max(1, num_planes);
598 
599 	for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
600 		const struct intel_bw_info *bi =
601 			&dev_priv->display.bw.max[i];
602 
603 		/*
604 		 * Pcode will not expose all QGV points when
605 		 * SAGV is forced to off/min/med/max.
606 		 */
607 		if (qgv_point >= bi->num_qgv_points)
608 			return UINT_MAX;
609 
610 		if (num_planes >= bi->num_planes)
611 			return bi->deratedbw[qgv_point];
612 	}
613 
614 	return 0;
615 }
616 
617 static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
618 			       int num_planes, int qgv_point)
619 {
620 	int i;
621 
622 	/*
623 	 * Let's return max bw for 0 planes
624 	 */
625 	num_planes = max(1, num_planes);
626 
627 	for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
628 		const struct intel_bw_info *bi =
629 			&dev_priv->display.bw.max[i];
630 
631 		/*
632 		 * Pcode will not expose all QGV points when
633 		 * SAGV is forced to off/min/med/max.
634 		 */
635 		if (qgv_point >= bi->num_qgv_points)
636 			return UINT_MAX;
637 
638 		if (num_planes <= bi->num_planes)
639 			return bi->deratedbw[qgv_point];
640 	}
641 
642 	return dev_priv->display.bw.max[0].deratedbw[qgv_point];
643 }
644 
645 static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
646 			       int psf_gv_point)
647 {
648 	const struct intel_bw_info *bi =
649 			&dev_priv->display.bw.max[0];
650 
651 	return bi->psf_bw[psf_gv_point];
652 }
653 
/*
 * Probe the platform bandwidth limits at init time. No-op without
 * display. The ordering matters: version checks must precede the
 * platform checks they would otherwise shadow (e.g. DISPLAY_VER >= 14
 * before IS_DG2).
 */
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) >= 14)
		tgl_get_bw_info(dev_priv, &mtl_sa_info);
	else if (IS_DG2(dev_priv))
		dg2_get_bw_info(dev_priv);
	else if (IS_ALDERLAKE_P(dev_priv))
		tgl_get_bw_info(dev_priv, &adlp_sa_info);
	else if (IS_ALDERLAKE_S(dev_priv))
		tgl_get_bw_info(dev_priv, &adls_sa_info);
	else if (IS_ROCKETLAKE(dev_priv))
		tgl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (DISPLAY_VER(dev_priv) == 12)
		tgl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (DISPLAY_VER(dev_priv) == 11)
		icl_get_bw_info(dev_priv, &icl_sa_info);
}
674 
/* Number of active planes on the crtc, ignoring the cursor. */
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}
683 
/*
 * Sum the data rate of all non-cursor planes on the crtc. Before
 * display version 11 the separately tracked Y plane rate is added
 * on top.
 */
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];

		if (DISPLAY_VER(i915) < 11)
			data_rate += crtc_state->data_rate_y[plane_id];
	}

	return data_rate;
}
707 
708 /* "Maximum Pipe Read Bandwidth" */
709 static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
710 {
711 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
712 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
713 
714 	if (DISPLAY_VER(i915) < 12)
715 		return 0;
716 
717 	return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
718 }
719 
/*
 * Record @crtc_state's data rate and active plane count into
 * @bw_state for the crtc's pipe, and log the result.
 */
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}
736 
737 static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
738 					       const struct intel_bw_state *bw_state)
739 {
740 	unsigned int num_active_planes = 0;
741 	enum pipe pipe;
742 
743 	for_each_pipe(dev_priv, pipe)
744 		num_active_planes += bw_state->num_active_planes[pipe];
745 
746 	return num_active_planes;
747 }
748 
/*
 * Total data rate across all pipes. On display version 13+ with VT-d
 * active a 5% overhead is added (rounding up).
 */
static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
		data_rate = DIV_ROUND_UP(data_rate * 105, 100);

	return data_rate;
}
763 
764 struct intel_bw_state *
765 intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
766 {
767 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
768 	struct intel_global_state *bw_state;
769 
770 	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
771 
772 	return to_intel_bw_state(bw_state);
773 }
774 
775 struct intel_bw_state *
776 intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
777 {
778 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
779 	struct intel_global_state *bw_state;
780 
781 	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
782 
783 	return to_intel_bw_state(bw_state);
784 }
785 
786 struct intel_bw_state *
787 intel_atomic_get_bw_state(struct intel_atomic_state *state)
788 {
789 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
790 	struct intel_global_state *bw_state;
791 
792 	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
793 	if (IS_ERR(bw_state))
794 		return ERR_CAST(bw_state);
795 
796 	return to_intel_bw_state(bw_state);
797 }
798 
/*
 * Check whether anything cdclk-relevant differs between the old and
 * new bw state: any pipe's per-slice dbuf bandwidth or active plane
 * mask, or any pipe's bandwidth-based min cdclk.
 */
static bool intel_bw_state_changed(struct drm_i915_private *i915,
				   const struct intel_bw_state *old_bw_state,
				   const struct intel_bw_state *new_bw_state)
{
	enum pipe pipe;

	for_each_pipe(i915, pipe) {
		const struct intel_dbuf_bw *old_crtc_bw =
			&old_bw_state->dbuf_bw[pipe];
		const struct intel_dbuf_bw *new_crtc_bw =
			&new_bw_state->dbuf_bw[pipe];
		enum dbuf_slice slice;

		for_each_dbuf_slice(i915, slice) {
			if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
			    old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
				return true;
		}

		if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
			return true;
	}

	return false;
}
824 
/*
 * Account @data_rate of @plane_id into the per-slice bw tracking of
 * @crtc's pipe, for every dbuf slice that the plane's ddb allocation
 * overlaps.
 */
static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
				   struct intel_crtc *crtc,
				   enum plane_id plane_id,
				   const struct skl_ddb_entry *ddb,
				   unsigned int data_rate)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
	unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
	enum dbuf_slice slice;

	/*
	 * The arbiter can only really guarantee an
	 * equal share of the total bw to each plane.
	 */
	for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
		crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
		crtc_bw->active_planes[slice] |= BIT(plane_id);
	}
}
845 
/*
 * Recompute the per-slice dbuf bandwidth tracking of @crtc_state's
 * pipe from scratch. Inactive crtcs keep the zeroed tracking. Before
 * display version 11 the separately allocated Y plane ddb is also
 * accounted.
 */
static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
	enum plane_id plane_id;

	memset(crtc_bw, 0, sizeof(*crtc_bw));

	if (!crtc_state->hw.active)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
				       &crtc_state->wm.skl.plane_ddb[plane_id],
				       crtc_state->data_rate[plane_id]);

		if (DISPLAY_VER(i915) < 11)
			skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
					       &crtc_state->wm.skl.plane_ddb_y[plane_id],
					       crtc_state->data_rate[plane_id]);
	}
}
877 
/* "Maximum Data Buffer Bandwidth" */
static int
intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
			const struct intel_bw_state *bw_state)
{
	unsigned int total_max_bw = 0;
	enum dbuf_slice slice;

	/*
	 * Find the most loaded dbuf slice: per slice, take the highest
	 * per-plane bw across all pipes scaled by the total number of
	 * planes using the slice.
	 */
	for_each_dbuf_slice(i915, slice) {
		int num_active_planes = 0;
		unsigned int max_bw = 0;
		enum pipe pipe;

		/*
		 * The arbiter can only really guarantee an
		 * equal share of the total bw to each plane.
		 */
		for_each_pipe(i915, pipe) {
			const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];

			max_bw = max(crtc_bw->max_bw[slice], max_bw);
			num_active_planes += hweight8(crtc_bw->active_planes[slice]);
		}
		max_bw *= num_active_planes;

		total_max_bw = max(total_max_bw, max_bw);
	}

	/* 64 bytes per cdclk cycle — TODO confirm against BSpec */
	return DIV_ROUND_UP(total_max_bw, 64);
}
908 
909 int intel_bw_min_cdclk(struct drm_i915_private *i915,
910 		       const struct intel_bw_state *bw_state)
911 {
912 	enum pipe pipe;
913 	int min_cdclk;
914 
915 	min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
916 
917 	for_each_pipe(i915, pipe)
918 		min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
919 
920 	return min_cdclk;
921 }
922 
/*
 * Recompute the bandwidth-based min cdclk for every crtc in @state
 * and decide whether a full cdclk recalculation is needed, reported
 * via *need_cdclk_calc. Only ever forces an increase: a lower
 * requirement never triggers a recalculation, to avoid back and
 * forth cdclk changes (display blinking).
 *
 * Returns 0 on success or a negative error code.
 */
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
			    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	const struct intel_cdclk_state *cdclk_state;
	const struct intel_crtc_state *crtc_state;
	int old_min_cdclk, new_min_cdclk;
	struct intel_crtc *crtc;
	int i;

	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);

		new_bw_state->min_cdclk[crtc->pipe] =
			intel_bw_crtc_min_cdclk(crtc_state);
	}

	/* No crtcs in the state -> nothing to do */
	if (!old_bw_state)
		return 0;

	/* Serialize against other commits touching the bw state */
	if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
	new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);

	/*
	 * No need to check against the cdclk state if
	 * the min cdclk doesn't increase.
	 *
	 * Ie. we only ever increase the cdclk due to bandwidth
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_min_cdclk <= old_min_cdclk)
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	/*
	 * No need to recalculate the cdclk state if
	 * the min cdclk doesn't increase.
	 *
	 * Ie. we only ever increase the cdclk due to bandwidth
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
		return 0;

	drm_dbg_kms(&dev_priv->drm,
		    "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
		    new_min_cdclk, cdclk_state->bw_min_cdclk);
	*need_cdclk_calc = true;

	return 0;
}
996 
/*
 * For every crtc in @state whose data rate or active plane count
 * changed, pull the bw state into the atomic state and update it.
 * Sets *changed when anything was updated (never clears it).
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);
		struct intel_bw_state *new_bw_state;

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		*changed = true;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
			    crtc->base.base.id, crtc->base.name,
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	return 0;
}
1042 
/*
 * Validate the memory bandwidth requirements of an atomic state on
 * display version 11+ hardware.
 *
 * Recomputes the per-pipe data rates, and when anything relevant
 * (data rate, active plane count, SAGV enable) changed, determines
 * which QGV/PSF GV points can sustain the required bandwidth and
 * stores the resulting pcode mask in the new bw state. Returns 0 on
 * success, -EINVAL if no point provides enough bandwidth, or a
 * negative error code from the global-state helpers.
 */
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_bw_state *old_bw_state;
	struct intel_bw_state *new_bw_state;
	unsigned int data_rate;
	unsigned int num_active_planes;
	int i, ret;
	u16 qgv_points = 0, psf_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
	unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
	bool changed = false;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/* Refresh per-pipe data rates/plane counts; sets 'changed' if any differ. */
	ret = intel_bw_check_data_rate(state, &changed);
	if (ret)
		return ret;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* A SAGV enable/disable transition also forces a recheck. */
	if (new_bw_state &&
	    intel_can_enable_sagv(dev_priv, old_bw_state) !=
	    intel_can_enable_sagv(dev_priv, new_bw_state))
		changed = true;

	/*
	 * If none of our inputs (data rates, number of active
	 * planes, SAGV yes/no) changed then nothing to do here.
	 */
	if (!changed)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	/* Total required bandwidth, converted to MB/s for comparison below. */
	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		if (DISPLAY_VER(dev_priv) > 11)
			max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
		else
			max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as it is calculated earlier in
		 * intel_atomic_check,
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		/* Point is usable if it can sustain the required bandwidth. */
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/* Same usability check for the PSF GV points (ADL-S and later). */
	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(dev_priv, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we always should have at least one allowed point
	 * left, so if we couldn't - simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only single point with highest bandwidth, if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		qgv_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask =
		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
		icl_qgv_points_mask(dev_priv);

	/*
	 * If the actual mask had changed we need to make sure that
	 * the commits are serialized(in case this is a nomodeset, nonblocking)
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}
1177 
1178 static struct intel_global_state *
1179 intel_bw_duplicate_state(struct intel_global_obj *obj)
1180 {
1181 	struct intel_bw_state *state;
1182 
1183 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
1184 	if (!state)
1185 		return NULL;
1186 
1187 	return &state->base;
1188 }
1189 
/* Free a bw state previously created by intel_bw_duplicate_state(). */
static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}
1195 
/* Global-state vtable hooking the bw state into the atomic framework. */
static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};
1200 
1201 int intel_bw_init(struct drm_i915_private *dev_priv)
1202 {
1203 	struct intel_bw_state *state;
1204 
1205 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1206 	if (!state)
1207 		return -ENOMEM;
1208 
1209 	intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
1210 				     &state->base, &intel_bw_funcs);
1211 
1212 	return 0;
1213 }
1214