1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/string_helpers.h>
26 
27 #include <drm/drm_print.h>
28 #include <drm/i915_pciids.h>
29 
30 #include "display/intel_cdclk.h"
31 #include "display/intel_de.h"
32 #include "display/intel_display.h"
33 #include "gt/intel_gt_regs.h"
34 #include "i915_drv.h"
35 #include "i915_reg.h"
36 #include "i915_utils.h"
37 #include "intel_device_info.h"
38 
/*
 * Map enum intel_platform values to their names for printing; indexed by the
 * INTEL_<platform> enum value, so the order here does not matter but every
 * platform must be present (checked in intel_platform_name()).
 */
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(COMETLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(JASPERLAKE),
	PLATFORM_NAME(TIGERLAKE),
	PLATFORM_NAME(ROCKETLAKE),
	PLATFORM_NAME(DG1),
	PLATFORM_NAME(ALDERLAKE_S),
	PLATFORM_NAME(ALDERLAKE_P),
	PLATFORM_NAME(XEHPSDV),
	PLATFORM_NAME(DG2),
	PLATFORM_NAME(PONTEVECCHIO),
	PLATFORM_NAME(METEORLAKE),
};
#undef PLATFORM_NAME
82 
83 const char *intel_platform_name(enum intel_platform platform)
84 {
85 	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);
86 
87 	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
88 			 platform_names[platform] == NULL))
89 		return "<unknown>";
90 
91 	return platform_names[platform];
92 }
93 
/**
 * intel_device_info_print - print device and runtime info through a printer
 * @info: static device info for the device
 * @runtime: runtime info for the device
 * @p: the &drm_printer to emit the dump to
 *
 * One key/value pair per line; IP versions print a "major.minor" form only
 * when the release (minor) component is non-zero.
 */
void intel_device_info_print(const struct intel_device_info *info,
			     const struct intel_runtime_info *runtime,
			     struct drm_printer *p)
{
	/* Graphics/media/display IP versions, eliding ".00" releases. */
	if (runtime->graphics.ip.rel)
		drm_printf(p, "graphics version: %u.%02u\n",
			   runtime->graphics.ip.ver,
			   runtime->graphics.ip.rel);
	else
		drm_printf(p, "graphics version: %u\n",
			   runtime->graphics.ip.ver);

	if (runtime->media.ip.rel)
		drm_printf(p, "media version: %u.%02u\n",
			   runtime->media.ip.ver,
			   runtime->media.ip.rel);
	else
		drm_printf(p, "media version: %u\n",
			   runtime->media.ip.ver);

	if (runtime->display.ip.rel)
		drm_printf(p, "display version: %u.%02u\n",
			   runtime->display.ip.ver,
			   runtime->display.ip.rel);
	else
		drm_printf(p, "display version: %u\n",
			   runtime->display.ip.ver);

	/* Steppings as symbolic names (e.g. "A0"), via intel_step_name(). */
	drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
	drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
	drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
	drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));

	drm_printf(p, "gt: %d\n", info->gt);
	drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions);
	drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes);
	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
	drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
	drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
	drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);

	/* One "name: yes/no" line per static device-info flag. */
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));

	/* Same, for the display sub-struct flags. */
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name))
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	/* Runtime flags that can be fused off (see runtime_init below). */
	drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
	drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
	drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));

	drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
151 
/*
 * Re-purpose INTEL_VGA_DEVICE() from i915_pciids.h so the INTEL_*_IDS()
 * lists below expand to bare PCI device ids; the info argument (here 0) is
 * discarded.
 */
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

/* Device ids belonging to the ULT subplatform (HSW through CML). */
static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
	INTEL_CML_U_GT1_IDS(0),
	INTEL_CML_U_GT2_IDS(0),
};

/* Device ids belonging to the ULX subplatform (HSW through CFL). */
static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

/* ICL parts with port F. */
static const u16 subplatform_portf_ids[] = {
	INTEL_ICL_PORT_F_IDS(0),
};

/* TGL U/Y parts. */
static const u16 subplatform_uy_ids[] = {
	INTEL_TGL_12_GT2_IDS(0),
};

/* ADL-N parts. */
static const u16 subplatform_n_ids[] = {
	INTEL_ADLN_IDS(0),
};

/* RPL parts (S and P); RPL-U ids below are a further refinement. */
static const u16 subplatform_rpl_ids[] = {
	INTEL_RPLS_IDS(0),
	INTEL_RPLP_IDS(0),
};

static const u16 subplatform_rplu_ids[] = {
	INTEL_RPLU_IDS(0),
};

/* DG2/ATS-M dies: G10, G11, G12. */
static const u16 subplatform_g10_ids[] = {
	INTEL_DG2_G10_IDS(0),
	INTEL_ATS_M150_IDS(0),
};

static const u16 subplatform_g11_ids[] = {
	INTEL_DG2_G11_IDS(0),
	INTEL_ATS_M75_IDS(0),
};

static const u16 subplatform_g12_ids[] = {
	INTEL_DG2_G12_IDS(0),
};

/* MTL variants: M and P. */
static const u16 subplatform_m_ids[] = {
	INTEL_MTL_M_IDS(0),
};

static const u16 subplatform_p_ids[] = {
	INTEL_MTL_P_IDS(0),
};
235 
236 static bool find_devid(u16 id, const u16 *p, unsigned int num)
237 {
238 	for (; num; num--, p++) {
239 		if (*p == id)
240 			return true;
241 	}
242 
243 	return false;
244 }
245 
246 static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
247 {
248 	const struct intel_device_info *info = INTEL_INFO(i915);
249 	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
250 	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
251 	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
252 	u16 devid = INTEL_DEVID(i915);
253 	u32 mask = 0;
254 
255 	/* Make sure IS_<platform> checks are working. */
256 	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);
257 
258 	/* Find and mark subplatform bits based on the PCI device id. */
259 	if (find_devid(devid, subplatform_ult_ids,
260 		       ARRAY_SIZE(subplatform_ult_ids))) {
261 		mask = BIT(INTEL_SUBPLATFORM_ULT);
262 	} else if (find_devid(devid, subplatform_ulx_ids,
263 			      ARRAY_SIZE(subplatform_ulx_ids))) {
264 		mask = BIT(INTEL_SUBPLATFORM_ULX);
265 		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
266 			/* ULX machines are also considered ULT. */
267 			mask |= BIT(INTEL_SUBPLATFORM_ULT);
268 		}
269 	} else if (find_devid(devid, subplatform_portf_ids,
270 			      ARRAY_SIZE(subplatform_portf_ids))) {
271 		mask = BIT(INTEL_SUBPLATFORM_PORTF);
272 	} else if (find_devid(devid, subplatform_uy_ids,
273 			   ARRAY_SIZE(subplatform_uy_ids))) {
274 		mask = BIT(INTEL_SUBPLATFORM_UY);
275 	} else if (find_devid(devid, subplatform_n_ids,
276 				ARRAY_SIZE(subplatform_n_ids))) {
277 		mask = BIT(INTEL_SUBPLATFORM_N);
278 	} else if (find_devid(devid, subplatform_rpl_ids,
279 			      ARRAY_SIZE(subplatform_rpl_ids))) {
280 		mask = BIT(INTEL_SUBPLATFORM_RPL);
281 		if (find_devid(devid, subplatform_rplu_ids,
282 			       ARRAY_SIZE(subplatform_rplu_ids)))
283 			mask |= BIT(INTEL_SUBPLATFORM_RPLU);
284 	} else if (find_devid(devid, subplatform_g10_ids,
285 			      ARRAY_SIZE(subplatform_g10_ids))) {
286 		mask = BIT(INTEL_SUBPLATFORM_G10);
287 	} else if (find_devid(devid, subplatform_g11_ids,
288 			      ARRAY_SIZE(subplatform_g11_ids))) {
289 		mask = BIT(INTEL_SUBPLATFORM_G11);
290 	} else if (find_devid(devid, subplatform_g12_ids,
291 			      ARRAY_SIZE(subplatform_g12_ids))) {
292 		mask = BIT(INTEL_SUBPLATFORM_G12);
293 	} else if (find_devid(devid, subplatform_m_ids,
294 			      ARRAY_SIZE(subplatform_m_ids))) {
295 		mask = BIT(INTEL_SUBPLATFORM_M);
296 	} else if (find_devid(devid, subplatform_p_ids,
297 			      ARRAY_SIZE(subplatform_p_ids))) {
298 		mask = BIT(INTEL_SUBPLATFORM_P);
299 	}
300 
301 	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
302 
303 	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
304 }
305 
/*
 * Read the GMD_ID register at BAR0 @offset via a transient PCI mapping and
 * decode the arch/release/step fields into @ip.  On entry @ip carries the
 * minimum ver/rel expected from the static device info; those are consumed
 * only by the sanity-check debug message below.  On a failed mapping, @ip is
 * left untouched (still holding the static expectations).
 */
static void ip_ver_read(struct drm_i915_private *i915, u32 offset, struct intel_ip_version *ip)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	void __iomem *addr;
	u32 val;
	u8 expected_ver = ip->ver;
	u8 expected_rel = ip->rel;

	/* Map just the one 32-bit register; regular MMIO may not be up yet. */
	addr = pci_iomap_range(pdev, 0, offset, sizeof(u32));
	if (drm_WARN_ON(&i915->drm, !addr))
		return;

	val = ioread32(addr);
	pci_iounmap(pdev, addr);

	ip->ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
	ip->rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	ip->step = REG_FIELD_GET(GMD_ID_STEP, val);

	/* Sanity check against expected versions from device info */
	if (IP_VER(ip->ver, ip->rel) < IP_VER(expected_ver, expected_rel))
		drm_dbg(&i915->drm,
			"Hardware reports GMD IP version %u.%u (REG[0x%x] = 0x%08x) but minimum expected is %u.%u\n",
			ip->ver, ip->rel, offset, val, expected_ver, expected_rel);
}
331 
332 /*
333  * Setup the graphics version for the current device.  This must be done before
334  * any code that performs checks on GRAPHICS_VER or DISPLAY_VER, so this
335  * function should be called very early in the driver initialization sequence.
336  *
337  * Regular MMIO access is not yet setup at the point this function is called so
338  * we peek at the appropriate MMIO offset directly.  The GMD_ID register is
339  * part of an 'always on' power well by design, so we don't need to worry about
340  * forcewake while reading it.
341  */
342 static void intel_ipver_early_init(struct drm_i915_private *i915)
343 {
344 	struct intel_runtime_info *runtime = RUNTIME_INFO(i915);
345 
346 	if (!HAS_GMD_ID(i915)) {
347 		drm_WARN_ON(&i915->drm, RUNTIME_INFO(i915)->graphics.ip.ver > 12);
348 		/*
349 		 * On older platforms, graphics and media share the same ip
350 		 * version and release.
351 		 */
352 		RUNTIME_INFO(i915)->media.ip =
353 			RUNTIME_INFO(i915)->graphics.ip;
354 		return;
355 	}
356 
357 	ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_GRAPHICS),
358 		    &runtime->graphics.ip);
359 	/* Wa_22012778468 */
360 	if (runtime->graphics.ip.ver == 0x0 &&
361 	    INTEL_INFO(i915)->platform == INTEL_METEORLAKE) {
362 		RUNTIME_INFO(i915)->graphics.ip.ver = 12;
363 		RUNTIME_INFO(i915)->graphics.ip.rel = 70;
364 	}
365 	ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_DISPLAY),
366 		    &runtime->display.ip);
367 	ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_MEDIA),
368 		    &runtime->media.ip);
369 }
370 
371 /**
372  * intel_device_info_runtime_init_early - initialize early runtime info
373  * @i915: the i915 device
374  *
375  * Determine early intel_device_info fields at runtime. This function needs
376  * to be called before the MMIO has been setup.
377  */
378 void intel_device_info_runtime_init_early(struct drm_i915_private *i915)
379 {
380 	intel_ipver_early_init(i915);
381 	intel_device_info_subplatform_init(i915);
382 }
383 
384 /**
385  * intel_device_info_runtime_init - initialize runtime info
386  * @dev_priv: the i915 device
387  *
388  * Determine various intel_device_info fields at runtime.
389  *
390  * Use it when either:
391  *   - it's judged too laborious to fill n static structures with the limit
392  *     when a simple if statement does the job,
393  *   - run-time checks (eg read fuse/strap registers) are needed.
394  *
395  * This function needs to be called:
396  *   - after the MMIO has been setup as we are reading registers,
397  *   - after the PCH has been detected,
398  *   - before the first usage of the fields it can tweak.
399  */
400 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
401 {
402 	struct intel_device_info *info = mkwrite_device_info(dev_priv);
403 	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
404 	enum pipe pipe;
405 
406 	/* Wa_14011765242: adl-s A0,A1 */
407 	if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A2))
408 		for_each_pipe(dev_priv, pipe)
409 			runtime->num_scalers[pipe] = 0;
410 	else if (DISPLAY_VER(dev_priv) >= 11) {
411 		for_each_pipe(dev_priv, pipe)
412 			runtime->num_scalers[pipe] = 2;
413 	} else if (DISPLAY_VER(dev_priv) >= 9) {
414 		runtime->num_scalers[PIPE_A] = 2;
415 		runtime->num_scalers[PIPE_B] = 2;
416 		runtime->num_scalers[PIPE_C] = 1;
417 	}
418 
419 	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
420 
421 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
422 		for_each_pipe(dev_priv, pipe)
423 			runtime->num_sprites[pipe] = 4;
424 	else if (DISPLAY_VER(dev_priv) >= 11)
425 		for_each_pipe(dev_priv, pipe)
426 			runtime->num_sprites[pipe] = 6;
427 	else if (DISPLAY_VER(dev_priv) == 10)
428 		for_each_pipe(dev_priv, pipe)
429 			runtime->num_sprites[pipe] = 3;
430 	else if (IS_BROXTON(dev_priv)) {
431 		/*
432 		 * Skylake and Broxton currently don't expose the topmost plane as its
433 		 * use is exclusive with the legacy cursor and we only want to expose
434 		 * one of those, not both. Until we can safely expose the topmost plane
435 		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
436 		 * we don't expose the topmost plane at all to prevent ABI breakage
437 		 * down the line.
438 		 */
439 
440 		runtime->num_sprites[PIPE_A] = 2;
441 		runtime->num_sprites[PIPE_B] = 2;
442 		runtime->num_sprites[PIPE_C] = 1;
443 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
444 		for_each_pipe(dev_priv, pipe)
445 			runtime->num_sprites[pipe] = 2;
446 	} else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) {
447 		for_each_pipe(dev_priv, pipe)
448 			runtime->num_sprites[pipe] = 1;
449 	}
450 
451 	if (HAS_DISPLAY(dev_priv) &&
452 	    (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) &&
453 	    !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) {
454 		drm_info(&dev_priv->drm, "Display not present, disabling\n");
455 
456 		runtime->pipe_mask = 0;
457 	}
458 
459 	if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
460 	    HAS_PCH_SPLIT(dev_priv)) {
461 		u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
462 		u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
463 
464 		/*
465 		 * SFUSE_STRAP is supposed to have a bit signalling the display
466 		 * is fused off. Unfortunately it seems that, at least in
467 		 * certain cases, fused off display means that PCH display
468 		 * reads don't land anywhere. In that case, we read 0s.
469 		 *
470 		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
471 		 * should be set when taking over after the firmware.
472 		 */
473 		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
474 		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
475 		    (HAS_PCH_CPT(dev_priv) &&
476 		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
477 			drm_info(&dev_priv->drm,
478 				 "Display fused off, disabling\n");
479 			runtime->pipe_mask = 0;
480 		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
481 			drm_info(&dev_priv->drm, "PipeC fused off\n");
482 			runtime->pipe_mask &= ~BIT(PIPE_C);
483 			runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
484 		}
485 	} else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
486 		u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
487 
488 		if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
489 			runtime->pipe_mask &= ~BIT(PIPE_A);
490 			runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
491 			runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
492 		}
493 		if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
494 			runtime->pipe_mask &= ~BIT(PIPE_B);
495 			runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
496 		}
497 		if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
498 			runtime->pipe_mask &= ~BIT(PIPE_C);
499 			runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
500 		}
501 
502 		if (DISPLAY_VER(dev_priv) >= 12 &&
503 		    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
504 			runtime->pipe_mask &= ~BIT(PIPE_D);
505 			runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
506 		}
507 
508 		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
509 			runtime->has_hdcp = 0;
510 
511 		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
512 			runtime->fbc_mask = 0;
513 
514 		if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
515 			runtime->has_dmc = 0;
516 
517 		if (IS_DISPLAY_VER(dev_priv, 10, 12) &&
518 		    (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
519 			runtime->has_dsc = 0;
520 	}
521 
522 	if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) {
523 		drm_info(&dev_priv->drm,
524 			 "Disabling ppGTT for VT-d support\n");
525 		runtime->ppgtt_type = INTEL_PPGTT_NONE;
526 	}
527 
528 	runtime->rawclk_freq = intel_read_rawclk(dev_priv);
529 	drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
530 
531 	if (!HAS_DISPLAY(dev_priv)) {
532 		dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
533 						   DRIVER_ATOMIC);
534 		memset(&info->display, 0, sizeof(info->display));
535 
536 		runtime->cpu_transcoder_mask = 0;
537 		memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites));
538 		memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers));
539 		runtime->fbc_mask = 0;
540 		runtime->has_hdcp = false;
541 		runtime->has_dmc = false;
542 		runtime->has_dsc = false;
543 	}
544 
545 	/* Disable nuclear pageflip by default on pre-g4x */
546 	if (!dev_priv->params.nuclear_pageflip &&
547 	    DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
548 		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
549 }
550 
/**
 * intel_driver_caps_print - print driver caps through a printer
 * @caps: the driver caps to print
 * @p: the &drm_printer to emit the dump to
 */
void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   str_yes_no(caps->has_logical_contexts));
	drm_printf(p, "scheduler: 0x%x\n", caps->scheduler);
}
558