/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include <asm/hypervisor.h>

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/intel-iommu.h>
#include <linux/pm_qos.h>

#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>

#include "display/intel_bios.h"
#include "display/intel_cdclk.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_fbc.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
#include "i915_perf_types.h"
#include "i915_scheduler.h"
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"

struct dpll;
struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_audio_funcs;
struct intel_cdclk_config;
struct intel_cdclk_funcs;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_connector;
struct intel_crtc;
struct intel_dp;
struct intel_dpll_funcs;
struct intel_encoder;
struct intel_fbdev;
struct intel_fdi_funcs;
struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_limit;
struct intel_overlay;
struct intel_overlay_error_state;
struct vlv_s0ix_state;

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
	struct delayed_work hotplug_work;

	const u32 *hpd, *pch_hpd;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	u32 retry_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD could block the workqueue on acquiring a mode config
	 * mutex that userspace may have taken. However, userspace is
	 * waiting on the DP workqueue to run, which is blocked behind the
	 * non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

/* functions used for watermark calcs for display. */
struct drm_i915_wm_disp_funcs {
	/* update_wm is for legacy wm management */
	void (*update_wm)(struct drm_i915_private *dev_priv);
	int (*compute_pipe_wm)(struct intel_atomic_state *state,
			       struct intel_crtc *crtc);
	int (*compute_intermediate_wm)(struct intel_atomic_state *state,
				       struct intel_crtc *crtc);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc *crtc);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
};

struct drm_i915_display_funcs {
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	void (*crtc_enable)(struct intel_atomic_state *state,
			    struct intel_crtc *crtc);
	void (*crtc_disable)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
	void (*commit_modeset_enables)(struct intel_atomic_state *state);
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u16 saveGCDGMBUS;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/*
	 * Shortcut for the stolen region. This points to either
	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
	 * support stolen.
	 */
	struct intel_memory_region *stolen_region;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct delayed_work free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

#ifdef CONFIG_MMU_NOTIFIER
	/**
	 * notifier_lock for mmu notifiers, memory may not be allocated
	 * while holding this lock.
	 */
	rwlock_t notifier_lock;
#endif

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
					 u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return i915_fence_context_timeout(i915, U64_MAX);
}

/* Number of SAGV/QGV points; BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8

#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))

/* Number of PSF GV points; BSpec precisely defines this */
#define I915_NUM_PSF_GV_POINTS 3

struct intel_vbt_data {
	/* bdb version */
	u16 version;

	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	bool override_afc_startup;
	u8 override_afc_startup_val;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
		bool hobl;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		u16 brightness_precision_bits;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	struct list_head display_devices;

	struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
	struct sdvo_device_mapping sdvo_mappings[2];
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
};

struct i915_selftest_stash {
	atomic_t counter;
	struct ida mock_region_instances;
};

/* intel_audio.c private */
struct intel_audio_private {
	/* Display internal audio functions */
	const struct intel_audio_funcs *funcs;

	/* hda/i915 audio component */
	struct i915_audio_component *component;
	bool component_registered;
	/* mutex for audio/video sync */
	struct mutex mutex;
	int power_refcount;
	u32 freq_cntrl;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *encoder_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int irq;
	} lpe;
};

struct drm_i915_private {
	struct drm_device drm;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * offlimits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_dmc dmc;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;

	struct rb_root uabi_engines;

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc *fbc[I915_MAX_FBCS];
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/* The current hardware cdclk configuration */
		struct intel_cdclk_config hw;

		/* cdclk, divider, and ratio table from bspec */
		const struct intel_cdclk_vals *table;

		struct intel_global_obj obj;
	} cdclk;

	struct {
		/* The current hardware dbuf configuration */
		u8 enabled_slices;

		struct intel_global_obj obj;
	} dbuf;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;
	/* unbound hipri wq for page flips/plane updates */
	struct workqueue_struct *flip_wq;

	/* pm private clock gating functions */
	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;

	/* pm display functions */
	const struct drm_i915_wm_disp_funcs *wm_disp;

	/* irq display functions */
	const struct intel_hotplug_funcs *hotplug_funcs;

	/* fdi display functions */
	const struct intel_fdi_funcs *fdi_funcs;

	/* display pll funcs */
	const struct intel_dpll_funcs *dpll_funcs;

	/* Display functions */
	const struct drm_i915_display_funcs *display;

	/* Display internal color functions */
	const struct intel_color_funcs *color_funcs;

	/* Display CDCLK functions */
	const struct intel_cdclk_funcs *cdclk_funcs;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	/**
	 * dpll and cdclk state is protected by connection_mutex
	 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms plls
	 * share registers.
	 */
	struct {
		struct mutex lock;

		int num_shared_dpll;
		struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
		const struct intel_dpll_mgr *mgr;

		struct {
			int nssc;
			int ssc;
		} ref_clks;
	} dpll;

	struct list_head global_obj_list;

	/*
	 * For reading active_pipes holding any crtc lock is
	 * sufficient, for writing must hold all of them.
	 */
	u8 active_pipes;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * HTI (aka HDPORT) state read during initial hw readout.  Most
	 * platforms don't have HTI, so this will just stay 0.  Those that do
	 * will use this later to figure out which PLLs and PHYs are unavailable
	 * for driver usage.
	 */
	u32 hti_state;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_power_domains power_domains;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	u32 sagv_block_time_us;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * crtc_state->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;
	} wm;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	struct intel_bw_info {
		/* for each QGV point */
		unsigned int deratedbw[I915_NUM_QGV_POINTS];
		/* for each PSF GV point */
		unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
		u8 num_qgv_points;
		u8 num_psf_gv_points;
		u8 num_planes;
	} max_bw[6];

	struct intel_global_obj bw_obj;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt0;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	u8 framestart_delay;

	/* Window2 specifies time required to program DSB (Window2) in number of scan lines */
	u8 window2_delay;

	u8 pch_ssc_use;

	/* For i915gm/i945gm vblank irq workaround */
	u8 vblank_enabled;

	bool irq_enabled;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	bool ipc_enabled;

	struct intel_audio_private audio;

	struct i915_pmu pmu;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	/* The TTM device structure. */
	struct ttm_device bdev;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt0;
}
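
/*
 * Illustrative sketch (not part of the driver): the conversion helpers
 * above chain together when moving from a generic handle to the
 * driver-private state, e.g. in a hypothetical PCI callback:
 *
 *	static void example_pci_cb(struct pci_dev *pdev)
 *	{
 *		struct drm_i915_private *i915 = pdev_to_i915(pdev);
 *		struct intel_gt *gt = to_gt(i915);
 *
 *		(void)gt;
 *	}
 *
 * example_pci_cb() is hypothetical; only the helpers it calls are real.
 */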

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
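
/*
 * Illustrative sketch (not part of the driver): counting the engines of
 * one uabi class with the iterator above; I915_ENGINE_CLASS_RENDER comes
 * from the uapi header included at the top of this file:
 *
 *	struct intel_engine_cs *engine;
 *	unsigned int count = 0;
 *
 *	for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_RENDER, i915)
 *		count++;
 */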

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
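
/*
 * Worked example (illustrative): with 8 bits per pipe, plane_id 0 of
 * pipe 1 gives INTEL_FRONTBUFFER(1, 0) == BIT(0 + 8 * 1) == 0x100,
 * INTEL_FRONTBUFFER_OVERLAY(1) == BIT(15), and
 * INTEL_FRONTBUFFER_ALL_MASK(1) == GENMASK(15, 8) == 0xff00.
 */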

#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define IP_VER(ver, rel)		((ver) << 8 | (rel))

#define GRAPHICS_VER(i915)		(INTEL_INFO(i915)->graphics.ver)
#define GRAPHICS_VER_FULL(i915)		IP_VER(INTEL_INFO(i915)->graphics.ver, \
					       INTEL_INFO(i915)->graphics.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915)			(INTEL_INFO(i915)->media.ver)
#define MEDIA_VER_FULL(i915)		IP_VER(INTEL_INFO(i915)->media.ver, \
					       INTEL_INFO(i915)->media.rel)
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define DISPLAY_VER(i915)	(INTEL_INFO(i915)->display.ver)
#define IS_DISPLAY_VER(i915, from, until) \
	(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
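
/*
 * Illustrative usage (sketch, with hypothetical helpers): guarding code
 * on an IP version range with the macros above:
 *
 *	if (IS_DISPLAY_VER(i915, 9, 11))
 *		do_old_thing(i915);
 *	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 10))
 *		do_new_thing(i915);
 *
 * do_old_thing()/do_new_thing() are stand-ins, not real functions.
 */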

#define INTEL_REVID(dev_priv)	(to_pci_dev((dev_priv)->drm.dev)->revision)

#define HAS_DSB(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dsb)

#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)

#define IS_DISPLAY_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
	 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}
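
/*
 * Worked example (illustrative; assumes a 32-bit platform_mask[] entry
 * and INTEL_SUBPLATFORM_BITS == 3, i.e. pbits == 29): a platform enum
 * value p == 30 lands in array word 30 / 29 == 1, at bit position
 * 30 % 29 + 3 == 4. The low INTEL_SUBPLATFORM_BITS bits of each word
 * remain reserved for subplatform flags.
 */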

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
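
/*
 * Worked example of the MSB trick above (illustrative): with a 32-bit
 * mask, msb == 31. Shifting mask left by (msb - pb) moves the platform
 * bit to bit 31, and shifting by (msb - s) moves the subplatform bit to
 * bit 31, so the AND has BIT(31) set iff both bits were set, i.e. iff
 * the device is platform p *and* subplatform s.
 */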

#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
#define IS_DGFX(dev_priv)   (INTEL_INFO(dev_priv)->is_dgfx)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv)	(IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
				IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG2)
#define IS_DG2_G10(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
#define IS_ADLP_N(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)

#define IS_CML_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv)	(IS_COMETLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)

#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TGL_UY(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)

#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))

#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))

#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
	(IS_TGL_UY(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
	((IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_RKL_DISPLAY_STEP(p, since, until) \
	(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_DG1_GRAPHICS_STEP(p, since, until) \
	(IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
	(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
	(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))

/*
 * DG2 hardware steppings are a bit unusual.  The hardware design was forked to
 * create three variants (G10, G11, and G12) which each have distinct
 * workaround sets.  The G11 and G12 forks of the DG2 design reset the GT
 * stepping back to "A0" for their first iterations, even though they're more
 * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
 * functionality and workarounds.  However the display stepping does not reset
 * in the same manner --- a specific stepping like "B0" has a consistent
 * meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
 *
 * TLDR:  All GT workarounds and stepping-specific logic must be applied in
 * relation to a specific subplatform (G10/G11/G12), whereas display workarounds
 * and stepping-specific logic will be applied with a general DG2-wide stepping
 * number.
 */
#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
	(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
	(IS_DG2(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))
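
/*
 * Illustrative usage (sketch) of the convention above: a GT workaround
 * is guarded per-variant while a display workaround uses the DG2-wide
 * stepping, e.g.:
 *
 *	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
 *		apply_gt_wa(i915);
 *	if (IS_DG2_DISPLAY_STEP(i915, STEP_B0, STEP_C0))
 *		apply_display_wa(i915);
 *
 * apply_gt_wa()/apply_display_wa() are hypothetical; the STEP_* values
 * come from intel_step.h.
 */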

#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({		\
	unsigned int first__ = (first);					\
	unsigned int count__ = (count);					\
	((gt)->info.engine_mask &						\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;		\
})
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
#define CCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
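
/*
 * Worked example (illustrative): with an engine_mask in which only VCS0
 * and VCS2 are set, ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) keeps
 * just the VCS bits and shifts them down to bit 0, so
 * VDBOX_MASK(gt) == 0b101.
 */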

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv)	HAS_EDRAM(dev_priv)

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
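
/*
 * Illustrative usage (sketch): checking for 64K GTT page support before
 * selecting that page size, assuming I915_GTT_PAGE_SIZE_64K from the GTT
 * definitions:
 *
 *	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
 *		page_size = SZ_64K;
 */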

#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
	(IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)			\
	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
					 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(DISPLAY_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)
#define HAS_DP20(dev_priv)	(IS_DG2(dev_priv))

#define HAS_CDCLK_CRAWL(dev_priv)	 (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv)	 (DISPLAY_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans)	 ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)

#define HAS_DMC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dmc)

#define HAS_MSO(i915)		(DISPLAY_VER(i915) >= 12)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_MSLICES(dev_priv) \
	(INTEL_INFO(dev_priv)->has_mslices)

/*
 * Set this flag when the platform requires 64K GTT page sizes or larger for
 * device local memory access.
 */
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)

/*
 * Set this flag when the platform doesn't allow both 64K pages and 4K pages
 * in the same PT. This flag means we need to support a compact PT layout for
 * the ppGTT when using the 64K GTT pages.
 */
#define NEEDS_COMPACT_PT(dev_priv) (INTEL_INFO(dev_priv)->needs_compact_pt)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

/*
 * Platform has dedicated compression control state for each lmem surface,
 * stored in lmem, to support the 3D and media compression formats.
 */
#define HAS_FLAT_CCS(dev_priv)   (INTEL_INFO(dev_priv)->has_flat_ccs)

#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_PXP(dev_priv)  ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
			    INTEL_INFO(dev_priv)->has_pxp) && \
			    VDBOX_MASK(to_gt(dev_priv)))

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)

#define HAS_VRR(i915)	(DISPLAY_VER(i915) >= 11)

#define HAS_ASYNC_FLIPS(i915)		(DISPLAY_VER(i915) >= 5)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)

#define HAS_GUC_DEPRIVILEGE(dev_priv) \
	(INTEL_INFO(dev_priv)->has_guc_deprivilege)

static inline bool run_as_guest(void)
{
	return !hypervisor_is_type(X86_HYPER_NATIVE);
}

#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
					      IS_ALDERLAKE_S(dev_priv))

static inline bool intel_vtd_active(struct drm_i915_private *i915)
{
	if (device_iommu_mapped(i915->drm.dev))
		return true;

	/* Running as a guest, we assume the host is enforcing VT-d */
	return run_as_guest();
}

void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return DISPLAY_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv);
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
	return IS_BROXTON(i915) && intel_vtd_active(i915);
}

static inline bool
intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}

/* i915_gem.c */
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid here: freeing the
	 * objects does take a small amount of time, during which the RCU
	 * callbacks could have added new objects to the freed list and armed
	 * the work again.
	 */
	while (atomic_read(&i915->mm.free_count)) {
		flush_delayed_work(&i915->mm.free_work);
		flush_delayed_work(&i915->bdev.wq);
		rcu_barrier();
	}
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to the objects above (see i915_gem_drain_freed_objects()),
	 * in general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to drain the
	 * workqueue a second time after waiting for the RCU grace period so
	 * that we catch work queued via RCU from the first pass. As neither
	 * drain_workqueue() nor flush_workqueue() report a result, we assume
	 * that no more than 3 passes are required to catch all _recursive_
	 * RCU delayed work.
	 */
	int pass = 3;
	do {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	} while (--pass);
	drain_workqueue(i915->wq);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_ggtt_view *view,
			    u64 size, u64 alignment, u64 flags);

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size, u64 alignment, u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915,
		       struct drm_i915_gem_object *obj, bool always_coherent)
{
	if (i915_gem_object_is_lmem(obj))
		return I915_MAP_WC;
	if (HAS_LLC(i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}
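
/*
 * Illustrative usage (sketch): choosing the map type before pinning a
 * CPU mapping of an object, e.g. with the GEM pin_map helper used
 * elsewhere in the driver:
 *
 *	enum i915_map_type type =
 *		i915_coherent_map_type(i915, obj, true);
 *	void *vaddr = i915_gem_object_pin_map_unlocked(obj, type);
 *
 * Error handling elided for brevity.
 */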

#endif