xref: /openbmc/linux/drivers/gpu/drm/i915/i915_drv.h (revision a34a3ed7)
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20171222"
#define DRIVER_TIMESTAMP	1513971710

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to catch unexpected conditions
 * which may not necessarily be a user visible problem.  This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks module
 * parameter, to enable distros and users to tailor their preferred
 * amount of i915 state-check spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915_modparams.verbose_state_checks, format))	\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
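
/*
 * Illustrative expansion (hypothetical usage, not from the original
 * source): a check such as
 *
 *	I915_STATE_WARN_ON(val > U16_MAX);
 *
 * becomes I915_STATE_WARN(val > U16_MAX, "%s", "WARN_ON(val > U16_MAX)"),
 * so a failing check prints the stringified condition via either WARN()
 * or DRM_ERROR(), depending on verbose_state_checks.
 */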

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

typedef struct {
	uint32_t val;
} uint_fixed_16_16_t;
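
/*
 * Q16.16 fixed point: the upper 16 bits of .val hold the integer part,
 * the lower 16 bits the fraction, i.e. the represented value is
 * val / 2^16. Worked example: 1.5 is stored as 0x00018000, and
 * u32_to_fixed16(3) below yields .val == 0x00030000.
 */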

#define FP_16_16_MAX ({ \
	uint_fixed_16_16_t fp; \
	fp.val = UINT_MAX; \
	fp; \
})

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	if (val.val == 0)
		return true;
	return false;
}

static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U16_MAX);

	fp.val = val << 16;
	return fp;
}

static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}

static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	uint_fixed_16_16_t min;

	min.val = min(min1.val, min2.val);
	return min;
}

static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max;

	max.val = max(max1.val, max2.val);
	return max;
}

static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U32_MAX);
	fp.val = (uint32_t) val;
	return fp;
}

static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
					    uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}

static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
	WARN_ON(intermediate_val > U32_MAX);
	return (uint32_t) intermediate_val;
}

static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val.val * mul.val;
	intermediate_val = intermediate_val >> 16;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
	return clamp_u64_to_fixed16(interm_val);
}
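
/*
 * Worked example: div_fixed16(1, 3) computes DIV_ROUND_UP(1 << 16, 3) ==
 * 21846 == 0x5556, i.e. ~0.33334 in Q16.16. The rounding is always up,
 * so the result never underestimates the true quotient.
 */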

static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
	WARN_ON(interm_val > U32_MAX);
	return (uint32_t) interm_val;
}

static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
						 uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	uint64_t interm_sum;

	interm_sum = (uint64_t) add1.val + add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}

static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 uint32_t add2)
{
	uint64_t interm_sum;
	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);

	interm_sum = (uint64_t) add1.val + interm_add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
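
/*
 * Hypothetical usage sketch: iterate over all real pins, skipping
 * HPD_NONE (and HPD_TV, which aliases pin 0):
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */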

#define HPD_STORM_DEFAULT_THRESHOLD 5

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP handler could block the workqueue while trying to acquire
	 * a mode config mutex that userspace already holds, while userspace
	 * in turn waits on the DP work to run, which is blocked behind the
	 * non-DP one. A dedicated workqueue for DP avoids that deadlock.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		atomic_t boosts;
	} rps_client;

	unsigned int bsd_engine;

/* A client can have at most 3 contexts banned before it is itself denied
 * the creation of new contexts. As one context ban requires 4 consecutive
 * hangs, and more if there is progress in between, this is a last-resort
 * stop-gap measure to limit a badly behaving client's access to the GPU.
 */
#define I915_MAX_CLIENT_CONTEXT_BANS 3
	atomic_t context_bans;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
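
/*
 * Worked example: CSR_VERSION(1, 23) packs to 0x00010017;
 * CSR_VERSION_MAJOR() then recovers 1 and CSR_VERSION_MINOR() 23.
 */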

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

struct intel_display_error_state;

struct i915_gpu_state {
	struct kref ref;
	struct timeval time;
	struct timeval boottime;
	struct timeval uptime;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		bool waiting;
		int num_waiters;
		unsigned long hangcheck_timestamp;
		bool hangcheck_stalled;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int priority;
			int ban_score;
			int active;
			int guilty;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			int priority;
			int ban_score;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;

		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			int y;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID; LOW_RR is
 * the lowest eDP panel refresh rate found from EDID parsing for the same
 * resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
	bool y_cord_support;
	bool colorimetry_support;
	bool alpm;

	void (*enable_source)(struct intel_dp *,
			      const struct intel_crtc_state *);
	void (*disable_source)(struct intel_dp *,
			       const struct intel_crtc_state *);
	void (*enable_sink)(struct intel_dp *);
	void (*activate)(struct intel_dp *);
	void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
	PCH_SPT,        /* Sunrisepoint PCH */
	PCH_KBP,        /* Kaby Lake PCH */
	PCH_CNP,        /* Cannon Lake PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

struct intel_rps {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold; /* Current %busy required to upclock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	atomic_t num_waiters;
	atomic_t boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei ei;
};

struct intel_rc6 {
	bool enabled;
};

struct intel_llc_pstate {
	bool enabled;
};

struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

1080 	 * All objects within this list must also be on bound_list.
1081 	 */
1082 	struct list_head userfault_list;
1083 

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	spinlock_t free_lock;

	/**
	 * Small stash of WC pages
	 */
	struct pagevec wc_stash;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	u64 unordered_timeline;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when a reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set,
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If the reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
	 * but it may be held by some long running waiter (that we cannot
	 * interrupt without causing trouble). Once we are ready to do the GPU
	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
	 * they already hold the struct_mutex and want to participate they can
	 * inspect the bit and do the reset directly, otherwise the worker
	 * waits for the struct_mutex.
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts in the same engine.
	 * As the number of engines continues to grow, allocate the flags from
	 * the most significant bits.
	 *
	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_gem_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
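
/*
 * Flag layout sketch (assuming a 64-bit unsigned long): I915_WEDGED is
 * bit 63 and the per-engine reset bits grow downwards from it, so
 * I915_RESET_ENGINE + engine_id selects the bit for a given engine,
 * occupying bits [63 - I915_NUM_ENGINES, 62]. The low bits hold the
 * BACKOFF/HANDOFF/MODESET flags above.
 */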

	/** Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	/**
	 * Waitqueue to signal when a hang is detected. Used by waiters
	 * to release the struct_mutex so the reset can proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	int max_tmds_clock;

	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
	uint8_t supports_edp:1;

	uint8_t alternate_aux_channel;
	uint8_t alternate_ddc_pin;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
	uint16_t plane[I915_MAX_PLANES];
	uint16_t fbc;
};

struct g4x_sr_wm {
	uint16_t plane;
	uint16_t cursor;
	uint16_t fbc;
};

struct vlv_wm_ddl_values {
	uint8_t plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	uint8_t level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}
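
/*
 * Worked example: an entry with start == 0 and end == 160 covers DDB
 * blocks 0..159, so skl_ddb_entry_size() returns 160 ('end' being
 * exclusive, per the struct comment above).
 */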

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}

struct skl_ddb_allocation {
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	bool plane_en;
	uint16_t plane_res_b;
	uint8_t plane_res_l;
};

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	uint32_t width;
	uint8_t cpp;
	uint32_t plane_pixel_rate;
	uint32_t y_min_scanlines;
	uint32_t plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	uint32_t linetime_us;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
	int skipped;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
	bool active;
	u32 caps;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct i915_oa_format {
	u32 format;
	int size;
};

struct i915_oa_reg {
	i915_reg_t addr;
	u32 value;
};

struct i915_oa_config {
	char uuid[UUID_STRING_LEN + 1];
	int id;

	const struct i915_oa_reg *mux_regs;
	u32 mux_regs_len;
	const struct i915_oa_reg *b_counter_regs;
	u32 b_counter_regs_len;
	const struct i915_oa_reg *flex_regs;
	u32 flex_regs_len;

	struct attribute_group sysfs_metric;
	struct attribute *attrs[2];
	struct device_attribute sysfs_metric_id;

	atomic_t ref_count;
};

struct i915_perf_stream;

/**
 * struct i915_perf_stream_ops - the OPs to support a specific stream type
 */
struct i915_perf_stream_ops {
	/**
	 * @enable: Enables the collection of HW samples, either in response to
	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
	 * without `I915_PERF_FLAG_DISABLED`.
	 */
	void (*enable)(struct i915_perf_stream *stream);

	/**
	 * @disable: Disables the collection of HW samples, either in response
	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
	 * the stream.
	 */
	void (*disable)(struct i915_perf_stream *stream);

	/**
	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
	 * once there is something ready to read() for the stream
	 */
	void (*poll_wait)(struct i915_perf_stream *stream,
			  struct file *file,
			  poll_table *wait);

	/**
	 * @wait_unlocked: For handling a blocking read, wait until there is
	 * something ready to read() for the stream. E.g. wait on the same
	 * wait queue that would be passed to poll_wait().
	 */
	int (*wait_unlocked)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy buffered metrics as records to userspace
	 * **buf**: the userspace destination buffer
	 * **count**: the number of bytes to copy, requested by userspace
	 * **offset**: zero at the start of the read, updated as the read
	 * proceeds, it represents how many bytes have been copied so far and
	 * the buffer offset for copying the next record.
	 *
	 * Copy as many buffered i915 perf samples and records for this stream
	 * to userspace as will fit in the given buffer.
	 *
	 * Only write complete records, returning -%ENOSPC if there isn't room
	 * for a complete record.
	 *
	 * Return any error condition that results in a short read such as
	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
	 * returning to userspace.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @destroy: Cleanup any stream specific resources.
	 *
	 * The stream will always be disabled before this is called.
	 */
	void (*destroy)(struct i915_perf_stream *stream);
};

/**
 * struct i915_perf_stream - state for a single open stream FD
 */
struct i915_perf_stream {
	/**
	 * @dev_priv: i915 drm device
	 */
	struct drm_i915_private *dev_priv;

	/**
	 * @link: Links the stream into ``&drm_i915_private->streams``
	 */
	struct list_head link;

	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, representing the contents
	 * of a single sample as read() by userspace.
	 */
	u32 sample_flags;

	/**
	 * @sample_size: Considering the configured contents of a sample
	 * combined with the required header size, this is the total size
	 * of a single sample record.
	 */
	int sample_size;

	/**
	 * @ctx: %NULL if measuring system-wide across all contexts or a
	 * specific context that is being monitored.
	 */
	struct i915_gem_context *ctx;

	/**
	 * @enabled: Whether the stream is currently enabled, considering
	 * whether the stream was opened in a disabled state and based
	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
	 */
	bool enabled;

	/**
	 * @ops: The callbacks providing the implementation of this specific
	 * type of configured stream.
	 */
	const struct i915_perf_stream_ops *ops;

	/**
	 * @oa_config: The OA configuration used by the stream.
	 */
	struct i915_oa_config *oa_config;
};

/**
 * struct i915_oa_ops - Gen specific implementation of an OA unit stream
 */
struct i915_oa_ops {
	/**
	 * @is_valid_b_counter_reg: Validates register's address for
	 * programming boolean counters for a particular platform.
	 */
	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
				       u32 addr);

	/**
	 * @is_valid_mux_reg: Validates register's address for programming mux
	 * for a particular platform.
	 */
	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @is_valid_flex_reg: Validates register's address for programming
	 * flex EU filtering for a particular platform.
	 */
	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @init_oa_buffer: Resets the head and tail pointers of the
	 * circular buffer for periodic OA reports.
	 *
	 * Called when first opening a stream for OA metrics, but also may be
	 * called in response to an OA buffer overflow or other error
	 * condition.
	 *
	 * Note it may be necessary to clear the full OA buffer here as part of
	 * maintaining the invariant that new reports must be written to
	 * zeroed memory for us to be able to reliably detect if an expected
	 * report has not yet landed in memory.  (At least on Haswell the OA
	 * buffer tail pointer is not synchronized with reports being visible
	 * to the CPU.)
	 */
	void (*init_oa_buffer)(struct drm_i915_private *dev_priv);

	/**
	 * @enable_metric_set: Selects and applies any MUX configuration to set
	 * up the Boolean and Custom (B/C) counters that are part of the
	 * counter reports being sampled. May apply system constraints such as
	 * disabling EU clock gating as required.
	 */
	int (*enable_metric_set)(struct drm_i915_private *dev_priv,
				 const struct i915_oa_config *oa_config);

	/**
	 * @disable_metric_set: Remove system constraints associated with using
	 * the OA unit.
	 */
	void (*disable_metric_set)(struct drm_i915_private *dev_priv);

	/**
	 * @oa_enable: Enable periodic sampling
	 */
	void (*oa_enable)(struct drm_i915_private *dev_priv);

	/**
	 * @oa_disable: Disable periodic sampling
	 */
	void (*oa_disable)(struct drm_i915_private *dev_priv);

	/**
	 * @read: Copy data from the circular OA buffer into a given userspace
	 * buffer.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @oa_hw_tail_read: read the OA tail pointer register
	 *
	 * In particular this enables us to share all the fiddly code for
	 * handling the OA unit tail pointer race that affects multiple
	 * generations.
	 */
	u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
};

struct intel_cdclk_state {
	unsigned int cdclk, vco, ref;
	u8 voltage_level;
};

struct drm_i915_private {
	struct drm_device drm;

	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *luts;
	struct kmem_cache *requests;
	struct kmem_cache *dependencies;
	struct kmem_cache *priorities;

	const struct intel_device_info info;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
1821 	 */
1822 	struct resource dsm_reserved;
1823 
1824 	/*
1825 	 * Stolen memory is segmented in hardware with different portions
1826 	 * offlimits to certain functions.
1827 	 *
1828 	 * The drm_mm is initialised to the total accessible range, as found
1829 	 * from the PCI config. On Broadwell+, this is further restricted to
1830 	 * avoid the first page! The upper end of stolen memory is reserved for
1831 	 * hardware functions and similarly removed from the accessible range.
1832 	 */
1833 	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */
1834 
1835 	void __iomem *regs;
1836 
1837 	struct intel_uncore uncore;
1838 
1839 	struct i915_virtual_gpu vgpu;
1840 
1841 	struct intel_gvt *gvt;
1842 
1843 	struct intel_huc huc;
1844 	struct intel_guc guc;
1845 
1846 	struct intel_csr csr;
1847 
1848 	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
1849 
1850 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
1851 	 * controller on different i2c buses. */
1852 	struct mutex gmbus_mutex;
1853 
1854 	/**
1855 	 * Base address of the gmbus and gpio block.
1856 	 */
1857 	uint32_t gpio_mmio_base;
1858 
1859 	/* MMIO base address for MIPI regs */
1860 	uint32_t mipi_mmio_base;
1861 
1862 	uint32_t psr_mmio_base;
1863 
1864 	uint32_t pps_mmio_base;
1865 
1866 	wait_queue_head_t gmbus_wait_queue;
1867 
1868 	struct pci_dev *bridge_dev;
1869 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
1870 	/* Context used internally to idle the GPU and setup initial state */
1871 	struct i915_gem_context *kernel_context;
1872 	/* Context only to be used for injecting preemption commands */
1873 	struct i915_gem_context *preempt_context;
1874 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
1875 					    [MAX_ENGINE_INSTANCE + 1];
1876 
1877 	struct drm_dma_handle *status_page_dmah;
1878 	struct resource mch_res;
1879 
1880 	/* protects the irq masks */
1881 	spinlock_t irq_lock;
1882 
1883 	bool display_irqs_enabled;
1884 
1885 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1886 	struct pm_qos_request pm_qos;
1887 
1888 	/* Sideband mailbox protection */
1889 	struct mutex sb_lock;
1890 
1891 	/** Cached value of IMR to avoid reads in updating the bitfield */
1892 	union {
1893 		u32 irq_mask;
1894 		u32 de_irq_mask[I915_MAX_PIPES];
1895 	};
1896 	u32 gt_irq_mask;
1897 	u32 pm_imr;
1898 	u32 pm_ier;
1899 	u32 pm_rps_events;
1900 	u32 pm_guc_events;
1901 	u32 pipestat_irq_mask[I915_MAX_PIPES];
1902 
1903 	struct i915_hotplug hotplug;
1904 	struct intel_fbc fbc;
1905 	struct i915_drrs drrs;
1906 	struct intel_opregion opregion;
1907 	struct intel_vbt_data vbt;
1908 
1909 	bool preserve_bios_swizzle;
1910 
1911 	/* overlay */
1912 	struct intel_overlay *overlay;
1913 
1914 	/* backlight registers and fields in struct intel_panel */
1915 	struct mutex backlight_lock;
1916 
1917 	/* LVDS info */
1918 	bool no_aux_handshake;
1919 
1920 	/* protects panel power sequencer state */
1921 	struct mutex pps_mutex;
1922 
1923 	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1924 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1925 
1926 	unsigned int fsb_freq, mem_freq, is_ddr3;
1927 	unsigned int skl_preferred_vco_freq;
1928 	unsigned int max_cdclk_freq;
1929 
1930 	unsigned int max_dotclk_freq;
1931 	unsigned int rawclk_freq;
1932 	unsigned int hpll_freq;
1933 	unsigned int fdi_pll_freq;
1934 	unsigned int czclk_freq;
1935 
1936 	struct {
1937 		/*
1938 		 * The current logical cdclk state.
1939 		 * See intel_atomic_state.cdclk.logical
1940 		 *
1941 		 * For reading, holding any crtc lock is sufficient;
1942 		 * for writing, all of them must be held.
1943 		 */
1944 		struct intel_cdclk_state logical;
1945 		/*
1946 		 * The current actual cdclk state.
1947 		 * See intel_atomic_state.cdclk.actual
1948 		 */
1949 		struct intel_cdclk_state actual;
1950 		/* The current hardware cdclk state */
1951 		struct intel_cdclk_state hw;
1952 	} cdclk;
1953 
1954 	/**
1955 	 * wq - Driver workqueue for GEM.
1956 	 *
1957 	 * NOTE: Work items scheduled here are not allowed to grab any modeset
1958 	 * locks, for otherwise the flushing done in the pageflip code will
1959 	 * result in deadlocks.
1960 	 */
1961 	struct workqueue_struct *wq;
1962 
1963 	/* ordered wq for modesets */
1964 	struct workqueue_struct *modeset_wq;
1965 
1966 	/* Display functions */
1967 	struct drm_i915_display_funcs display;
1968 
1969 	/* PCH chipset type */
1970 	enum intel_pch pch_type;
1971 	unsigned short pch_id;
1972 
1973 	unsigned long quirks;
1974 
1975 	enum modeset_restore modeset_restore;
1976 	struct mutex modeset_restore_lock;
1977 	struct drm_atomic_state *modeset_restore_state;
1978 	struct drm_modeset_acquire_ctx reset_ctx;
1979 
1980 	struct list_head vm_list; /* Global list of all address spaces */
1981 	struct i915_ggtt ggtt; /* VM representing the global address space */
1982 
1983 	struct i915_gem_mm mm;
1984 	DECLARE_HASHTABLE(mm_structs, 7);
1985 	struct mutex mm_lock;
1986 
1987 	struct intel_ppat ppat;
1988 
1989 	/* Kernel Modesetting */
1990 
1991 	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1992 	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1993 
1994 #ifdef CONFIG_DEBUG_FS
1995 	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1996 #endif
1997 
1998 	/* dpll and cdclk state is protected by connection_mutex */
1999 	int num_shared_dpll;
2000 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
2001 	const struct intel_dpll_mgr *dpll_mgr;
2002 
2003 	/*
2004 	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
2005 	 * Must be global rather than per dpll, because on some platforms
2006 	 * plls share registers.
2007 	 */
2008 	struct mutex dpll_lock;
2009 
2010 	unsigned int active_crtcs;
2011 	/* minimum acceptable cdclk for each pipe */
2012 	int min_cdclk[I915_MAX_PIPES];
2013 	/* minimum acceptable voltage level for each pipe */
2014 	u8 min_voltage_level[I915_MAX_PIPES];
2015 
2016 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
2017 
2018 	struct i915_workarounds workarounds;
2019 
2020 	struct i915_frontbuffer_tracking fb_tracking;
2021 
2022 	struct intel_atomic_helper {
2023 		struct llist_head free_list;
2024 		struct work_struct free_work;
2025 	} atomic_helper;
2026 
2027 	u16 orig_clock;
2028 
2029 	bool mchbar_need_disable;
2030 
2031 	struct intel_l3_parity l3_parity;
2032 
2033 	/* Cannot be determined by PCIID. You must always read a register. */
2034 	u32 edram_cap;
2035 
2036 	/*
2037 	 * Protects RPS/RC6 register access and PCU communication.
2038 	 * Must be taken after struct_mutex if nested. Note that
2039 	 * this lock may be held for long periods of time when
2040 	 * talking to hw - so only take it when talking to hw!
2041 	 */
2042 	struct mutex pcu_lock;
2043 
2044 	/* gen6+ GT PM state */
2045 	struct intel_gen6_power_mgmt gt_pm;
2046 
2047 	/* ilk-only ips/rps state. Everything in here is protected by the global
2048 	 * mchdev_lock in intel_pm.c */
2049 	struct intel_ilk_power_mgmt ips;
2050 
2051 	struct i915_power_domains power_domains;
2052 
2053 	struct i915_psr psr;
2054 
2055 	struct i915_gpu_error gpu_error;
2056 
2057 	struct drm_i915_gem_object *vlv_pctx;
2058 
2059 	/* list of fbdev register on this device */
2060 	struct intel_fbdev *fbdev;
2061 	struct work_struct fbdev_suspend_work;
2062 
2063 	struct drm_property *broadcast_rgb_property;
2064 	struct drm_property *force_audio_property;
2065 
2066 	/* hda/i915 audio component */
2067 	struct i915_audio_component *audio_component;
2068 	bool audio_component_registered;
2069 	/**
2070 	 * av_mutex - mutex for audio/video sync
2071 	 *
2072 	 */
2073 	struct mutex av_mutex;
2074 
2075 	struct {
2076 		struct list_head list;
2077 		struct llist_head free_list;
2078 		struct work_struct free_work;
2079 
2080 		/* The hw wants to have a stable context identifier for the
2081 		 * lifetime of the context (for OA, PASID, faults, etc).
2082 		 * This is limited in execlists to 21 bits.
2083 		 */
2084 		struct ida hw_ida;
2085 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
2086 	} contexts;
2087 
2088 	u32 fdi_rx_config;
2089 
2090 	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
2091 	u32 chv_phy_control;
2092 	/*
2093 	 * Shadows for CHV DPLL_MD regs to keep the state
2094 	 * checker somewhat working in the presence of hardware
2095 	 * crappiness (can't read out DPLL_MD for pipes B & C).
2096 	 */
2097 	u32 chv_dpll_md[I915_MAX_PIPES];
2098 	u32 bxt_phy_grc;
2099 
2100 	u32 suspend_count;
2101 	bool suspended_to_idle;
2102 	struct i915_suspend_saved_registers regfile;
2103 	struct vlv_s0ix_state vlv_s0ix_state;
2104 
2105 	enum {
2106 		I915_SAGV_UNKNOWN = 0,
2107 		I915_SAGV_DISABLED,
2108 		I915_SAGV_ENABLED,
2109 		I915_SAGV_NOT_CONTROLLED
2110 	} sagv_status;
2111 
2112 	struct {
2113 		/*
2114 		 * Raw watermark latency values:
2115 		 * in 0.1us units for WM0,
2116 		 * in 0.5us units for WM1+.
2117 		 */
2118 		/* primary */
2119 		uint16_t pri_latency[5];
2120 		/* sprite */
2121 		uint16_t spr_latency[5];
2122 		/* cursor */
2123 		uint16_t cur_latency[5];
2124 		/*
2125 		 * Raw watermark memory latency values
2126 		 * for SKL for all 8 levels
2127 		 * in 1us units.
2128 		 */
2129 		uint16_t skl_latency[8];
2130 
2131 		/* current hardware state */
2132 		union {
2133 			struct ilk_wm_values hw;
2134 			struct skl_wm_values skl_hw;
2135 			struct vlv_wm_values vlv;
2136 			struct g4x_wm_values g4x;
2137 		};
2138 
2139 		uint8_t max_level;
2140 
2141 		/*
2142 		 * Should be held around atomic WM register writing; also
2143 		 * protects intel_crtc->wm.active and
2144 		 * cstate->wm.need_postvbl_update.
2145 		 */
2146 		struct mutex wm_mutex;
2147 
2148 		/*
2149 		 * Set during HW readout of watermarks/DDB.  Some platforms
2150 		 * need to know when we're still using BIOS-provided values
2151 		 * (which we don't fully trust).
2152 		 */
2153 		bool distrust_bios_wm;
2154 	} wm;
2155 
2156 	struct i915_runtime_pm runtime_pm;
2157 
2158 	struct {
2159 		bool initialized;
2160 
2161 		struct kobject *metrics_kobj;
2162 		struct ctl_table_header *sysctl_header;
2163 
2164 		/*
2165 		 * Lock associated with adding/modifying/removing OA configs
2166 		 * in dev_priv->perf.metrics_idr.
2167 		 */
2168 		struct mutex metrics_lock;
2169 
2170 		/*
2171 		 * List of dynamic configurations; you need to hold
2172 		 * dev_priv->perf.metrics_lock to access it.
2173 		 */
2174 		struct idr metrics_idr;
2175 
2176 		/*
2177 		 * Lock associated with anything below within this structure
2178 		 * except exclusive_stream.
2179 		 */
2180 		struct mutex lock;
2181 		struct list_head streams;
2182 
2183 		struct {
2184 			/*
2185 			 * The stream currently using the OA unit. If accessed
2186 			 * outside a syscall associated with its file
2187 			 * descriptor, you need to hold
2188 			 * dev_priv->drm.struct_mutex.
2189 			 */
2190 			struct i915_perf_stream *exclusive_stream;
2191 
2192 			u32 specific_ctx_id;
2193 
2194 			struct hrtimer poll_check_timer;
2195 			wait_queue_head_t poll_wq;
2196 			bool pollin;
2197 
2198 			/**
2199 			 * For rate limiting any notifications of spurious
2200 			 * invalid OA reports
2201 			 */
2202 			struct ratelimit_state spurious_report_rs;
2203 
2204 			bool periodic;
2205 			int period_exponent;
2206 
2207 			struct i915_oa_config test_config;
2208 
2209 			struct {
2210 				struct i915_vma *vma;
2211 				u8 *vaddr;
2212 				u32 last_ctx_id;
2213 				int format;
2214 				int format_size;
2215 
2216 				/**
2217 				 * Locks reads and writes to all head/tail state
2218 				 *
2219 				 * Consider: the head and tail pointer state
2220 				 * needs to be read consistently from a hrtimer
2221 				 * callback (atomic context) and read() fop
2222 				 * (user context) with tail pointer updates
2223 				 * happening in atomic context and head updates
2224 				 * in user context and the (unlikely)
2225 				 * possibility of read() errors needing to
2226 				 * reset all head/tail state.
2227 				 *
2228 				 * Note: Contention or performance aren't
2229 				 * currently a significant concern here
2230 				 * considering the relatively low frequency of
2231 				 * hrtimer callbacks (5ms period) and that
2232 				 * reads typically only happen in response to a
2233 				 * hrtimer event and likely complete before the
2234 				 * next callback.
2235 				 *
2236 				 * Note: This lock is not held *while* reading
2237 				 * and copying data to userspace so the value
2238 				 * of head observed in hrtimer callbacks won't
2239 				 * represent any partial consumption of data.
2240 				 */
2241 				spinlock_t ptr_lock;
2242 
2243 				/**
2244 				 * One 'aging' tail pointer and one 'aged'
2245 				 * tail pointer ready to be used for reading.
2246 				 *
2247 				 * Initial values of 0xffffffff are invalid
2248 				 * and imply that an update is required
2249 				 * (and should be ignored by an attempted
2250 				 * read)
2251 				 */
2252 				struct {
2253 					u32 offset;
2254 				} tails[2];
2255 
2256 				/**
2257 				 * Index for the aged tail ready to read()
2258 				 * data up to.
2259 				 */
2260 				unsigned int aged_tail_idx;
2261 
2262 				/**
2263 				 * A monotonic timestamp for when the current
2264 				 * aging tail pointer was read; used to
2265 				 * determine when it is old enough to trust.
2266 				 */
2267 				u64 aging_timestamp;
2268 
2269 				/**
2270 				 * Although we can always read back the head
2271 				 * pointer register, we prefer to avoid
2272 				 * trusting the HW state, just to avoid any
2273 				 * risk that some hardware condition could
2274 				 * somehow bump the head pointer unpredictably
2275 				 * and cause us to forward the wrong OA buffer
2276 				 * data to userspace.
2277 				 */
2278 				u32 head;
2279 			} oa_buffer;
2280 
2281 			u32 gen7_latched_oastatus1;
2282 			u32 ctx_oactxctrl_offset;
2283 			u32 ctx_flexeu0_offset;
2284 
2285 			/**
2286 			 * The RPT_ID/reason field for Gen8+ includes a bit
2287 			 * to determine if the CTX ID in the report is valid
2288 			 * but the specific bit differs between Gen 8 and 9
2289 			 */
2290 			u32 gen8_valid_ctx_bit;
2291 
2292 			struct i915_oa_ops ops;
2293 			const struct i915_oa_format *oa_formats;
2294 		} oa;
2295 	} perf;
2296 
2297 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2298 	struct {
2299 		void (*resume)(struct drm_i915_private *);
2300 		void (*cleanup_engine)(struct intel_engine_cs *engine);
2301 
2302 		struct list_head timelines;
2303 		struct i915_gem_timeline global_timeline;
2304 		u32 active_requests;
2305 
2306 		/**
2307 		 * Is the GPU currently considered idle, or busy executing
2308 		 * userspace requests? Whilst idle, we allow runtime power
2309 		 * management to power down the hardware and display clocks.
2310 		 * In order to reduce the effect on performance, there
2311 		 * is a slight delay before we do so.
2312 		 */
2313 		bool awake;
2314 
2315 		/**
2316 		 * We leave the user IRQ off as much as possible,
2317 		 * but this means that requests will finish and never
2318 		 * be retired once the system goes idle. Set a timer to
2319 		 * fire periodically while the ring is running. When it
2320 		 * fires, go retire requests.
2321 		 */
2322 		struct delayed_work retire_work;
2323 
2324 		/**
2325 		 * When we detect an idle GPU, we want to turn on
2326 		 * powersaving features. So once we see that there
2327 		 * are no more requests outstanding and no more
2328 		 * arrive within a small period of time, we fire
2329 		 * off the idle_work.
2330 		 */
2331 		struct delayed_work idle_work;
2332 
2333 		ktime_t last_init_time;
2334 	} gt;
2335 
2336 	/* perform PHY state sanity checks? */
2337 	bool chv_phy_assert[2];
2338 
2339 	bool ipc_enabled;
2340 
2341 	/* Used to save the pipe-to-encoder mapping for audio */
2342 	struct intel_encoder *av_enc_map[I915_MAX_PIPES];
2343 
2344 	/* necessary resource sharing with HDMI LPE audio driver. */
2345 	struct {
2346 		struct platform_device *platdev;
2347 		int	irq;
2348 	} lpe_audio;
2349 
2350 	struct i915_pmu pmu;
2351 
2352 	/*
2353 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2354 	 * will be rejected. Instead look for a better place.
2355 	 */
2356 };
2357 
2358 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2359 {
2360 	return container_of(dev, struct drm_i915_private, drm);
2361 }
2362 
2363 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
2364 {
2365 	return to_i915(dev_get_drvdata(kdev));
2366 }
2367 
2368 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2369 {
2370 	return container_of(guc, struct drm_i915_private, guc);
2371 }
2372 
2373 static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
2374 {
2375 	return container_of(huc, struct drm_i915_private, huc);
2376 }
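/*
 * A minimal usage sketch for the converters above (illustrative only;
 * @dev, @kdev and @guc stand for pointers a hypothetical caller already
 * holds):
 *
 *	struct drm_i915_private *i915 = to_i915(dev);
 *	struct drm_i915_private *i915_from_dev = kdev_to_i915(kdev);
 *	struct drm_i915_private *i915_from_guc = guc_to_i915(guc);
 *
 * to_i915(), guc_to_i915() and huc_to_i915() are constant-offset
 * container_of() lookups on embedded members, while kdev_to_i915()
 * goes via the struct device's drvdata.
 */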
2377 
2378 /* Simple iterator over all initialised engines */
2379 #define for_each_engine(engine__, dev_priv__, id__) \
2380 	for ((id__) = 0; \
2381 	     (id__) < I915_NUM_ENGINES; \
2382 	     (id__)++) \
2383 		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
2384 
2385 /* Iterator over subset of engines selected by mask */
2386 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2387 	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
2388 	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
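/*
 * Example (a sketch assuming an initialised @dev_priv; the pr_info()
 * calls are purely illustrative):
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *	unsigned int tmp;
 *
 *	for_each_engine(engine, dev_priv, id)
 *		pr_info("%s is initialised\n", engine->name);
 *
 *	for_each_engine_masked(engine, dev_priv, RENDER_RING | BLT_RING, tmp)
 *		pr_info("%s selected by mask\n", engine->name);
 */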
2389 
2390 enum hdmi_force_audio {
2391 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
2392 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
2393 	HDMI_AUDIO_AUTO,		/* trust EDID */
2394 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
2395 };
2396 
2397 #define I915_GTT_OFFSET_NONE ((u32)-1)
2398 
2399 /*
2400  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
2401  * considered to be the frontbuffer for the given plane interface-wise. This
2402  * doesn't mean that the hw necessarily already scans it out, but that any
2403  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2404  *
2405  * We have one bit per pipe and per scanout plane type.
2406  */
2407 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2408 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
2409 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2410 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2411 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
2412 	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2413 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2414 	(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2415 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
2416 	(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2417 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
2418 	(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
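/*
 * Worked example: for pipe B (pipe == 1) the bits occupy byte 1 of the
 * mask, so INTEL_FRONTBUFFER_PRIMARY(PIPE_B) == 1 << 8,
 * INTEL_FRONTBUFFER_CURSOR(PIPE_B) == 1 << 9 and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xff << 8.
 */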
2419 
2420 /*
2421  * Optimised SGL iterator for GEM objects
2422  */
2423 static __always_inline struct sgt_iter {
2424 	struct scatterlist *sgp;
2425 	union {
2426 		unsigned long pfn;
2427 		dma_addr_t dma;
2428 	};
2429 	unsigned int curr;
2430 	unsigned int max;
2431 } __sgt_iter(struct scatterlist *sgl, bool dma) {
2432 	struct sgt_iter s = { .sgp = sgl };
2433 
2434 	if (s.sgp) {
2435 		s.max = s.curr = s.sgp->offset;
2436 		s.max += s.sgp->length;
2437 		if (dma)
2438 			s.dma = sg_dma_address(s.sgp);
2439 		else
2440 			s.pfn = page_to_pfn(sg_page(s.sgp));
2441 	}
2442 
2443 	return s;
2444 }
2445 
2446 static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2447 {
2448 	++sg;
2449 	if (unlikely(sg_is_chain(sg)))
2450 		sg = sg_chain_ptr(sg);
2451 	return sg;
2452 }
2453 
2454 /**
2455  * __sg_next - return the next scatterlist entry in a list
2456  * @sg:		The current sg entry
2457  *
2458  * Description:
2459  *   If the entry is the last, return NULL; otherwise, step to the next
2460  *   element in the array (@sg + 1). If that's a chain pointer, follow it;
2461  *   otherwise just return the pointer to the current element.
2462  **/
2463 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2464 {
2465 #ifdef CONFIG_DEBUG_SG
2466 	BUG_ON(sg->sg_magic != SG_MAGIC);
2467 #endif
2468 	return sg_is_last(sg) ? NULL : ____sg_next(sg);
2469 }
2470 
2471 /**
2472  * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2473  * @__dmap:	DMA address (output)
2474  * @__iter:	'struct sgt_iter' (iterator state, internal)
2475  * @__sgt:	sg_table to iterate over (input)
2476  */
2477 #define for_each_sgt_dma(__dmap, __iter, __sgt)				\
2478 	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
2479 	     ((__dmap) = (__iter).dma + (__iter).curr);			\
2480 	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
2481 	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
2482 
2483 /**
2484  * for_each_sgt_page - iterate over the pages of the given sg_table
2485  * @__pp:	page pointer (output)
2486  * @__iter:	'struct sgt_iter' (iterator state, internal)
2487  * @__sgt:	sg_table to iterate over (input)
2488  */
2489 #define for_each_sgt_page(__pp, __iter, __sgt)				\
2490 	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
2491 	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
2492 	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2493 	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
2494 	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
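/*
 * Usage sketch for the iterators above (assumes @sgt is a valid,
 * dma-mapped sg_table owned by the caller):
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	for_each_sgt_dma(dma, iter, sgt)
 *		pr_debug("mapped at %pad\n", &dma);
 *
 *	for_each_sgt_page(page, iter, sgt)
 *		set_page_dirty(page);
 */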
2495 
2496 static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
2497 {
2498 	unsigned int page_sizes;
2499 
2500 	page_sizes = 0;
2501 	while (sg) {
2502 		GEM_BUG_ON(sg->offset);
2503 		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
2504 		page_sizes |= sg->length;
2505 		sg = __sg_next(sg);
2506 	}
2507 
2508 	return page_sizes;
2509 }
2510 
2511 static inline unsigned int i915_sg_segment_size(void)
2512 {
2513 	unsigned int size = swiotlb_max_segment();
2514 
2515 	if (size == 0)
2516 		return SCATTERLIST_MAX_SEGMENT;
2517 
2518 	size = rounddown(size, PAGE_SIZE);
2519 	/* swiotlb_max_segment() can return 1 byte when it means one page. */
2520 	if (size < PAGE_SIZE)
2521 		size = PAGE_SIZE;
2522 
2523 	return size;
2524 }
2525 
2526 static inline const struct intel_device_info *
2527 intel_info(const struct drm_i915_private *dev_priv)
2528 {
2529 	return &dev_priv->info;
2530 }
2531 
2532 #define INTEL_INFO(dev_priv)	intel_info((dev_priv))
2533 
2534 #define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
2535 #define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
2536 
2537 #define REVID_FOREVER		0xff
2538 #define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
2539 
2540 #define GEN_FOREVER (0)
2541 
2542 #define INTEL_GEN_MASK(s, e) ( \
2543 	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
2544 	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
2545 	GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
2546 		(s) != GEN_FOREVER ? (s) - 1 : 0) \
2547 )
2548 
2549 /*
2550  * Returns true if Gen is in inclusive range [Start, End].
2551  *
2552  * Use GEN_FOREVER for an unbound start and/or end.
2553  */
2554 #define IS_GEN(dev_priv, s, e) \
2555 	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
2556 
2557 /*
2558  * Return true if revision is in range [since,until] inclusive.
2559  *
2560  * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2561  */
2562 #define IS_REVID(p, since, until) \
2563 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
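/*
 * For example, IS_GEN(dev_priv, 6, 7) is true only on gen6/gen7 parts,
 * IS_GEN(dev_priv, 9, GEN_FOREVER) on gen9 and anything newer, and
 * IS_REVID(dev_priv, 0, BXT_REVID_B0) on revisions up to and including B0.
 */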
2564 
2565 #define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
2566 
2567 #define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
2568 #define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
2569 #define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
2570 #define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
2571 #define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
2572 #define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
2573 #define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
2574 #define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
2575 #define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
2576 #define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
2577 #define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
2578 #define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
2579 #define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
2580 #define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
2581 #define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
2582 #define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
2583 #define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
2584 #define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
2585 #define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
2586 #define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
2587 				 (dev_priv)->info.gt == 1)
2588 #define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
2589 #define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
2590 #define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
2591 #define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
2592 #define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
2593 #define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
2594 #define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
2595 #define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
2596 #define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
2597 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
2598 #define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
2599 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2600 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2601 #define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
2602 				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
2603 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
2604 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
2605 /* ULX machines are also considered ULT. */
2606 #define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
2607 				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
2608 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
2609 				 (dev_priv)->info.gt == 3)
2610 #define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
2611 				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
2612 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
2613 				 (dev_priv)->info.gt == 3)
2614 /* ULX machines are also considered ULT. */
2615 #define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
2616 				 INTEL_DEVID(dev_priv) == 0x0A1E)
2617 #define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
2618 				 INTEL_DEVID(dev_priv) == 0x1913 || \
2619 				 INTEL_DEVID(dev_priv) == 0x1916 || \
2620 				 INTEL_DEVID(dev_priv) == 0x1921 || \
2621 				 INTEL_DEVID(dev_priv) == 0x1926)
2622 #define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
2623 				 INTEL_DEVID(dev_priv) == 0x1915 || \
2624 				 INTEL_DEVID(dev_priv) == 0x191E)
2625 #define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
2626 				 INTEL_DEVID(dev_priv) == 0x5913 || \
2627 				 INTEL_DEVID(dev_priv) == 0x5916 || \
2628 				 INTEL_DEVID(dev_priv) == 0x5921 || \
2629 				 INTEL_DEVID(dev_priv) == 0x5926)
2630 #define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
2631 				 INTEL_DEVID(dev_priv) == 0x5915 || \
2632 				 INTEL_DEVID(dev_priv) == 0x591E)
2633 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2634 				 (dev_priv)->info.gt == 2)
2635 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2636 				 (dev_priv)->info.gt == 3)
2637 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2638 				 (dev_priv)->info.gt == 4)
2639 #define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
2640 				 (dev_priv)->info.gt == 2)
2641 #define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
2642 				 (dev_priv)->info.gt == 3)
2643 #define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
2644 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
2645 #define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
2646 				 (dev_priv)->info.gt == 2)
2647 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
2648 				 (dev_priv)->info.gt == 3)
2649 
2650 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
2651 
2652 #define SKL_REVID_A0		0x0
2653 #define SKL_REVID_B0		0x1
2654 #define SKL_REVID_C0		0x2
2655 #define SKL_REVID_D0		0x3
2656 #define SKL_REVID_E0		0x4
2657 #define SKL_REVID_F0		0x5
2658 #define SKL_REVID_G0		0x6
2659 #define SKL_REVID_H0		0x7
2660 
2661 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
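/*
 * Typically used to bound a workaround to the affected steppings, e.g.
 * (apply_foo_workaround() is a hypothetical helper):
 *
 *	if (IS_SKL_REVID(dev_priv, SKL_REVID_A0, SKL_REVID_D0))
 *		apply_foo_workaround(dev_priv);
 */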
2662 
2663 #define BXT_REVID_A0		0x0
2664 #define BXT_REVID_A1		0x1
2665 #define BXT_REVID_B0		0x3
2666 #define BXT_REVID_B_LAST	0x8
2667 #define BXT_REVID_C0		0x9
2668 
2669 #define IS_BXT_REVID(dev_priv, since, until) \
2670 	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
2671 
2672 #define KBL_REVID_A0		0x0
2673 #define KBL_REVID_B0		0x1
2674 #define KBL_REVID_C0		0x2
2675 #define KBL_REVID_D0		0x3
2676 #define KBL_REVID_E0		0x4
2677 
2678 #define IS_KBL_REVID(dev_priv, since, until) \
2679 	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2680 
2681 #define GLK_REVID_A0		0x0
2682 #define GLK_REVID_A1		0x1
2683 
2684 #define IS_GLK_REVID(dev_priv, since, until) \
2685 	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2686 
2687 #define CNL_REVID_A0		0x0
2688 #define CNL_REVID_B0		0x1
2689 #define CNL_REVID_C0		0x2
2690 
2691 #define IS_CNL_REVID(p, since, until) \
2692 	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
2693 
2694 /*
2695  * The genX designation typically refers to the render engine, so render
2696  * capability related checks should use IS_GEN, while display and other checks
2697  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2698  * chips, etc.).
2699  */
2700 #define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
2701 #define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
2702 #define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
2703 #define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
2704 #define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
2705 #define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
2706 #define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
2707 #define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
2708 #define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
2709 
2710 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
2711 #define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
2712 #define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))
2713 
2714 #define ENGINE_MASK(id)	BIT(id)
2715 #define RENDER_RING	ENGINE_MASK(RCS)
2716 #define BSD_RING	ENGINE_MASK(VCS)
2717 #define BLT_RING	ENGINE_MASK(BCS)
2718 #define VEBOX_RING	ENGINE_MASK(VECS)
2719 #define BSD2_RING	ENGINE_MASK(VCS2)
2720 #define ALL_ENGINES	(~0)
2721 
2722 #define HAS_ENGINE(dev_priv, id) \
2723 	(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
2724 
2725 #define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
2726 #define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
2727 #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
2728 #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
2729 
2730 #define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
2731 
2732 #define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)
2733 #define HAS_SNOOP(dev_priv)	((dev_priv)->info.has_snoop)
2734 #define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
2735 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
2736 				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2737 
2738 #define HWS_NEEDS_PHYSICAL(dev_priv)	((dev_priv)->info.hws_needs_physical)
2739 
2740 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2741 		((dev_priv)->info.has_logical_ring_contexts)
2742 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
2743 		((dev_priv)->info.has_logical_ring_preemption)
2744 
2745 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
2746 
2747 #define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
2748 #define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
2749 #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
2750 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
2751 	GEM_BUG_ON((sizes) == 0); \
2752 	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
2753 })
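/*
 * HAS_PAGE_SIZES() answers whether *all* of the given sizes are
 * supported; a sketch, assuming the I915_GTT_PAGE_SIZE_* flags from
 * i915_gem_gtt.h:
 *
 *	bool has_2m = HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_2M);
 */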
2754 
2755 #define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
2756 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
2757 		((dev_priv)->info.overlay_needs_physical)
2758 
2759 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
2760 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
2761 
2762 /* WaRsDisableCoarsePowerGating:skl,bxt */
2763 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2764 	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
2765 
2766 /*
2767  * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
2768  * even when in MSI mode. This results in spurious interrupt warnings if the
2769  * legacy irq no. is shared with another device. The kernel then disables that
2770  * interrupt source and so prevents the other device from working properly.
2771  *
2772  * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
2773  * interrupts.
2774  */
2775 #define HAS_AUX_IRQ(dev_priv)   true
2776 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
2777 
2778 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2779  * rows, which changed the alignment requirements and fence programming.
2780  */
2781 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
2782 					 !(IS_I915G(dev_priv) || \
2783 					 IS_I915GM(dev_priv)))
2784 #define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
2785 #define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
2786 
2787 #define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
2788 #define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
2789 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
2790 
2791 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
2792 
2793 #define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)
2794 
2795 #define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
2796 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
2797 #define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
2798 
2799 #define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
2800 #define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
2801 #define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
2802 
2803 #define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
2804 
2805 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
2806 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
2807 
2808 #define HAS_IPC(dev_priv)		 ((dev_priv)->info.has_ipc)
2809 
2810 /*
2811  * For now, anything with a GuC requires uCode loading, and then supports
2812  * command submission once loaded. But these are logically independent
2813  * properties, so we have separate macros to test them.
2814  */
2815 #define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
2816 #define HAS_GUC_CT(dev_priv)	((dev_priv)->info.has_guc_ct)
2817 #define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
2818 #define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))
2819 
2820 	/* For now, anything with a GuC also has a HuC */
2821 #define HAS_HUC(dev_priv)	(HAS_GUC(dev_priv))
2822 #define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
2823 
2824 /* Having a GuC is not the same as using a GuC */
2825 #define USES_GUC(dev_priv)		intel_uc_is_using_guc()
2826 #define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
2827 #define USES_HUC(dev_priv)		intel_uc_is_using_huc()
2828 
2829 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
2830 
2831 #define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)
2832 
2833 #define INTEL_PCH_DEVICE_ID_MASK		0xff80
2834 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2835 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
2836 #define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
2837 #define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
2838 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
2839 #define INTEL_PCH_WPT_DEVICE_ID_TYPE		0x8c80
2840 #define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE		0x9c80
2841 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
2842 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
2843 #define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
2844 #define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
2845 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
2846 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
2847 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
2848 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
2849 
2850 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2851 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
2852 #define HAS_PCH_CNP_LP(dev_priv) \
2853 	((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
2854 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2855 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2856 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
2857 #define HAS_PCH_LPT_LP(dev_priv) \
2858 	((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
2859 	 (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
2860 #define HAS_PCH_LPT_H(dev_priv) \
2861 	((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
2862 	 (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
2863 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2864 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2865 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2866 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
2867 
2868 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
2869 
2870 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
2871 
2872 /* DPF == dynamic parity feature */
2873 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
2874 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2875 				 2 : HAS_L3_DPF(dev_priv))
2876 
2877 #define GT_FREQUENCY_MULTIPLIER 50
2878 #define GEN9_FREQ_SCALER 3
2879 
2880 #include "i915_trace.h"
2881 
2882 static inline bool intel_vtd_active(void)
2883 {
2884 #ifdef CONFIG_INTEL_IOMMU
2885 	if (intel_iommu_gfx_mapped)
2886 		return true;
2887 #endif
2888 	return false;
2889 }
2890 
2891 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2892 {
2893 	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
2894 }
2895 
2896 static inline bool
2897 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2898 {
2899 	return IS_BROXTON(dev_priv) && intel_vtd_active();
2900 }
2901 
2902 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2903 				int enable_ppgtt);
2904 
2905 /* i915_drv.c */
2906 void __printf(3, 4)
2907 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2908 	      const char *fmt, ...);
2909 
2910 #define i915_report_error(dev_priv, fmt, ...)				   \
2911 	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2912 
2913 #ifdef CONFIG_COMPAT
2914 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2915 			      unsigned long arg);
2916 #else
2917 #define i915_compat_ioctl NULL
2918 #endif
2919 extern const struct dev_pm_ops i915_pm_ops;
2920 
2921 extern int i915_driver_load(struct pci_dev *pdev,
2922 			    const struct pci_device_id *ent);
2923 extern void i915_driver_unload(struct drm_device *dev);
2924 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2925 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2926 
2927 #define I915_RESET_QUIET BIT(0)
2928 extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
2929 extern int i915_reset_engine(struct intel_engine_cs *engine,
2930 			     unsigned int flags);
2931 
2932 extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
2933 extern int intel_reset_guc(struct drm_i915_private *dev_priv);
2934 extern int intel_guc_reset_engine(struct intel_guc *guc,
2935 				  struct intel_engine_cs *engine);
2936 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2937 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
2938 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2939 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2940 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2941 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2942 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2943 
2944 int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
2945 int intel_engines_init(struct drm_i915_private *dev_priv);
2946 
2947 /* intel_hotplug.c */
2948 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2949 			   u32 pin_mask, u32 long_mask);
2950 void intel_hpd_init(struct drm_i915_private *dev_priv);
2951 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2952 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2953 enum port intel_hpd_pin_to_port(enum hpd_pin pin);
2954 enum hpd_pin intel_hpd_pin(enum port port);
2955 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2956 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2957 
2958 /* i915_irq.c */
2959 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2960 {
2961 	unsigned long delay;
2962 
2963 	if (unlikely(!i915_modparams.enable_hangcheck))
2964 		return;
2965 
2966 	/* Don't continually defer the hangcheck so that it is always run at
2967 	 * least once after work has been scheduled on any ring. Otherwise,
2968 	 * we will ignore a hung ring if a second ring is kept busy.
2969 	 */
2970 
2971 	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2972 	queue_delayed_work(system_long_wq,
2973 			   &dev_priv->gpu_error.hangcheck_work, delay);
2974 }
2975 
2976 __printf(3, 4)
2977 void i915_handle_error(struct drm_i915_private *dev_priv,
2978 		       u32 engine_mask,
2979 		       const char *fmt, ...);
2980 
2981 extern void intel_irq_init(struct drm_i915_private *dev_priv);
2982 extern void intel_irq_fini(struct drm_i915_private *dev_priv);
2983 int intel_irq_install(struct drm_i915_private *dev_priv);
2984 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2985 
2986 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2987 {
2988 	return dev_priv->gvt;
2989 }
2990 
2991 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2992 {
2993 	return dev_priv->vgpu.active;
2994 }
2995 
2996 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
2997 			      enum pipe pipe);
2998 void
2999 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3000 		     u32 status_mask);
3001 
3002 void
3003 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3004 		      u32 status_mask);
3005 
3006 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
3007 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
3008 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
3009 				   uint32_t mask,
3010 				   uint32_t bits);
3011 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
3012 			    uint32_t interrupt_mask,
3013 			    uint32_t enabled_irq_mask);
3014 static inline void
3015 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3016 {
3017 	ilk_update_display_irq(dev_priv, bits, bits);
3018 }
3019 static inline void
3020 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3021 {
3022 	ilk_update_display_irq(dev_priv, bits, 0);
3023 }
3024 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
3025 			 enum pipe pipe,
3026 			 uint32_t interrupt_mask,
3027 			 uint32_t enabled_irq_mask);
3028 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
3029 				       enum pipe pipe, uint32_t bits)
3030 {
3031 	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
3032 }
3033 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
3034 					enum pipe pipe, uint32_t bits)
3035 {
3036 	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
3037 }
3038 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
3039 				  uint32_t interrupt_mask,
3040 				  uint32_t enabled_irq_mask);
3041 static inline void
3042 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3043 {
3044 	ibx_display_interrupt_update(dev_priv, bits, bits);
3045 }
3046 static inline void
3047 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3048 {
3049 	ibx_display_interrupt_update(dev_priv, bits, 0);
3050 }
3051 
3052 /* i915_gem.c */
3053 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3054 			  struct drm_file *file_priv);
3055 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3056 			 struct drm_file *file_priv);
3057 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3058 			  struct drm_file *file_priv);
3059 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3060 			struct drm_file *file_priv);
3061 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3062 			struct drm_file *file_priv);
3063 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3064 			      struct drm_file *file_priv);
3065 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3066 			     struct drm_file *file_priv);
3067 int i915_gem_execbuffer(struct drm_device *dev, void *data,
3068 			struct drm_file *file_priv);
3069 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
3070 			 struct drm_file *file_priv);
3071 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3072 			struct drm_file *file_priv);
3073 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3074 			       struct drm_file *file);
3075 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3076 			       struct drm_file *file);
3077 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3078 			    struct drm_file *file_priv);
3079 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3080 			   struct drm_file *file_priv);
3081 int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
3082 			      struct drm_file *file_priv);
3083 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
3084 			      struct drm_file *file_priv);
3085 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
3086 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
3087 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3088 			   struct drm_file *file);
3089 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3090 				struct drm_file *file_priv);
3091 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3092 			struct drm_file *file_priv);
3093 void i915_gem_sanitize(struct drm_i915_private *i915);
3094 int i915_gem_load_init(struct drm_i915_private *dev_priv);
3095 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
3096 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3097 int i915_gem_freeze(struct drm_i915_private *dev_priv);
3098 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3099 
3100 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
3101 void i915_gem_object_free(struct drm_i915_gem_object *obj);
3102 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3103 			 const struct drm_i915_gem_object_ops *ops);
3104 struct drm_i915_gem_object *
3105 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
3106 struct drm_i915_gem_object *
3107 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
3108 				 const void *data, size_t size);
3109 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
3110 void i915_gem_free_object(struct drm_gem_object *obj);
3111 
3112 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
3113 {
3114 	/* A single pass should suffice to release all the freed objects (along
3115 	 * most call paths), but be a little more paranoid since freeing
3116 	 * the objects does take a small amount of time, during which the RCU
3117 	 * callbacks could have added new objects into the freed list, and
3118 	 * armed the work again.
3119 	 */
3120 	do {
3121 		rcu_barrier();
3122 	} while (flush_work(&i915->mm.free_work));
3123 }
3124 
3125 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
3126 {
3127 	/*
3128 	 * Similar to objects above (see i915_gem_drain_freed_objects), in
3129 	 * general we have workers that are armed by RCU and then rearm
3130 	 * themselves in their callbacks. To be paranoid, we need to
3131 	 * drain the workqueue a second time after waiting for the RCU
3132 	 * grace period so that we catch work queued via RCU from the first
3133 	 * pass. As neither drain_workqueue() nor flush_workqueue() report
3134 	 * a result, we assume that no more than two passes are required
3135 	 * to catch all recursive RCU-delayed work.
3136 	 *
3137 	 */
3138 	int pass = 2;
3139 	do {
3140 		rcu_barrier();
3141 		drain_workqueue(i915->wq);
3142 	} while (--pass);
3143 }
3144 
3145 struct i915_vma * __must_check
3146 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3147 			 const struct i915_ggtt_view *view,
3148 			 u64 size,
3149 			 u64 alignment,
3150 			 u64 flags);
3151 
3152 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
3153 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
3154 
3155 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
3156 
3157 static inline int __sg_page_count(const struct scatterlist *sg)
3158 {
3159 	return sg->length >> PAGE_SHIFT;
3160 }
3161 
3162 struct scatterlist *
3163 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
3164 		       unsigned int n, unsigned int *offset);
3165 
3166 struct page *
3167 i915_gem_object_get_page(struct drm_i915_gem_object *obj,
3168 			 unsigned int n);
3169 
3170 struct page *
3171 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
3172 			       unsigned int n);
3173 
3174 dma_addr_t
3175 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
3176 				unsigned long n);
3177 
3178 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
3179 				 struct sg_table *pages,
3180 				 unsigned int sg_page_sizes);
3181 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
3182 
3183 static inline int __must_check
3184 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3185 {
3186 	might_lock(&obj->mm.lock);
3187 
3188 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
3189 		return 0;
3190 
3191 	return __i915_gem_object_get_pages(obj);
3192 }
3193 
3194 static inline bool
3195 i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
3196 {
3197 	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
3198 }
3199 
3200 static inline void
3201 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3202 {
3203 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
3204 
3205 	atomic_inc(&obj->mm.pages_pin_count);
3206 }
3207 
3208 static inline bool
3209 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
3210 {
3211 	return atomic_read(&obj->mm.pages_pin_count);
3212 }
3213 
3214 static inline void
3215 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3216 {
3217 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
3218 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
3219 
3220 	atomic_dec(&obj->mm.pages_pin_count);
3221 }
3222 
3223 static inline void
3224 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3225 {
3226 	__i915_gem_object_unpin_pages(obj);
3227 }
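/*
 * Page pinning is a refcount; a sketch of the usual pairing:
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... the backing store cannot be reaped here ...
 *	i915_gem_object_unpin_pages(obj);
 */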
3228 
3229 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
3230 	I915_MM_NORMAL = 0,
3231 	I915_MM_SHRINKER
3232 };
3233 
3234 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
3235 				 enum i915_mm_subclass subclass);
3236 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
3237 
3238 enum i915_map_type {
3239 	I915_MAP_WB = 0,
3240 	I915_MAP_WC,
3241 #define I915_MAP_OVERRIDE BIT(31)
3242 	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
3243 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
3244 };
3245 
3246 /**
3247  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
3248  * @obj: the object to map into kernel address space
3249  * @type: the type of mapping, used to select pgprot_t
3250  *
3251  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
3252  * pages and then returns a contiguous mapping of the backing storage into
3253  * the kernel address space. Based on the @type of mapping, the PTE will be
3254  * set to either WriteBack or WriteCombine (via pgprot_t).
3255  *
3256  * The caller is responsible for calling i915_gem_object_unpin_map() when the
3257  * mapping is no longer required.
3258  *
3259  * Returns the pointer through which to access the mapped object, or an
3260  * ERR_PTR() on error.
3261  */
3262 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
3263 					   enum i915_map_type type);
3264 
3265 /**
3266  * i915_gem_object_unpin_map - releases an earlier mapping
3267  * @obj: the object to unmap
3268  *
3269  * After pinning the object and mapping its pages, once you are finished
3270  * with your access, call i915_gem_object_unpin_map() to release the pin
3271  * upon the mapping. Once the pin count reaches zero, that mapping may be
3272  * removed.
3273  */
3274 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
3275 {
3276 	i915_gem_object_unpin_pages(obj);
3277 }
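/*
 * Typical pin_map/unpin_map pairing (a sketch; @data and @len are a
 * hypothetical caller's source buffer):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */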
3278 
3279 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3280 				    unsigned int *needs_clflush);
3281 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3282 				     unsigned int *needs_clflush);
3283 #define CLFLUSH_BEFORE	BIT(0)
3284 #define CLFLUSH_AFTER	BIT(1)
3285 #define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
3286 
3287 static inline void
3288 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
3289 {
3290 	i915_gem_object_unpin_pages(obj);
3291 }
3292 
3293 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
3294 void i915_vma_move_to_active(struct i915_vma *vma,
3295 			     struct drm_i915_gem_request *req,
3296 			     unsigned int flags);
3297 int i915_gem_dumb_create(struct drm_file *file_priv,
3298 			 struct drm_device *dev,
3299 			 struct drm_mode_create_dumb *args);
3300 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3301 		      uint32_t handle, uint64_t *offset);
3302 int i915_gem_mmap_gtt_version(void);
3303 
3304 void i915_gem_track_fb(struct drm_i915_gem_object *old,
3305 		       struct drm_i915_gem_object *new,
3306 		       unsigned frontbuffer_bits);
3307 
3308 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
3309 
3310 struct drm_i915_gem_request *
3311 i915_gem_find_active_request(struct intel_engine_cs *engine);
3312 
3313 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
3314 
3315 static inline bool i915_reset_backoff(struct i915_gpu_error *error)
3316 {
3317 	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
3318 }
3319 
3320 static inline bool i915_reset_handoff(struct i915_gpu_error *error)
3321 {
3322 	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
3323 }
3324 
3325 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
3326 {
3327 	return unlikely(test_bit(I915_WEDGED, &error->flags));
3328 }
3329 
3330 static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
3331 {
3332 	return i915_reset_backoff(error) | i915_terminally_wedged(error);
3333 }
3334 
3335 static inline u32 i915_reset_count(struct i915_gpu_error *error)
3336 {
3337 	return READ_ONCE(error->reset_count);
3338 }
3339 
3340 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
3341 					  struct intel_engine_cs *engine)
3342 {
3343 	return READ_ONCE(error->reset_engine_count[engine->id]);
3344 }
3345 
3346 struct drm_i915_gem_request *
3347 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
3348 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3349 void i915_gem_reset(struct drm_i915_private *dev_priv);
3350 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
3351 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3352 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3353 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
3354 void i915_gem_reset_engine(struct intel_engine_cs *engine,
3355 			   struct drm_i915_gem_request *request);
3356 
3357 void i915_gem_init_mmio(struct drm_i915_private *i915);
3358 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
3359 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
3360 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
3361 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
3362 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
3363 			   unsigned int flags);
3364 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
3365 void i915_gem_resume(struct drm_i915_private *dev_priv);
3366 int i915_gem_fault(struct vm_fault *vmf);
3367 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3368 			 unsigned int flags,
3369 			 long timeout,
3370 			 struct intel_rps_client *rps);
3371 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
3372 				  unsigned int flags,
3373 				  int priority);
3374 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
3375 
3376 int __must_check
3377 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
3378 int __must_check
3379 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
3380 int __must_check
3381 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
3382 struct i915_vma * __must_check
3383 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3384 				     u32 alignment,
3385 				     const struct i915_ggtt_view *view);
3386 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
3387 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
3388 				int align);
3389 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
3390 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
3391 
3392 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3393 				    enum i915_cache_level cache_level);
3394 
3395 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3396 				struct dma_buf *dma_buf);
3397 
3398 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3399 				struct drm_gem_object *gem_obj, int flags);
3400 
3401 static inline struct i915_hw_ppgtt *
3402 i915_vm_to_ppgtt(struct i915_address_space *vm)
3403 {
3404 	return container_of(vm, struct i915_hw_ppgtt, base);
3405 }
3406 
3407 /* i915_gem_fence_reg.c */
3408 struct drm_i915_fence_reg *
3409 i915_reserve_fence(struct drm_i915_private *dev_priv);
3410 void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
3411 
3412 void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
3413 void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
3414 
3415 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
3416 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
3417 				       struct sg_table *pages);
3418 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
3419 					 struct sg_table *pages);
3420 
3421 static inline struct i915_gem_context *
3422 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
3423 {
3424 	return idr_find(&file_priv->context_idr, id);
3425 }
3426 
3427 static inline struct i915_gem_context *
3428 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3429 {
3430 	struct i915_gem_context *ctx;
3431 
3432 	rcu_read_lock();
3433 	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
3434 	if (ctx && !kref_get_unless_zero(&ctx->ref))
3435 		ctx = NULL;
3436 	rcu_read_unlock();
3437 
3438 	return ctx;
3439 }
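/*
 * Illustrative usage sketch (not part of the driver): the RCU read lock
 * keeps the context from being freed while we probe it, and
 * kref_get_unless_zero() refuses a context already on its way to
 * destruction, so on success the caller owns a full reference:
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_lookup(file_priv, id);
 *	if (!ctx)
 *		return -ENOENT;
 *	... use ctx ...
 *	i915_gem_context_put(ctx);
 */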
3440 
3441 static inline struct intel_timeline *
3442 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
3443 				 struct intel_engine_cs *engine)
3444 {
3445 	struct i915_address_space *vm;
3446 
3447 	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
3448 	return &vm->timeline.engine[engine->id];
3449 }
3450 
3451 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3452 			 struct drm_file *file);
3453 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3454 			       struct drm_file *file);
3455 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
3456 				  struct drm_file *file);
3457 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
3458 			    struct i915_gem_context *ctx,
3459 			    uint32_t *reg_state);
3460 
3461 /* i915_gem_evict.c */
3462 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
3463 					  u64 min_size, u64 alignment,
3464 					  unsigned cache_level,
3465 					  u64 start, u64 end,
3466 					  unsigned flags);
3467 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
3468 					 struct drm_mm_node *node,
3469 					 unsigned int flags);
3470 int i915_gem_evict_vm(struct i915_address_space *vm);
3471 
3472 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
3473 
3474 /* belongs in i915_gem_gtt.h */
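/*
 * Make CPU writes visible to the GPU: wmb() drains the CPU's write
 * combining buffers, and on pre-gen6 hardware the GMCH's own global
 * write buffer must additionally be flushed.
 */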
3475 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
3476 {
3477 	wmb();
3478 	if (INTEL_GEN(dev_priv) < 6)
3479 		intel_gtt_chipset_flush();
3480 }
3481 
3482 /* i915_gem_stolen.c */
3483 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3484 				struct drm_mm_node *node, u64 size,
3485 				unsigned alignment);
3486 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3487 					 struct drm_mm_node *node, u64 size,
3488 					 unsigned alignment, u64 start,
3489 					 u64 end);
3490 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3491 				 struct drm_mm_node *node);
3492 int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
3493 void i915_gem_cleanup_stolen(struct drm_device *dev);
3494 struct drm_i915_gem_object *
3495 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
3496 			      resource_size_t size);
3497 struct drm_i915_gem_object *
3498 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
3499 					       resource_size_t stolen_offset,
3500 					       resource_size_t gtt_offset,
3501 					       resource_size_t size);
3502 
3503 /* i915_gem_internal.c */
3504 struct drm_i915_gem_object *
3505 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
3506 				phys_addr_t size);
3507 
3508 /* i915_gem_shrinker.c */
3509 unsigned long i915_gem_shrink(struct drm_i915_private *i915,
3510 			      unsigned long target,
3511 			      unsigned long *nr_scanned,
3512 			      unsigned flags);
3513 #define I915_SHRINK_PURGEABLE 0x1
3514 #define I915_SHRINK_UNBOUND 0x2
3515 #define I915_SHRINK_BOUND 0x4
3516 #define I915_SHRINK_ACTIVE 0x8
3517 #define I915_SHRINK_VMAPS 0x10
3518 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
3519 void i915_gem_shrinker_register(struct drm_i915_private *i915);
3520 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
3521 
3522 
3523 /* i915_gem_tiling.c */
3524 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
3525 {
3526 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3527 
3528 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3529 		i915_gem_object_is_tiled(obj);
3530 }
3531 
3532 u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
3533 			unsigned int tiling, unsigned int stride);
3534 u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
3535 			     unsigned int tiling, unsigned int stride);
3536 
3537 /* i915_debugfs.c */
3538 #ifdef CONFIG_DEBUG_FS
3539 int i915_debugfs_register(struct drm_i915_private *dev_priv);
3540 int i915_debugfs_connector_add(struct drm_connector *connector);
3541 void intel_display_crc_init(struct drm_i915_private *dev_priv);
3542 #else
3543 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
3544 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3545 { return 0; }
3546 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
3547 #endif
3548 
3549 /* i915_gpu_error.c */
3550 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
3551 
3552 __printf(2, 3)
3553 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
3554 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3555 			    const struct i915_gpu_state *gpu);
3556 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
3557 			      struct drm_i915_private *i915,
3558 			      size_t count, loff_t pos);
3559 static inline void i915_error_state_buf_release(
3560 	struct drm_i915_error_state_buf *eb)
3561 {
3562 	kfree(eb->buf);
3563 }
3564 
3565 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
3566 void i915_capture_error_state(struct drm_i915_private *dev_priv,
3567 			      u32 engine_mask,
3568 			      const char *error_msg);
3569 
3570 static inline struct i915_gpu_state *
3571 i915_gpu_state_get(struct i915_gpu_state *gpu)
3572 {
3573 	kref_get(&gpu->ref);
3574 	return gpu;
3575 }
3576 
3577 void __i915_gpu_state_free(struct kref *kref);
3578 static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
3579 {
3580 	if (gpu)
3581 		kref_put(&gpu->ref, __i915_gpu_state_free);
3582 }
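/*
 * Illustrative sketch: the snapshot returned by i915_capture_gpu_state()
 * is handed to the caller with a reference held, which must be dropped
 * via i915_gpu_state_put() once consumed:
 *
 *	struct i915_gpu_state *gpu;
 *
 *	gpu = i915_capture_gpu_state(i915);
 *	if (gpu) {
 *		... print or serialise the error state ...
 *		i915_gpu_state_put(gpu);
 *	}
 */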
3583 
3584 struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
3585 void i915_reset_error_state(struct drm_i915_private *i915);
3586 
3587 #else
3588 
3589 static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
3590 					    u32 engine_mask,
3591 					    const char *error_msg)
3592 {
3593 }
3594 
3595 static inline struct i915_gpu_state *
3596 i915_first_error_state(struct drm_i915_private *i915)
3597 {
3598 	return NULL;
3599 }
3600 
3601 static inline void i915_reset_error_state(struct drm_i915_private *i915)
3602 {
3603 }
3604 
3605 #endif
3606 
3607 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3608 
3609 /* i915_cmd_parser.c */
3610 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3611 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
3612 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
3613 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
3614 			    struct drm_i915_gem_object *batch_obj,
3615 			    struct drm_i915_gem_object *shadow_batch_obj,
3616 			    u32 batch_start_offset,
3617 			    u32 batch_len,
3618 			    bool is_master);
3619 
3620 /* i915_perf.c */
3621 extern void i915_perf_init(struct drm_i915_private *dev_priv);
3622 extern void i915_perf_fini(struct drm_i915_private *dev_priv);
3623 extern void i915_perf_register(struct drm_i915_private *dev_priv);
3624 extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
3625 
3626 /* i915_suspend.c */
3627 extern int i915_save_state(struct drm_i915_private *dev_priv);
3628 extern int i915_restore_state(struct drm_i915_private *dev_priv);
3629 
3630 /* i915_sysfs.c */
3631 void i915_setup_sysfs(struct drm_i915_private *dev_priv);
3632 void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
3633 
3634 /* intel_lpe_audio.c */
3635 int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
3636 void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
3637 void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
3638 void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
3639 			    enum pipe pipe, enum port port,
3640 			    const void *eld, int ls_clock, bool dp_output);
3641 
3642 /* intel_i2c.c */
3643 extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
3644 extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
3645 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
3646 				     unsigned int pin);
3647 
3648 extern struct i2c_adapter *
3649 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
3650 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3651 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
3652 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
3653 {
3654 	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
3655 }
3656 extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
3657 
3658 /* intel_bios.c */
3659 void intel_bios_init(struct drm_i915_private *dev_priv);
3660 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3661 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3662 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3663 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
3664 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3665 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3666 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
3667 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3668 				     enum port port);
3669 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3670 				enum port port);
3671 
3672 /* intel_acpi.c */
3673 #ifdef CONFIG_ACPI
3674 extern void intel_register_dsm_handler(void);
3675 extern void intel_unregister_dsm_handler(void);
3676 #else
3677 static inline void intel_register_dsm_handler(void) { return; }
3678 static inline void intel_unregister_dsm_handler(void) { return; }
3679 #endif /* CONFIG_ACPI */
3680 
3681 /* intel_device_info.c */
3682 static inline struct intel_device_info *
3683 mkwrite_device_info(struct drm_i915_private *dev_priv)
3684 {
3685 	return (struct intel_device_info *)&dev_priv->info;
3686 }
3687 
3688 /* modesetting */
3689 extern void intel_modeset_init_hw(struct drm_device *dev);
3690 extern int intel_modeset_init(struct drm_device *dev);
3691 extern void intel_modeset_cleanup(struct drm_device *dev);
3692 extern int intel_connector_register(struct drm_connector *);
3693 extern void intel_connector_unregister(struct drm_connector *);
3694 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
3695 				       bool state);
3696 extern void intel_display_resume(struct drm_device *dev);
3697 extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
3698 extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
3699 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3700 extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
3701 extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3702 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3703 				  bool enable);
3704 
3705 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3706 			struct drm_file *file);
3707 
3708 /* overlay */
3709 extern struct intel_overlay_error_state *
3710 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3711 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3712 					    struct intel_overlay_error_state *error);
3713 
3714 extern struct intel_display_error_state *
3715 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3716 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3717 					    struct intel_display_error_state *error);
3718 
3719 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3720 int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
3721 				    u32 val, int timeout_us);
3722 #define sandybridge_pcode_write(dev_priv, mbox, val)	\
3723 	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
3724 
3725 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
3726 		      u32 reply_mask, u32 reply, int timeout_base_ms);
3727 
3728 /* intel_sideband.c */
3729 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3730 int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3731 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3732 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
3733 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
3734 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3735 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3736 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3737 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3738 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3739 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3740 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
3741 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
3742 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3743 		   enum intel_sbi_destination destination);
3744 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3745 		     enum intel_sbi_destination destination);
3746 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3747 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3748 
3749 /* intel_dpio_phy.c */
3750 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
3751 			     enum dpio_phy *phy, enum dpio_channel *ch);
3752 void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
3753 				  enum port port, u32 margin, u32 scale,
3754 				  u32 enable, u32 deemphasis);
3755 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3756 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3757 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
3758 			    enum dpio_phy phy);
3759 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
3760 			      enum dpio_phy phy);
3761 uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
3762 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
3763 				     uint8_t lane_lat_optim_mask);
3764 uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
3765 
3766 void chv_set_phy_signal_level(struct intel_encoder *encoder,
3767 			      u32 deemph_reg_value, u32 margin_reg_value,
3768 			      bool uniq_trans_scale);
3769 void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3770 			      const struct intel_crtc_state *crtc_state,
3771 			      bool reset);
3772 void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
3773 			    const struct intel_crtc_state *crtc_state);
3774 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
3775 				const struct intel_crtc_state *crtc_state);
3776 void chv_phy_release_cl2_override(struct intel_encoder *encoder);
3777 void chv_phy_post_pll_disable(struct intel_encoder *encoder,
3778 			      const struct intel_crtc_state *old_crtc_state);
3779 
3780 void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3781 			      u32 demph_reg_value, u32 preemph_reg_value,
3782 			      u32 uniqtranscale_reg_value, u32 tx3_demph);
3783 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
3784 			    const struct intel_crtc_state *crtc_state);
3785 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
3786 				const struct intel_crtc_state *crtc_state);
3787 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
3788 			 const struct intel_crtc_state *old_crtc_state);
3789 
3790 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3791 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3792 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
3793 			   const i915_reg_t reg);
3794 
3795 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
3796 
3797 static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
3798 					 const i915_reg_t reg)
3799 {
3800 	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
3801 }
3802 
3803 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
3804 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
3805 
3806 #define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
3807 #define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
3808 #define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
3809 #define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
3810 
3811 #define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
3812 #define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
3813 #define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
3814 #define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
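/*
 * Note that these accessors expand to use a local variable named dev_priv.
 * Illustrative read-modify-write sketch (register and bit chosen only as
 * an example):
 *
 *	u32 tmp = I915_READ(GEN6_RP_CONTROL);
 *	I915_WRITE(GEN6_RP_CONTROL, tmp | GEN6_RP_ENABLE);
 *	POSTING_READ(GEN6_RP_CONTROL);
 */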
3815 
3816 /* Be very careful with read/write 64-bit values. On 32-bit machines, they
3817  * will be implemented using two 32-bit writes in an arbitrary order with
3818  * an arbitrary delay between them. This can cause the hardware to
3819  * act upon the intermediate value, possibly leading to corruption and
3820  * machine death. For this reason we do not support I915_WRITE64, or
3821  * dev_priv->uncore.funcs.mmio_writeq.
3822  *
3823  * When reading a 64-bit value as two 32-bit values, the delay may cause
3824  * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
3825  * occasionally a 64-bit register does not actualy support a full readq
3826  * and must be read using two 32-bit reads.
3827  *
3828  * You have been warned.
3829  */
3830 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3831 
3832 #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
3833 	u32 upper, lower, old_upper, loop = 0;				\
3834 	upper = I915_READ(upper_reg);					\
3835 	do {								\
3836 		old_upper = upper;					\
3837 		lower = I915_READ(lower_reg);				\
3838 		upper = I915_READ(upper_reg);				\
3839 	} while (upper != old_upper && loop++ < 2);			\
3840 	(u64)upper << 32 | lower; })
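/*
 * Illustrative sketch: reading a 64-bit timestamp exposed as a pair of
 * 32-bit registers (names assumed for the example); the loop above
 * retries if the upper half ticks over between the two reads:
 *
 *	u64 ts = I915_READ64_2x32(RING_TIMESTAMP(engine->mmio_base),
 *				  RING_TIMESTAMP_UDW(engine->mmio_base));
 */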
3841 
3842 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
3843 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
3844 
3845 #define __raw_read(x, s) \
3846 static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
3847 					     i915_reg_t reg) \
3848 { \
3849 	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
3850 }
3851 
3852 #define __raw_write(x, s) \
3853 static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
3854 				       i915_reg_t reg, uint##x##_t val) \
3855 { \
3856 	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
3857 }
3858 __raw_read(8, b)
3859 __raw_read(16, w)
3860 __raw_read(32, l)
3861 __raw_read(64, q)
3862 
3863 __raw_write(8, b)
3864 __raw_write(16, w)
3865 __raw_write(32, l)
3866 __raw_write(64, q)
3867 
3868 #undef __raw_read
3869 #undef __raw_write
3870 
3871 /* These are untraced mmio-accessors that are only valid to be used inside
3872  * critical sections, such as inside IRQ handlers, where forcewake is explicitly
3873  * controlled.
3874  *
3875  * Think twice, and think again, before using these.
3876  *
3877  * As an example, these accessors can possibly be used between:
3878  *
3879  * spin_lock_irq(&dev_priv->uncore.lock);
3880  * intel_uncore_forcewake_get__locked();
3881  *
3882  * and
3883  *
3884  * intel_uncore_forcewake_put__locked();
3885  * spin_unlock_irq(&dev_priv->uncore.lock);
3886  *
3887  *
3888  * Note: some registers may not need forcewake held, so
3889  * intel_uncore_forcewake_{get,put} can be omitted, see
3890  * intel_uncore_forcewake_for_reg().
3891  *
3892  * Certain architectures will die if the same cacheline is concurrently accessed
3893  * by different clients (e.g. on Ivybridge). Access to registers should
3894  * therefore generally be serialised, by either the dev_priv->uncore.lock or
3895  * a more localised lock guarding all access to that bank of registers.
3896  */
3897 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
3898 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
3899 #define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
3900 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
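/*
 * Illustrative sketch of the sequence described above (the register is
 * only an example):
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *
 *	I915_WRITE_FW(GEN6_RP_CONTROL, 0);
 *
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */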
3901 
3902 /* "Broadcast RGB" property */
3903 #define INTEL_BROADCAST_RGB_AUTO 0
3904 #define INTEL_BROADCAST_RGB_FULL 1
3905 #define INTEL_BROADCAST_RGB_LIMITED 2
3906 
3907 static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
3908 {
3909 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3910 		return VLV_VGACNTRL;
3911 	else if (INTEL_GEN(dev_priv) >= 5)
3912 		return CPU_VGACNTRL;
3913 	else
3914 		return VGACNTRL;
3915 }
3916 
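/*
 * Like msecs_to_jiffies(), but adds one jiffy (and clamps the result to
 * MAX_JIFFY_OFFSET) so the returned value is a guaranteed minimum: a bare
 * msecs_to_jiffies() timeout may expire up to one jiffy early.
 */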
3917 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3918 {
3919 	unsigned long j = msecs_to_jiffies(m);
3920 
3921 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3922 }
3923 
3924 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
3925 {
3926 	/* nsecs_to_jiffies64() does not guard against overflow */
3927 	if (NSEC_PER_SEC % HZ &&
3928 	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
3929 		return MAX_JIFFY_OFFSET;
3930 
3931 	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
3932 }
3933 
3934 static inline unsigned long
3935 timespec_to_jiffies_timeout(const struct timespec *value)
3936 {
3937 	unsigned long j = timespec_to_jiffies(value);
3938 
3939 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3940 }
3941 
3942 /*
3943  * If you need to wait X milliseconds between events A and B, but event B
3944  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
3945  * when event A happened, then just before event B you call this function and
3946  * pass the timestamp as the first argument, and X as the second argument.
3947  */
3948 static inline void
3949 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
3950 {
3951 	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
3952 
3953 	/*
3954 	 * Don't re-read the value of "jiffies" every time since it may change
3955 	 * behind our back and break the math.
3956 	 */
3957 	tmp_jiffies = jiffies;
3958 	target_jiffies = timestamp_jiffies +
3959 			 msecs_to_jiffies_timeout(to_wait_ms);
3960 
3961 	if (time_after(target_jiffies, tmp_jiffies)) {
3962 		remaining_jiffies = target_jiffies - tmp_jiffies;
3963 		while (remaining_jiffies)
3964 			remaining_jiffies =
3965 			    schedule_timeout_uninterruptible(remaining_jiffies);
3966 	}
3967 }
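/*
 * Illustrative sketch of the pattern described above (the 100ms delay is
 * only an example):
 *
 *	unsigned long event_a_jiffies = jiffies; (recorded at event A)
 *	...
 *	wait_remaining_ms_from_jiffies(event_a_jiffies, 100);
 *	... trigger event B ...
 */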
3968 
3969 static inline bool
3970 __i915_request_irq_complete(const struct drm_i915_gem_request *req)
3971 {
3972 	struct intel_engine_cs *engine = req->engine;
3973 	u32 seqno;
3974 
3975 	/* Note that the engine may have wrapped around the seqno, and
3976 	 * so our request->global_seqno will be ahead of the hardware,
3977 	 * even though it completed the request before wrapping. We catch
3978 	 * this by kicking all the waiters before resetting the seqno
3979 	 * in hardware, and also signal the fence.
3980 	 */
3981 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
3982 		return true;
3983 
3984 	/* The request was dequeued before we were awoken. We check after
3985 	 * inspecting the hw to confirm that this was the same request
3986 	 * that generated the HWS update. The memory barriers within
3987 	 * the request execution are sufficient to ensure that a check
3988 	 * after reading the value from hw matches this request.
3989 	 */
3990 	seqno = i915_gem_request_global_seqno(req);
3991 	if (!seqno)
3992 		return false;
3993 
3994 	/* Before we do the heavier coherent read of the seqno,
3995 	 * check the value (hopefully) in the CPU cacheline.
3996 	 */
3997 	if (__i915_gem_request_completed(req, seqno))
3998 		return true;
3999 
4000 	/* Ensure our read of the seqno is coherent so that we
4001 	 * do not "miss an interrupt" (i.e. if this is the last
4002 	 * request and the seqno write from the GPU is not visible
4003 	 * by the time the interrupt fires, we will see that the
4004 	 * request is incomplete and go back to sleep awaiting
4005 	 * another interrupt that will never come.)
4006 	 *
4007 	 * Strictly, we only need to do this once after an interrupt,
4008 	 * but it is easier and safer to do it every time the waiter
4009 	 * is woken.
4010 	 */
4011 	if (engine->irq_seqno_barrier &&
4012 	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
4013 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
4014 
4015 		/* The ordering of irq_posted versus applying the barrier
4016 		 * is crucial. The clearing of the current irq_posted must
4017 		 * be visible before we perform the barrier operation,
4018 		 * such that if a subsequent interrupt arrives, irq_posted
4019 		 * is reasserted and our task rewoken (which causes us to
4020 		 * do another __i915_request_irq_complete() immediately
4021 		 * and reapply the barrier). Conversely, if the clear
4022 		 * occurs after the barrier, then an interrupt that arrived
4023 		 * whilst we waited on the barrier would not trigger a
4024 		 * barrier on the next pass, and the read may not see the
4025 		 * seqno update.
4026 		 */
4027 		engine->irq_seqno_barrier(engine);
4028 
4029 		/* If we consume the irq, but we are no longer the bottom-half,
4030 		 * the real bottom-half may not have serialised their own
4031 		 * seqno check with the irq-barrier (i.e. may have inspected
4032 		 * the seqno before we believe it coherent since they see
4033 		 * irq_posted == false but we are still running).
4034 		 */
4035 		spin_lock_irq(&b->irq_lock);
4036 		if (b->irq_wait && b->irq_wait->tsk != current)
4037 			/* Note that if the bottom-half is changed as we
4038 			 * are sending the wake-up, the new bottom-half will
4039 			 * be woken by whomever made the change. We only have
4040 			 * to worry about when we steal the irq-posted for
4041 			 * ourself.
4042 			 */
4043 			wake_up_process(b->irq_wait->tsk);
4044 		spin_unlock_irq(&b->irq_lock);
4045 
4046 		if (__i915_gem_request_completed(req, seqno))
4047 			return true;
4048 	}
4049 
4050 	return false;
4051 }
4052 
4053 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
4054 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
4055 
4056 /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
4057  * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
4058  * perform the operation. To check beforehand, pass in the parameters to
4059  * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
4060  * you only need to pass in the minor offsets; page-aligned pointers are
4061  * always valid.
4062  *
4063  * To check only for SSE4.1, in the foreknowledge that the future use
4064  * will be correctly aligned, use i915_has_memcpy_from_wc().
4065  */
4066 #define i915_can_memcpy_from_wc(dst, src, len) \
4067 	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
4068 
4069 #define i915_has_memcpy_from_wc() \
4070 	i915_memcpy_from_wc(NULL, NULL, 0)
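/*
 * Illustrative sketch: attempt the accelerated WC read first and fall
 * back to a plain (uncached, slow) memcpy when the alignment or SSE4.1
 * requirements are not met:
 *
 *	if (!i915_memcpy_from_wc(dst, src, len))
 *		memcpy(dst, src, len);
 */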
4071 
4072 /* i915_mm.c */
4073 int remap_io_mapping(struct vm_area_struct *vma,
4074 		     unsigned long addr, unsigned long pfn, unsigned long size,
4075 		     struct io_mapping *iomap);
4076 
4077 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
4078 {
4079 	if (INTEL_GEN(i915) >= 10)
4080 		return CNL_HWS_CSB_WRITE_INDEX;
4081 	else
4082 		return I915_HWS_CSB_WRITE_INDEX;
4083 }
4084 
4085 #endif
4086