1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29 
30 #ifndef _I915_DRV_H_
31 #define _I915_DRV_H_
32 
33 #include "i915_reg.h"
34 #include "intel_bios.h"
35 #include <linux/io-mapping.h>
36 
37 /* General customization:
38  */
39 
40 #define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
41 
42 #define DRIVER_NAME		"i915"
43 #define DRIVER_DESC		"Intel Graphics"
44 #define DRIVER_DATE		"20080730"
45 
46 enum pipe {
47 	PIPE_A = 0,
48 	PIPE_B,
49 };
50 
51 enum plane {
52 	PLANE_A = 0,
53 	PLANE_B,
54 };
55 
56 #define I915_NUM_PIPE	2
57 
58 /* Interface history:
59  *
60  * 1.1: Original.
61  * 1.2: Add Power Management
62  * 1.3: Add vblank support
63  * 1.4: Fix cmdbuffer path, add heap destroy
64  * 1.5: Add vblank pipe configuration
65  * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
66  *      - Support vertical blank on secondary display pipe
67  */
68 #define DRIVER_MAJOR		1
69 #define DRIVER_MINOR		6
70 #define DRIVER_PATCHLEVEL	0
71 
72 #define WATCH_COHERENCY	0
73 #define WATCH_BUF	0
74 #define WATCH_EXEC	0
75 #define WATCH_LRU	0
76 #define WATCH_RELOC	0
77 #define WATCH_INACTIVE	0
78 #define WATCH_PWRITE	0
79 
80 #define I915_GEM_PHYS_CURSOR_0 1
81 #define I915_GEM_PHYS_CURSOR_1 2
82 #define I915_GEM_PHYS_OVERLAY_REGS 3
83 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
84 
85 struct drm_i915_gem_phys_object {
86 	int id;
87 	struct page **page_list;
88 	drm_dma_handle_t *handle;
89 	struct drm_gem_object *cur_obj;
90 };
91 
92 typedef struct _drm_i915_ring_buffer {
93 	unsigned long Size;
94 	u8 *virtual_start;
95 	int head;
96 	int tail;
97 	int space;
98 	drm_local_map_t map;
99 	struct drm_gem_object *ring_obj;
100 } drm_i915_ring_buffer_t;
101 
102 struct mem_block {
103 	struct mem_block *next;
104 	struct mem_block *prev;
105 	int start;
106 	int size;
107 	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
108 };
109 
110 struct opregion_header;
111 struct opregion_acpi;
112 struct opregion_swsci;
113 struct opregion_asle;
114 
115 struct intel_opregion {
116 	struct opregion_header *header;
117 	struct opregion_acpi *acpi;
118 	struct opregion_swsci *swsci;
119 	struct opregion_asle *asle;
120 	int enabled;
121 };
122 
123 struct drm_i915_master_private {
124 	drm_local_map_t *sarea;
125 	struct _drm_i915_sarea *sarea_priv;
126 };
127 #define I915_FENCE_REG_NONE -1
128 
129 struct drm_i915_fence_reg {
130 	struct drm_gem_object *obj;
131 };
132 
133 struct sdvo_device_mapping {
134 	u8 dvo_port;
135 	u8 slave_addr;
136 	u8 dvo_wiring;
137 	u8 initialized;
138 };
139 
140 struct drm_i915_error_state {
141 	u32 eir;
142 	u32 pgtbl_er;
143 	u32 pipeastat;
144 	u32 pipebstat;
145 	u32 ipeir;
146 	u32 ipehr;
147 	u32 instdone;
148 	u32 acthd;
149 	u32 instpm;
150 	u32 instps;
151 	u32 instdone1;
152 	u32 seqno;
153 	struct timeval time;
154 };
155 
156 struct drm_i915_display_funcs {
157 	void (*dpms)(struct drm_crtc *crtc, int mode);
158 	bool (*fbc_enabled)(struct drm_crtc *crtc);
159 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
160 	void (*disable_fbc)(struct drm_device *dev);
161 	int (*get_display_clock_speed)(struct drm_device *dev);
162 	int (*get_fifo_size)(struct drm_device *dev, int plane);
163 	void (*update_wm)(struct drm_device *dev, int planea_clock,
164 			  int planeb_clock, int sr_hdisplay, int pixel_size);
165 	/* clock updates for mode set */
166 	/* cursor updates */
167 	/* render clock increase/decrease */
168 	/* display clock increase/decrease */
169 	/* pll clock increase/decrease */
170 	/* clock gating init */
171 };
172 
173 struct intel_overlay;
174 
175 struct intel_device_info {
176 	u8 is_mobile : 1;
177 	u8 is_i8xx : 1;
178 	u8 is_i915g : 1;
179 	u8 is_i9xx : 1;
180 	u8 is_i945gm : 1;
181 	u8 is_i965g : 1;
182 	u8 is_i965gm : 1;
183 	u8 is_g33 : 1;
184 	u8 need_gfx_hws : 1;
185 	u8 is_g4x : 1;
186 	u8 is_pineview : 1;
187 	u8 is_ironlake : 1;
188 	u8 has_fbc : 1;
189 	u8 has_rc6 : 1;
190 	u8 has_pipe_cxsr : 1;
191 	u8 has_hotplug : 1;
192 	u8 cursor_needs_physical : 1;
193 };
194 
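/*
 * Illustrative only: a hypothetical sketch of how one entry of the
 * per-device info tables might be populated (the real tables live in
 * i915_drv.c and are matched against PCI IDs there).  The entry name and
 * the particular flags chosen here are examples, not a definitive entry.
 */
#if 0
static const struct intel_device_info example_945gm_info = {
	.is_i9xx = 1, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
};
#endif
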
195 typedef struct drm_i915_private {
196 	struct drm_device *dev;
197 
198 	const struct intel_device_info *info;
199 
200 	int has_gem;
201 
202 	void __iomem *regs;
203 
204 	struct pci_dev *bridge_dev;
205 	drm_i915_ring_buffer_t ring;
206 
207 	drm_dma_handle_t *status_page_dmah;
208 	void *hw_status_page;
209 	dma_addr_t dma_status_page;
210 	uint32_t counter;
211 	unsigned int status_gfx_addr;
212 	drm_local_map_t hws_map;
213 	struct drm_gem_object *hws_obj;
214 	struct drm_gem_object *pwrctx;
215 
216 	struct resource mch_res;
217 
218 	unsigned int cpp;
219 	int back_offset;
220 	int front_offset;
221 	int current_page;
222 	int page_flipping;
223 
224 	wait_queue_head_t irq_queue;
225 	atomic_t irq_received;
226 	/** Protects user_irq_refcount and irq_mask_reg */
227 	spinlock_t user_irq_lock;
228 	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
229 	int user_irq_refcount;
230 	u32 trace_irq_seqno;
231 	/** Cached value of IMR to avoid reads in updating the bitfield */
232 	u32 irq_mask_reg;
233 	u32 pipestat[2];
234 	/** Split IRQ registers for the graphics and display engines on
235 	    Ironlake; irq_mask_reg is still used for display IRQs. */
236 	u32 gt_irq_mask_reg;
237 	u32 gt_irq_enable_reg;
238 	u32 de_irq_enable_reg;
239 	u32 pch_irq_mask_reg;
240 	u32 pch_irq_enable_reg;
241 
242 	u32 hotplug_supported_mask;
243 	struct work_struct hotplug_work;
244 
245 	int tex_lru_log_granularity;
246 	int allow_batchbuffer;
247 	struct mem_block *agp_heap;
248 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
249 	int vblank_pipe;
250 
251 	/* For hangcheck timer */
252 #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
253 	struct timer_list hangcheck_timer;
254 	int hangcheck_count;
255 	uint32_t last_acthd;
256 
257 	struct drm_mm vram;
258 
259 	unsigned long cfb_size;
260 	unsigned long cfb_pitch;
261 	int cfb_fence;
262 	int cfb_plane;
263 
264 	int irq_enabled;
265 
266 	struct intel_opregion opregion;
267 
268 	/* overlay */
269 	struct intel_overlay *overlay;
270 
271 	/* LVDS info */
272 	int backlight_duty_cycle;  /* restore backlight to this value */
273 	bool panel_wants_dither;
274 	struct drm_display_mode *panel_fixed_mode;
275 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
276 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
277 
278 	/* Feature bits from the VBIOS */
279 	unsigned int int_tv_support:1;
280 	unsigned int lvds_dither:1;
281 	unsigned int lvds_vbt:1;
282 	unsigned int int_crt_support:1;
283 	unsigned int lvds_use_ssc:1;
284 	unsigned int edp_support:1;
285 	int lvds_ssc_freq;
286 
287 	struct notifier_block lid_notifier;
288 
289 	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
290 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
291 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
292 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
293 
294 	unsigned int fsb_freq, mem_freq;
295 
296 	spinlock_t error_lock;
297 	struct drm_i915_error_state *first_error;
298 	struct work_struct error_work;
299 	struct workqueue_struct *wq;
300 
301 	/* Display functions */
302 	struct drm_i915_display_funcs display;
303 
304 	/* Register state */
305 	bool modeset_on_lid;
306 	u8 saveLBB;
307 	u32 saveDSPACNTR;
308 	u32 saveDSPBCNTR;
309 	u32 saveDSPARB;
310 	u32 saveHWS;
311 	u32 savePIPEACONF;
312 	u32 savePIPEBCONF;
313 	u32 savePIPEASRC;
314 	u32 savePIPEBSRC;
315 	u32 saveFPA0;
316 	u32 saveFPA1;
317 	u32 saveDPLL_A;
318 	u32 saveDPLL_A_MD;
319 	u32 saveHTOTAL_A;
320 	u32 saveHBLANK_A;
321 	u32 saveHSYNC_A;
322 	u32 saveVTOTAL_A;
323 	u32 saveVBLANK_A;
324 	u32 saveVSYNC_A;
325 	u32 saveBCLRPAT_A;
326 	u32 saveTRANSACONF;
327 	u32 saveTRANS_HTOTAL_A;
328 	u32 saveTRANS_HBLANK_A;
329 	u32 saveTRANS_HSYNC_A;
330 	u32 saveTRANS_VTOTAL_A;
331 	u32 saveTRANS_VBLANK_A;
332 	u32 saveTRANS_VSYNC_A;
333 	u32 savePIPEASTAT;
334 	u32 saveDSPASTRIDE;
335 	u32 saveDSPASIZE;
336 	u32 saveDSPAPOS;
337 	u32 saveDSPAADDR;
338 	u32 saveDSPASURF;
339 	u32 saveDSPATILEOFF;
340 	u32 savePFIT_PGM_RATIOS;
341 	u32 saveBLC_HIST_CTL;
342 	u32 saveBLC_PWM_CTL;
343 	u32 saveBLC_PWM_CTL2;
344 	u32 saveBLC_CPU_PWM_CTL;
345 	u32 saveBLC_CPU_PWM_CTL2;
346 	u32 saveFPB0;
347 	u32 saveFPB1;
348 	u32 saveDPLL_B;
349 	u32 saveDPLL_B_MD;
350 	u32 saveHTOTAL_B;
351 	u32 saveHBLANK_B;
352 	u32 saveHSYNC_B;
353 	u32 saveVTOTAL_B;
354 	u32 saveVBLANK_B;
355 	u32 saveVSYNC_B;
356 	u32 saveBCLRPAT_B;
357 	u32 saveTRANSBCONF;
358 	u32 saveTRANS_HTOTAL_B;
359 	u32 saveTRANS_HBLANK_B;
360 	u32 saveTRANS_HSYNC_B;
361 	u32 saveTRANS_VTOTAL_B;
362 	u32 saveTRANS_VBLANK_B;
363 	u32 saveTRANS_VSYNC_B;
364 	u32 savePIPEBSTAT;
365 	u32 saveDSPBSTRIDE;
366 	u32 saveDSPBSIZE;
367 	u32 saveDSPBPOS;
368 	u32 saveDSPBADDR;
369 	u32 saveDSPBSURF;
370 	u32 saveDSPBTILEOFF;
371 	u32 saveVGA0;
372 	u32 saveVGA1;
373 	u32 saveVGA_PD;
374 	u32 saveVGACNTRL;
375 	u32 saveADPA;
376 	u32 saveLVDS;
377 	u32 savePP_ON_DELAYS;
378 	u32 savePP_OFF_DELAYS;
379 	u32 saveDVOA;
380 	u32 saveDVOB;
381 	u32 saveDVOC;
382 	u32 savePP_ON;
383 	u32 savePP_OFF;
384 	u32 savePP_CONTROL;
385 	u32 savePP_DIVISOR;
386 	u32 savePFIT_CONTROL;
387 	u32 save_palette_a[256];
388 	u32 save_palette_b[256];
389 	u32 saveDPFC_CB_BASE;
390 	u32 saveFBC_CFB_BASE;
391 	u32 saveFBC_LL_BASE;
392 	u32 saveFBC_CONTROL;
393 	u32 saveFBC_CONTROL2;
394 	u32 saveIER;
395 	u32 saveIIR;
396 	u32 saveIMR;
397 	u32 saveDEIER;
398 	u32 saveDEIMR;
399 	u32 saveGTIER;
400 	u32 saveGTIMR;
401 	u32 saveFDI_RXA_IMR;
402 	u32 saveFDI_RXB_IMR;
403 	u32 saveCACHE_MODE_0;
404 	u32 saveMI_ARB_STATE;
405 	u32 saveSWF0[16];
406 	u32 saveSWF1[16];
407 	u32 saveSWF2[3];
408 	u8 saveMSR;
409 	u8 saveSR[8];
410 	u8 saveGR[25];
411 	u8 saveAR_INDEX;
412 	u8 saveAR[21];
413 	u8 saveDACMASK;
414 	u8 saveCR[37];
415 	uint64_t saveFENCE[16];
416 	u32 saveCURACNTR;
417 	u32 saveCURAPOS;
418 	u32 saveCURABASE;
419 	u32 saveCURBCNTR;
420 	u32 saveCURBPOS;
421 	u32 saveCURBBASE;
422 	u32 saveCURSIZE;
423 	u32 saveDP_B;
424 	u32 saveDP_C;
425 	u32 saveDP_D;
426 	u32 savePIPEA_GMCH_DATA_M;
427 	u32 savePIPEB_GMCH_DATA_M;
428 	u32 savePIPEA_GMCH_DATA_N;
429 	u32 savePIPEB_GMCH_DATA_N;
430 	u32 savePIPEA_DP_LINK_M;
431 	u32 savePIPEB_DP_LINK_M;
432 	u32 savePIPEA_DP_LINK_N;
433 	u32 savePIPEB_DP_LINK_N;
434 	u32 saveFDI_RXA_CTL;
435 	u32 saveFDI_TXA_CTL;
436 	u32 saveFDI_RXB_CTL;
437 	u32 saveFDI_TXB_CTL;
438 	u32 savePFA_CTL_1;
439 	u32 savePFB_CTL_1;
440 	u32 savePFA_WIN_SZ;
441 	u32 savePFB_WIN_SZ;
442 	u32 savePFA_WIN_POS;
443 	u32 savePFB_WIN_POS;
444 	u32 savePCH_DREF_CONTROL;
445 	u32 saveDISP_ARB_CTL;
446 	u32 savePIPEA_DATA_M1;
447 	u32 savePIPEA_DATA_N1;
448 	u32 savePIPEA_LINK_M1;
449 	u32 savePIPEA_LINK_N1;
450 	u32 savePIPEB_DATA_M1;
451 	u32 savePIPEB_DATA_N1;
452 	u32 savePIPEB_LINK_M1;
453 	u32 savePIPEB_LINK_N1;
454 
455 	struct {
456 		struct drm_mm gtt_space;
457 
458 		struct io_mapping *gtt_mapping;
459 		int gtt_mtrr;
460 
461 		/**
462 		 * Membership on the list of all loaded devices, used to evict
463 		 * inactive buffers under memory pressure.
464 		 *
465 		 * Modifications should only be done whilst holding the
466 		 * shrink_list_lock spinlock.
467 		 */
468 		struct list_head shrink_list;
469 
470 		/**
471 		 * List of objects currently involved in rendering from the
472 		 * ringbuffer.
473 		 *
474 		 * Includes buffers having the contents of their GPU caches
475 		 * flushed, not necessarily primitives.  last_rendering_seqno
476 		 * represents when the rendering involved will be completed.
477 		 *
478 		 * A reference is held on the buffer while on this list.
479 		 */
480 		spinlock_t active_list_lock;
481 		struct list_head active_list;
482 
483 		/**
484 		 * List of objects which are not in the ringbuffer but which
485 		 * still have a write_domain which needs to be flushed before
486 		 * unbinding.
487 		 *
488 		 * last_rendering_seqno is 0 while an object is in this list.
489 		 *
490 		 * A reference is held on the buffer while on this list.
491 		 */
492 		struct list_head flushing_list;
493 
494 		/**
495 		 * LRU list of objects which are not in the ringbuffer and
496 		 * are ready to unbind, but are still in the GTT.
497 		 *
498 		 * last_rendering_seqno is 0 while an object is in this list.
499 		 *
500 		 * A reference is not held on the buffer while on this list,
501 		 * as merely being GTT-bound shouldn't prevent its being
502 		 * freed, and we'll pull it off the list in the free path.
503 		 */
504 		struct list_head inactive_list;
505 
506 		/** LRU list of objects with fence regs on them. */
507 		struct list_head fence_list;
508 
509 		/**
510 		 * List of breadcrumbs associated with GPU requests currently
511 		 * outstanding.
512 		 */
513 		struct list_head request_list;
514 
515 		/**
516 		 * We leave the user IRQ off as much as possible,
517 		 * but this means that requests will finish and never
518 		 * be retired once the system goes idle. Set a timer to
519 		 * fire periodically while the ring is running. When it
520 		 * fires, go retire requests.
521 		 */
522 		struct delayed_work retire_work;
523 
524 		uint32_t next_gem_seqno;
525 
526 		/**
527 		 * Waiting sequence number, if any
528 		 */
529 		uint32_t waiting_gem_seqno;
530 
531 		/**
532 		 * Last seq seen at irq time
533 		 */
534 		uint32_t irq_gem_seqno;
535 
536 		/**
537 		 * Flag if the X Server, and thus DRM, is not currently in
538 		 * control of the device.
539 		 *
540 		 * This is set between LeaveVT and EnterVT.  It needs to be
541 		 * replaced with a semaphore.  It also needs to be
542 		 * transitioned away from for kernel modesetting.
543 		 */
544 		int suspended;
545 
546 		/**
547 		 * Flag if the hardware appears to be wedged.
548 		 *
549 		 * This is set when attempts to idle the device timeout.
550 		 * It prevents command submission from occurring and makes
551 		 * every pending request fail.
552 		 */
553 		atomic_t wedged;
554 
555 		/** Bit 6 swizzling required for X tiling */
556 		uint32_t bit_6_swizzle_x;
557 		/** Bit 6 swizzling required for Y tiling */
558 		uint32_t bit_6_swizzle_y;
559 
560 		/* storage for physical objects */
561 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
562 	} mm;
563 	struct sdvo_device_mapping sdvo_mappings[2];
564 	/* indicate whether the LVDS_BORDER should be enabled or not */
565 	unsigned int lvds_border_bits;
566 
567 	struct drm_crtc *plane_to_crtc_mapping[2];
568 	struct drm_crtc *pipe_to_crtc_mapping[2];
569 	wait_queue_head_t pending_flip_queue;
570 
571 	/* Reclocking support */
572 	bool render_reclock_avail;
573 	bool lvds_downclock_avail;
574 	/* indicates the reduced downclock for LVDS*/
575 	int lvds_downclock;
576 	struct work_struct idle_work;
577 	struct timer_list idle_timer;
578 	bool busy;
579 	u16 orig_clock;
580 	int child_dev_num;
581 	struct child_device_config *child_dev;
582 	struct drm_connector *int_lvds_connector;
583 } drm_i915_private_t;
584 
585 /** driver private structure attached to each drm_gem_object */
586 struct drm_i915_gem_object {
587 	struct drm_gem_object *obj;
588 
589 	/** Current space allocated to this object in the GTT, if any. */
590 	struct drm_mm_node *gtt_space;
591 
592 	/** This object's place on the active/flushing/inactive lists */
593 	struct list_head list;
594 
595 	/** This object's place on the fenced object LRU */
596 	struct list_head fence_list;
597 
598 	/**
599 	 * This is set if the object is on the active or flushing lists
600 	 * (has pending rendering), and is cleared if it is on the inactive
601 	 * list (ready to be unbound).
602 	 */
603 	int active;
604 
605 	/**
606 	 * This is set if the object has been written to since last bound
607 	 * to the GTT
608 	 */
609 	int dirty;
610 
611 	/** AGP memory structure for our GTT binding. */
612 	DRM_AGP_MEM *agp_mem;
613 
614 	struct page **pages;
615 	int pages_refcount;
616 
617 	/**
618 	 * Current offset of the object in GTT space.
619 	 *
620 	 * This is the same as gtt_space->start
621 	 */
622 	uint32_t gtt_offset;
623 
624 	/**
625 	 * Fake offset for use by mmap(2)
626 	 */
627 	uint64_t mmap_offset;
628 
629 	/**
630 	 * Fence register bits (if any) for this object.  Will be set
631 	 * as needed when mapped into the GTT.
632 	 * Protected by dev->struct_mutex.
633 	 */
634 	int fence_reg;
635 
636 	/** How many users have pinned this object in GTT space */
637 	int pin_count;
638 
639 	/** Breadcrumb of last rendering to the buffer. */
640 	uint32_t last_rendering_seqno;
641 
642 	/** Current tiling mode for the object. */
643 	uint32_t tiling_mode;
644 	uint32_t stride;
645 
646 	/** Record of address bit 17 of each page at last unbind. */
647 	long *bit_17;
648 
649 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
650 	uint32_t agp_type;
651 
652 	/**
653 	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
654 	 * flags which individual pages are valid.
655 	 */
656 	uint8_t *page_cpu_valid;
657 
658 	/** User space pin count and filp owning the pin */
659 	uint32_t user_pin_count;
660 	struct drm_file *pin_filp;
661 
662 	/** for phy allocated objects */
663 	struct drm_i915_gem_phys_object *phys_obj;
664 
665 	/**
666 	 * Used for checking the object doesn't appear more than once
667 	 * in an execbuffer object list.
668 	 */
669 	int in_execbuffer;
670 
671 	/**
672 	 * Advice: are the backing pages purgeable?
673 	 */
674 	int madv;
675 
676 	/**
677 	 * Number of crtcs where this object is currently the fb, but
678 	 * will be page flipped away on the next vblank.  When it
679 	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
680 	 */
681 	atomic_t pending_flip;
682 };
683 
684 /**
685  * Request queue structure.
686  *
687  * The request queue allows us to note sequence numbers that have been emitted
688  * and may be associated with active buffers to be retired.
689  *
690  * By keeping this list, we can avoid having to do questionable
691  * sequence-number comparisons on buffer last_rendering_seqnos, and associate
692  * an emission time with seqnos for tracking how far ahead of the GPU we are.
693  */
694 struct drm_i915_gem_request {
695 	/** GEM sequence number associated with this request. */
696 	uint32_t seqno;
697 
698 	/** Time at which this request was emitted, in jiffies. */
699 	unsigned long emitted_jiffies;
700 
701 	/** global list entry for this request */
702 	struct list_head list;
703 
704 	/** file_priv list entry for this request */
705 	struct list_head client_list;
706 };
707 
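/*
 * Illustrative only: a minimal sketch of the wrap-safe comparison the
 * comment above alludes to.  The driver's real helper is the
 * i915_seqno_passed() declared later in this header; this hypothetical
 * variant just shows the usual idiom of comparing 32-bit sequence numbers
 * through a signed difference so that wrap-around is handled.
 */
static inline bool example_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* True when seq1 is at or beyond seq2, modulo 2^32 wrap. */
	return (int32_t)(seq1 - seq2) >= 0;
}
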
708 struct drm_i915_file_private {
709 	struct {
710 		struct list_head request_list;
711 	} mm;
712 };
713 
714 enum intel_chip_family {
715 	CHIP_I8XX = 0x01,
716 	CHIP_I9XX = 0x02,
717 	CHIP_I915 = 0x04,
718 	CHIP_I965 = 0x08,
719 };
720 
721 extern struct drm_ioctl_desc i915_ioctls[];
722 extern int i915_max_ioctl;
723 extern unsigned int i915_fbpercrtc;
724 extern unsigned int i915_powersave;
725 
726 extern void i915_save_display(struct drm_device *dev);
727 extern void i915_restore_display(struct drm_device *dev);
728 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
729 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
730 
731 				/* i915_dma.c */
732 extern void i915_kernel_lost_context(struct drm_device * dev);
733 extern int i915_driver_load(struct drm_device *, unsigned long flags);
734 extern int i915_driver_unload(struct drm_device *);
735 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
736 extern void i915_driver_lastclose(struct drm_device * dev);
737 extern void i915_driver_preclose(struct drm_device *dev,
738 				 struct drm_file *file_priv);
739 extern void i915_driver_postclose(struct drm_device *dev,
740 				  struct drm_file *file_priv);
741 extern int i915_driver_device_is_agp(struct drm_device * dev);
742 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
743 			      unsigned long arg);
744 extern int i915_emit_box(struct drm_device *dev,
745 			 struct drm_clip_rect *boxes,
746 			 int i, int DR1, int DR4);
747 extern int i965_reset(struct drm_device *dev, u8 flags);
748 
749 /* i915_irq.c */
750 void i915_hangcheck_elapsed(unsigned long data);
751 extern int i915_irq_emit(struct drm_device *dev, void *data,
752 			 struct drm_file *file_priv);
753 extern int i915_irq_wait(struct drm_device *dev, void *data,
754 			 struct drm_file *file_priv);
755 void i915_user_irq_get(struct drm_device *dev);
756 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
757 void i915_user_irq_put(struct drm_device *dev);
758 extern void i915_enable_interrupt (struct drm_device *dev);
759 
760 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
761 extern void i915_driver_irq_preinstall(struct drm_device * dev);
762 extern int i915_driver_irq_postinstall(struct drm_device *dev);
763 extern void i915_driver_irq_uninstall(struct drm_device * dev);
764 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
765 				struct drm_file *file_priv);
766 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
767 				struct drm_file *file_priv);
768 extern int i915_enable_vblank(struct drm_device *dev, int crtc);
769 extern void i915_disable_vblank(struct drm_device *dev, int crtc);
770 extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
771 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
772 extern int i915_vblank_swap(struct drm_device *dev, void *data,
773 			    struct drm_file *file_priv);
774 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
775 
776 void
777 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
778 
779 void
780 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
781 
782 void intel_enable_asle (struct drm_device *dev);
783 
784 
785 /* i915_mem.c */
786 extern int i915_mem_alloc(struct drm_device *dev, void *data,
787 			  struct drm_file *file_priv);
788 extern int i915_mem_free(struct drm_device *dev, void *data,
789 			 struct drm_file *file_priv);
790 extern int i915_mem_init_heap(struct drm_device *dev, void *data,
791 			      struct drm_file *file_priv);
792 extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
793 				 struct drm_file *file_priv);
794 extern void i915_mem_takedown(struct mem_block **heap);
795 extern void i915_mem_release(struct drm_device * dev,
796 			     struct drm_file *file_priv, struct mem_block *heap);
797 /* i915_gem.c */
798 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
799 			struct drm_file *file_priv);
800 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
801 			  struct drm_file *file_priv);
802 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
803 			 struct drm_file *file_priv);
804 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
805 			  struct drm_file *file_priv);
806 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
807 			struct drm_file *file_priv);
808 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
809 			struct drm_file *file_priv);
810 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
811 			      struct drm_file *file_priv);
812 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
813 			     struct drm_file *file_priv);
814 int i915_gem_execbuffer(struct drm_device *dev, void *data,
815 			struct drm_file *file_priv);
816 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
817 			 struct drm_file *file_priv);
818 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
819 		       struct drm_file *file_priv);
820 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
821 			 struct drm_file *file_priv);
822 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
823 			struct drm_file *file_priv);
824 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
825 			    struct drm_file *file_priv);
826 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
827 			   struct drm_file *file_priv);
828 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
829 			   struct drm_file *file_priv);
830 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
831 			   struct drm_file *file_priv);
832 int i915_gem_set_tiling(struct drm_device *dev, void *data,
833 			struct drm_file *file_priv);
834 int i915_gem_get_tiling(struct drm_device *dev, void *data,
835 			struct drm_file *file_priv);
836 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
837 				struct drm_file *file_priv);
838 void i915_gem_load(struct drm_device *dev);
839 int i915_gem_init_object(struct drm_gem_object *obj);
840 void i915_gem_free_object(struct drm_gem_object *obj);
841 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
842 void i915_gem_object_unpin(struct drm_gem_object *obj);
843 int i915_gem_object_unbind(struct drm_gem_object *obj);
844 void i915_gem_release_mmap(struct drm_gem_object *obj);
845 void i915_gem_lastclose(struct drm_device *dev);
846 uint32_t i915_get_gem_seqno(struct drm_device *dev);
847 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
848 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
849 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
850 void i915_gem_retire_requests(struct drm_device *dev);
851 void i915_gem_retire_work_handler(struct work_struct *work);
852 void i915_gem_clflush_object(struct drm_gem_object *obj);
853 int i915_gem_object_set_domain(struct drm_gem_object *obj,
854 			       uint32_t read_domains,
855 			       uint32_t write_domain);
856 int i915_gem_init_ringbuffer(struct drm_device *dev);
857 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
858 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
859 		     unsigned long end);
860 int i915_gem_idle(struct drm_device *dev);
861 uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
862 			  uint32_t flush_domains);
863 int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
864 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
865 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
866 				      int write);
867 int i915_gem_attach_phys_object(struct drm_device *dev,
868 				struct drm_gem_object *obj, int id);
869 void i915_gem_detach_phys_object(struct drm_device *dev,
870 				 struct drm_gem_object *obj);
871 void i915_gem_free_all_phys_object(struct drm_device *dev);
872 int i915_gem_object_get_pages(struct drm_gem_object *obj);
873 void i915_gem_object_put_pages(struct drm_gem_object *obj);
874 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
875 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
876 
877 void i915_gem_shrinker_init(void);
878 void i915_gem_shrinker_exit(void);
879 
880 /* i915_gem_tiling.c */
881 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
882 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
883 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
884 bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
885 		    int tiling_mode);
886 bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
887 
888 /* i915_gem_debug.c */
889 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
890 			  const char *where, uint32_t mark);
891 #if WATCH_INACTIVE
892 void i915_verify_inactive(struct drm_device *dev, char *file, int line);
893 #else
894 #define i915_verify_inactive(dev, file, line)
895 #endif
896 void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
899 void i915_dump_lru(struct drm_device *dev, const char *where);
900 
901 /* i915_debugfs.c */
902 int i915_debugfs_init(struct drm_minor *minor);
903 void i915_debugfs_cleanup(struct drm_minor *minor);
904 
905 /* i915_suspend.c */
906 extern int i915_save_state(struct drm_device *dev);
907 extern int i915_restore_state(struct drm_device *dev);
908 
913 #ifdef CONFIG_ACPI
914 /* i915_opregion.c */
915 extern int intel_opregion_init(struct drm_device *dev, int resume);
916 extern void intel_opregion_free(struct drm_device *dev, int suspend);
917 extern void opregion_asle_intr(struct drm_device *dev);
918 extern void ironlake_opregion_gse_intr(struct drm_device *dev);
919 extern void opregion_enable_asle(struct drm_device *dev);
920 #else
921 static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
922 static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
923 static inline void opregion_asle_intr(struct drm_device *dev) { return; }
924 static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
925 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
926 #endif
927 
928 /* modesetting */
929 extern void intel_modeset_init(struct drm_device *dev);
930 extern void intel_modeset_cleanup(struct drm_device *dev);
931 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
932 extern void i8xx_disable_fbc(struct drm_device *dev);
933 extern void g4x_disable_fbc(struct drm_device *dev);
934 
935 /**
936  * Lock test used only when the hardware lock merely synchronizes ring access.
937  *
938  * In that case the test can be skipped once GEM is initialized, as nobody else
939  * has access to the ring.
940  */
941 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
942 	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
943 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
944 } while (0)
945 
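/*
 * Illustrative only: a hypothetical ioctl skeleton showing where the macro
 * above is meant to sit.  LOCK_TEST_WITH_RETURN() is assumed to come from
 * the DRM core headers; the function name and body are examples, not part
 * of the driver.
 */
static inline int example_ring_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	/* Performs the DRM lock test only while the ring is not yet managed
	 * by GEM (ring_obj == NULL); once GEM owns the ring nobody else can
	 * touch it, so the test is skipped. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* ... safe to emit commands to dev_priv->ring here ... */
	return 0;
}
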
946 #define I915_READ(reg)          readl(dev_priv->regs + (reg))
947 #define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
948 #define I915_READ16(reg)	readw(dev_priv->regs + (reg))
949 #define I915_WRITE16(reg, val)	writew(val, dev_priv->regs + (reg))
950 #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
951 #define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
952 #define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
953 #define I915_READ64(reg)	readq(dev_priv->regs + (reg))
954 #define POSTING_READ(reg)	(void)I915_READ(reg)
955 
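/*
 * Illustrative only: a hypothetical helper showing the usual pattern with
 * the accessors above -- update the cached mask, write the register, then
 * use POSTING_READ() so the write is posted before returning.  The driver's
 * real IMR updates (see i915_enable_irq()) are serialized with
 * dev_priv->user_irq_lock.
 */
static inline void example_write_imr(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask_reg = mask;	/* keep the cached copy in sync */
	I915_WRITE(IMR, mask);
	POSTING_READ(IMR);
}
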
956 #define I915_VERBOSE 0
957 
958 #define RING_LOCALS	volatile unsigned int *ring_virt__;
959 
960 #define BEGIN_LP_RING(n) do {						\
961 	int bytes__ = 4*(n);						\
962 	if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
963 	/* a wrap must occur between instructions so pad beforehand */	\
964 	if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
965 		i915_wrap_ring(dev);					\
966 	if (unlikely (dev_priv->ring.space < bytes__))			\
967 		i915_wait_ring(dev, bytes__, __func__);			\
968 	ring_virt__ = (unsigned int *)					\
969 	        (dev_priv->ring.virtual_start + dev_priv->ring.tail);	\
970 	dev_priv->ring.tail += bytes__;					\
971 	dev_priv->ring.tail &= dev_priv->ring.Size - 1;			\
972 	dev_priv->ring.space -= bytes__;				\
973 } while (0)
974 
975 #define OUT_RING(n) do {						\
976 	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
977 	*ring_virt__++ = (n);						\
978 } while (0)
979 
980 #define ADVANCE_LP_RING() do {						\
981 	if (I915_VERBOSE)						\
982 		DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail);	\
983 	I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);			\
984 } while(0)
985 
986 /**
987  * Reads a dword out of the status page, which is written to from the command
988  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
989  * MI_STORE_DATA_IMM.
990  *
991  * The following dwords have a reserved meaning:
992  * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
993  * 0x04: ring 0 head pointer
994  * 0x05: ring 1 head pointer (915-class)
995  * 0x06: ring 2 head pointer (915-class)
996  * 0x10-0x1b: Context status DWords (GM45)
997  * 0x1f: Last written status offset. (GM45)
998  *
999  * The area from dword 0x20 to 0x3ff is available for driver usage.
1000  */
1001 #define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
1002 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
1003 #define I915_GEM_HWS_INDEX		0x20
1004 #define I915_BREADCRUMB_INDEX		0x21
1005 
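/*
 * Illustrative only: a hypothetical reader for the reserved status page
 * slots defined above.  The driver's i915_get_gem_seqno() is expected to
 * boil down to the same READ_HWSP() access, and the legacy DRI breadcrumb
 * is fetched with READ_BREADCRUMB().
 */
static inline u32 example_read_gem_seqno(drm_i915_private_t *dev_priv)
{
	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
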
1006 extern int i915_wrap_ring(struct drm_device * dev);
1007 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1008 
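/*
 * Illustrative only: a hypothetical sketch of the emit sequence the
 * BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros are built for.
 * RING_LOCALS must be declared in the emitting function, and the dword
 * count passed to BEGIN_LP_RING() has to match the number of OUT_RING()
 * calls; MI_FLUSH and MI_NOOP come from i915_reg.h.
 */
static inline void example_emit_flush(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
}
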
1009 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
1010 
1011 #define IS_I830(dev)		((dev)->pci_device == 0x3577)
1012 #define IS_845G(dev)		((dev)->pci_device == 0x2562)
1013 #define IS_I85X(dev)		((dev)->pci_device == 0x3582)
1014 #define IS_I865G(dev)		((dev)->pci_device == 0x2572)
1015 #define IS_I8XX(dev)		(INTEL_INFO(dev)->is_i8xx)
1016 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
1017 #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
1018 #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
1019 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
1020 #define IS_I965G(dev)		(INTEL_INFO(dev)->is_i965g)
1021 #define IS_I965GM(dev)		(INTEL_INFO(dev)->is_i965gm)
1022 #define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
1023 #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
1024 #define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
1025 #define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
1026 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
1027 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
1028 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
1029 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
1030 #define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
1031 #define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
1032 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
1033 
1034 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
1035 
1036 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1037  * rows, which changed the alignment requirements and fence programming.
1038  */
1039 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
1040 						      IS_I915GM(dev)))
1041 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
1042 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
1043 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
1044 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
1045 #define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
1046 					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
1047 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
1048 /* dsparb controlled by hw only */
1049 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1050 
1051 #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
1052 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1053 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1054 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1055 
1056 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
1057 
1058 #endif
1059