/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include "i915_reg.h"
#include "intel_bios.h"
#include <linux/io-mapping.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
};

#define I915_NUM_PIPE	2

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_BUF	0
#define WATCH_EXEC	0
#define WATCH_LRU	0
#define WATCH_RELOC	0
#define WATCH_INACTIVE	0
#define WATCH_PWRITE	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_gem_object *cur_obj;
};

typedef struct _drm_i915_ring_buffer {
	int tail_mask;
	unsigned long Size;
	u8 *virtual_start;
	int head;
	int tail;
	int space;
	drm_local_map_t map;
	struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;

struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;
	int size;
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	struct opregion_asle *asle;
	int enabled;
};

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1

struct drm_i915_fence_reg {
	struct drm_gem_object *obj;
};

typedef struct drm_i915_private {
	struct drm_device *dev;

	int has_gem;

	void __iomem *regs;

	drm_i915_ring_buffer_t ring;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	uint32_t counter;
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	wait_queue_head_t irq_queue;
	atomic_t irq_received;
	/** Protects user_irq_refcount and irq_mask_reg */
	spinlock_t user_irq_lock;
	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
	int user_irq_refcount;
	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask_reg;
	u32 pipestat[2];

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	struct mem_block *agp_heap;
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int vblank_pipe;

	bool cursor_needs_physical;

	struct drm_mm vram;

	int irq_enabled;

	struct intel_opregion opregion;

	/* LVDS info */
	int backlight_duty_cycle;  /* restore backlight to this value */
	bool panel_wants_dither;
	struct drm_display_mode *panel_fixed_mode;
	struct drm_display_mode *vbt_mode; /* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	int lvds_ssc_freq;

	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	/* Register state */
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 saveRENDERSTANDBY;
	u32 saveHWS;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveCACHE_MODE_0;
	u32 saveD_STATE;
	u32 saveCG_2D_DIS;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];

	struct {
		struct drm_mm gtt_space;

		struct io_mapping *gtt_mapping;
		int gtt_mtrr;

		/**
		 * List of objects currently involved in rendering from the
		 * ringbuffer.
		 *
		 * Includes buffers having the contents of their GPU caches
		 * flushed, not necessarily primitives.  last_rendering_seqno
		 * represents when the rendering involved will be completed.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head active_list;

		/**
		 * List of objects which are not in the ringbuffer but which
		 * still have a write_domain which needs to be flushed before
		 * unbinding.
		 *
		 * last_rendering_seqno is 0 while an object is in this list.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head flushing_list;

		/**
		 * LRU list of objects which are not in the ringbuffer and
		 * are ready to unbind, but are still in the GTT.
		 *
		 * last_rendering_seqno is 0 while an object is in this list.
		 *
		 * A reference is not held on the buffer while on this list,
		 * as merely being GTT-bound shouldn't prevent its being
		 * freed, and we'll pull it off the list in the free path.
		 */
		struct list_head inactive_list;

		/**
		 * List of breadcrumbs associated with GPU requests currently
		 * outstanding.
		 */
		struct list_head request_list;

		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;

		uint32_t next_gem_seqno;

		/**
		 * Waiting sequence number, if any
		 */
		uint32_t waiting_gem_seqno;

		/**
		 * Last seq seen at irq time
		 */
		uint32_t irq_gem_seqno;

		/**
		 * Flag if the X Server, and thus DRM, is not currently in
		 * control of the device.
		 *
		 * This is set between LeaveVT and EnterVT.  It needs to be
		 * replaced with a semaphore.  It also needs to be
		 * transitioned away from for kernel modesetting.
		 */
		int suspended;

		/**
		 * Flag if the hardware appears to be wedged.
		 *
		 * This is set when attempts to idle the device time out.
		 * It prevents command submission from occurring and makes
		 * every pending request fail.
		 */
		int wedged;

		/** Bit 6 swizzling required for X tiling */
		uint32_t bit_6_swizzle_x;
		/** Bit 6 swizzling required for Y tiling */
		uint32_t bit_6_swizzle_y;

		/* storage for physical objects */
		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
	} mm;
} drm_i915_private_t;

/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object *obj;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;

	/** This object's place on the active/flushing/inactive lists */
	struct list_head list;

	/**
	 * This is set if the object is on the active or flushing lists
	 * (has pending rendering), and is not set if it's on inactive (ready
	 * to be unbound).
	 */
	int active;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	int dirty;

	/** AGP memory structure for our GTT binding. */
	DRM_AGP_MEM *agp_mem;

	struct page **pages;
	int pages_refcount;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;
	/**
	 * Required alignment for the object
	 */
	uint32_t gtt_alignment;
	/**
	 * Fake offset for use by mmap(2)
	 */
	uint64_t mmap_offset;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	int fence_reg;

	/** Boolean whether this object has a valid gtt offset. */
	int gtt_bound;

	/** How many users have pinned this object in GTT space */
	int pin_count;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;

	/** Current tiling mode for the object. */
	uint32_t tiling_mode;
	uint32_t stride;

	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
	uint32_t agp_type;

	/**
	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
	 * flags which individual pages are valid.
	 */
	uint8_t *page_cpu_valid;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;

	/**
	 * Used for checking the object doesn't appear more than once
	 * in an execbuffer object list.
	 */
	int in_execbuffer;
};
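
/*
 * Illustrative sketch (editor's addition; i915_example_move_to_inactive() is
 * not part of the driver): how the active/flushing/inactive list rules
 * documented in drm_i915_private.mm fit together.  Once rendering to a
 * buffer has completed and its caches are flushed, the object drops onto
 * mm.inactive_list, last_rendering_seqno is cleared, and the reference held
 * for the active list is released.  The in-tree helper in i915_gem.c also
 * deals with pinned objects; callers hold dev->struct_mutex.
 */
static inline void
i915_example_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Inactive objects live on mm.inactive_list in LRU order. */
	list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	/* last_rendering_seqno only means something while rendering is pending. */
	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		/* Drop the reference taken when the object went active. */
		drm_gem_object_unreference(obj);
	}
}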

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	struct list_head list;
};
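
/*
 * Illustrative sketch (editor's addition; i915_example_seqno_passed() is not
 * part of the driver): the "questionable sequence-number comparisons" the
 * comment above refers to have to tolerate 32-bit wrap-around, which is done
 * by treating the difference of two seqnos as a signed value.  A request is
 * complete once the seqno most recently written to the hardware status page
 * (see i915_get_gem_seqno() below) has passed request->seqno.
 */
static inline int
i915_example_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* True if seq1 is at or after seq2, even across a wrap. */
	return (int32_t)(seq1 - seq2) >= 0;
}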

struct drm_i915_file_private {
	struct {
		uint32_t last_gem_seqno;
		uint32_t last_gem_throttle_seqno;
	} mm;
};

enum intel_chip_family {
	CHIP_I8XX = 0x01,
	CHIP_I9XX = 0x02,
	CHIP_I915 = 0x04,
	CHIP_I965 = 0x08,
};

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;

extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

				/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *boxes,
			 int i, int DR1, int DR4);

/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt(struct drm_device *dev);

extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
extern void i915_driver_irq_uninstall(struct drm_device * dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);


/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
extern int i915_mem_free(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
extern int i915_mem_init_heap(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
			     struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
			       uint32_t read_domains,
			       uint32_t write_domain);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end);
int i915_gem_idle(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
				      int write);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
			  const char *where, uint32_t mark);
#if WATCH_INACTIVE
void i915_verify_inactive(struct drm_device *dev, char *file, int line);
#else
#define i915_verify_inactive(dev, file, line)
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_dump_lru(struct drm_device *dev, const char *where);

/* i915_debugfs.c */
int i915_gem_debugfs_init(struct drm_minor *minor);
void i915_gem_debugfs_cleanup(struct drm_minor *minor);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

#ifdef CONFIG_ACPI
/* i915_opregion.c */
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_free(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
#else
static inline int intel_opregion_init(struct drm_device *dev) { return 0; }
static inline void intel_opregion_free(struct drm_device *dev) { return; }
static inline void opregion_asle_intr(struct drm_device *dev) { return; }
static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif

/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
} while (0)

#define I915_READ(reg)          readl(dev_priv->regs + (reg))
#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
#define I915_READ16(reg)	readw(dev_priv->regs + (reg))
#define I915_WRITE16(reg, val)	writew(val, dev_priv->regs + (reg))
#define I915_READ8(reg)		readb(dev_priv->regs + (reg))
#define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
#ifdef writeq
#define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
#else
#define I915_WRITE64(reg, val)	(writel(val, dev_priv->regs + (reg)), \
				 writel(upper_32_bits(val), dev_priv->regs + \
					(reg) + 4))
#endif
#define POSTING_READ(reg)	(void)I915_READ(reg)
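
/*
 * Illustrative sketch (editor's addition; i915_example_update_imr() is not
 * part of the driver): typical use of the accessors above.  MMIO writes can
 * be posted, so when ordering matters the write is flushed with a read of
 * the same register.  irq_mask_reg is the cached copy of IMR described in
 * drm_i915_private; its comment says it is protected by user_irq_lock, which
 * the caller is assumed to hold here.
 */
static inline void
i915_example_update_imr(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask_reg = mask;		/* keep the cached copy in sync */
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	POSTING_READ(IMR);			/* flush the posted write */
}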

#define I915_VERBOSE 0

#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
                        volatile char *virt;

#define BEGIN_LP_RING(n) do {				\
	if (I915_VERBOSE)				\
		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
	if (dev_priv->ring.space < (n)*4)		\
		i915_wait_ring(dev, (n)*4, __func__);		\
	outcount = 0;					\
	outring = dev_priv->ring.tail;			\
	ringmask = dev_priv->ring.tail_mask;		\
	virt = dev_priv->ring.virtual_start;		\
} while (0)

#define OUT_RING(n) do {					\
	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = (n);	\
        outcount++;						\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

#define ADVANCE_LP_RING() do {						\
	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
	dev_priv->ring.tail = outring;					\
	dev_priv->ring.space -= outcount * 4;				\
	I915_WRITE(PRB0_TAIL, outring);			\
} while (0)

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX		0x20
#define I915_BREADCRUMB_INDEX		0x21
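
/*
 * Illustrative sketch (editor's addition; i915_example_breadcrumb_done() is
 * not part of the driver): READ_BREADCRUMB() is how completion is polled on
 * the legacy DMA path.  The interrupt-emit code stores the incrementing
 * dev_priv->counter value into the breadcrumb dword of the status page, so
 * work tagged with a given number is done once the breadcrumb has caught up
 * with it.
 */
static inline int
i915_example_breadcrumb_done(drm_i915_private_t *dev_priv, u32 nr)
{
	/* The GPU updates the status page; no register read is needed. */
	return READ_BREADCRUMB(dev_priv) >= nr;
}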

extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
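
/*
 * Illustrative sketch (editor's addition; i915_example_emit_flush() is not
 * part of the driver): how the LP ring macros above fit together.  It is
 * placed after the i915_wait_ring() declaration because BEGIN_LP_RING() may
 * call it when the ring is short on space.  MI_FLUSH and MI_NOOP are assumed
 * to come from i915_reg.h; the MI_NOOP keeps the emitted length even so the
 * ring tail stays qword-aligned.
 */
static inline void
i915_example_emit_flush(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);		/* wait for space, latch the tail */
	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);		/* pad to an even number of dwords */
	ADVANCE_LP_RING();		/* publish the new tail to PRB0_TAIL */
}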

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)

#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
		        (dev)->pci_device == 0x27AE)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
		       (dev)->pci_device == 0x2982 || \
		       (dev)->pci_device == 0x2992 || \
		       (dev)->pci_device == 0x29A2 || \
		       (dev)->pci_device == 0x2A02 || \
		       (dev)->pci_device == 0x2A12 || \
		       (dev)->pci_device == 0x2A42 || \
		       (dev)->pci_device == 0x2E02 || \
		       (dev)->pci_device == 0x2E12 || \
		       (dev)->pci_device == 0x2E22)

#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)

#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)

#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
		     (dev)->pci_device == 0x2E12 || \
		     (dev)->pci_device == 0x2E22 || \
		     IS_GM45(dev))

#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))

#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
			(dev)->pci_device == 0x29B2 ||	\
			(dev)->pci_device == 0x29D2 ||  \
			(IS_IGD(dev)))

#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))

#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
			IS_IGD(dev))

#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
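
/*
 * Illustrative sketch (editor's addition; i915_example_num_fence_regs() is
 * not part of the driver): the device macros above are how per-generation
 * behaviour is chosen at run time.  Following the comment on
 * drm_i915_private.num_fence_regs ("8 on pre-965, 16 otherwise"), a
 * probe-time helper could pick the fence register count like this.
 */
static inline int
i915_example_num_fence_regs(struct drm_device *dev)
{
	return IS_I965G(dev) ? 16 : 8;
}
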
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev))

#define PRIMARY_RINGBUFFER_SIZE         (128*1024)

#endif