/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

#include "drm.h"

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.  This header is
 * shared with userspace, so struct layouts, typedef names and ioctl
 * numbers form a stable ABI.
 */

#ifdef __KERNEL__
/* For use by IPS driver (kernel-internal, not part of the uapi ABI) */
extern unsigned long i915_read_mch_val(void);
extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);
#endif

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

/* Legacy (pre-GEM) DMA/ring initialization parameters. */
typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

/* Shared-memory area between the DDX/DRI clients and the kernel
 * (legacy DRI1 state).
 */
typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10

/* I915 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 * (The values below are offsets added to DRM_COMMAND_BASE.  Gaps at
 * 0x10 and 0x12 are historical and must not be reused.)
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHEING	0x2f
#define DRM_I915_GEM_GET_CACHEING	0x30
#define DRM_I915_REG_READ	0x31

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
/* NOTE(review): DRM_IOCTL_I915_HWS_ADDR historically encodes
 * struct drm_i915_gem_init rather than struct drm_i915_hws_addr;
 * this is part of the ABI and must not be "fixed".
 */
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
230 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 231 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 232 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 233 #define DRM_IOCTL_I915_GEM_SET_CACHEING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing) 234 #define DRM_IOCTL_I915_GEM_GET_CACHEING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing) 235 #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 236 #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 237 #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 238 #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 239 #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 240 #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 241 #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 242 #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 243 #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 244 #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 245 #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 246 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 247 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + 
DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 248 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 249 #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 250 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 251 #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 252 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 253 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 254 #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 255 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 256 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 257 #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 258 259 /* Allow drivers to submit batchbuffers directly to hardware, relying 260 * on the security mechanisms provided by hardware. 261 */ 262 typedef struct drm_i915_batchbuffer { 263 int start; /* agp offset */ 264 int used; /* nr bytes in use */ 265 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 266 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 267 int num_cliprects; /* mulitpass with multiple cliprects? 
*/ 268 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 269 } drm_i915_batchbuffer_t; 270 271 /* As above, but pass a pointer to userspace buffer which can be 272 * validated by the kernel prior to sending to hardware. 273 */ 274 typedef struct _drm_i915_cmdbuffer { 275 char __user *buf; /* pointer to userspace command buffer */ 276 int sz; /* nr bytes in buf */ 277 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 278 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 279 int num_cliprects; /* mulitpass with multiple cliprects? */ 280 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 281 } drm_i915_cmdbuffer_t; 282 283 /* Userspace can request & wait on irq's: 284 */ 285 typedef struct drm_i915_irq_emit { 286 int __user *irq_seq; 287 } drm_i915_irq_emit_t; 288 289 typedef struct drm_i915_irq_wait { 290 int irq_seq; 291 } drm_i915_irq_wait_t; 292 293 /* Ioctl to query kernel params: 294 */ 295 #define I915_PARAM_IRQ_ACTIVE 1 296 #define I915_PARAM_ALLOW_BATCHBUFFER 2 297 #define I915_PARAM_LAST_DISPATCH 3 298 #define I915_PARAM_CHIPSET_ID 4 299 #define I915_PARAM_HAS_GEM 5 300 #define I915_PARAM_NUM_FENCES_AVAIL 6 301 #define I915_PARAM_HAS_OVERLAY 7 302 #define I915_PARAM_HAS_PAGEFLIPPING 8 303 #define I915_PARAM_HAS_EXECBUF2 9 304 #define I915_PARAM_HAS_BSD 10 305 #define I915_PARAM_HAS_BLT 11 306 #define I915_PARAM_HAS_RELAXED_FENCING 12 307 #define I915_PARAM_HAS_COHERENT_RINGS 13 308 #define I915_PARAM_HAS_EXEC_CONSTANTS 14 309 #define I915_PARAM_HAS_RELAXED_DELTA 15 310 #define I915_PARAM_HAS_GEN7_SOL_RESET 16 311 #define I915_PARAM_HAS_LLC 17 312 #define I915_PARAM_HAS_ALIASING_PPGTT 18 313 #define I915_PARAM_HAS_WAIT_TIMEOUT 19 314 #define I915_PARAM_HAS_SEMAPHORES 20 315 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 316 317 typedef struct drm_i915_getparam { 318 int param; 319 int __user *value; 320 } drm_i915_getparam_t; 321 322 /* Ioctl to set kernel params: 323 */ 324 #define 
I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 325 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 326 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 327 #define I915_SETPARAM_NUM_USED_FENCES 4 328 329 typedef struct drm_i915_setparam { 330 int param; 331 int value; 332 } drm_i915_setparam_t; 333 334 /* A memory manager for regions of shared memory: 335 */ 336 #define I915_MEM_REGION_AGP 1 337 338 typedef struct drm_i915_mem_alloc { 339 int region; 340 int alignment; 341 int size; 342 int __user *region_offset; /* offset from start of fb or agp */ 343 } drm_i915_mem_alloc_t; 344 345 typedef struct drm_i915_mem_free { 346 int region; 347 int region_offset; 348 } drm_i915_mem_free_t; 349 350 typedef struct drm_i915_mem_init_heap { 351 int region; 352 int size; 353 int start; 354 } drm_i915_mem_init_heap_t; 355 356 /* Allow memory manager to be torn down and re-initialized (eg on 357 * rotate): 358 */ 359 typedef struct drm_i915_mem_destroy_heap { 360 int region; 361 } drm_i915_mem_destroy_heap_t; 362 363 /* Allow X server to configure which pipes to monitor for vblank signals 364 */ 365 #define DRM_I915_VBLANK_PIPE_A 1 366 #define DRM_I915_VBLANK_PIPE_B 2 367 368 typedef struct drm_i915_vblank_pipe { 369 int pipe; 370 } drm_i915_vblank_pipe_t; 371 372 /* Schedule buffer swap at given vertical blank: 373 */ 374 typedef struct drm_i915_vblank_swap { 375 drm_drawable_t drawable; 376 enum drm_vblank_seq_type seqtype; 377 unsigned int sequence; 378 } drm_i915_vblank_swap_t; 379 380 typedef struct drm_i915_hws_addr { 381 __u64 addr; 382 } drm_i915_hws_addr_t; 383 384 struct drm_i915_gem_init { 385 /** 386 * Beginning offset in the GTT to be managed by the DRM memory 387 * manager. 388 */ 389 __u64 gtt_start; 390 /** 391 * Ending offset in the GTT to be managed by the DRM memory 392 * manager. 393 */ 394 __u64 gtt_end; 395 }; 396 397 struct drm_i915_gem_create { 398 /** 399 * Requested size for the object. 400 * 401 * The (page-aligned) allocated size for the object will be returned. 
402 */ 403 __u64 size; 404 /** 405 * Returned handle for the object. 406 * 407 * Object handles are nonzero. 408 */ 409 __u32 handle; 410 __u32 pad; 411 }; 412 413 struct drm_i915_gem_pread { 414 /** Handle for the object being read. */ 415 __u32 handle; 416 __u32 pad; 417 /** Offset into the object to read from */ 418 __u64 offset; 419 /** Length of data to read */ 420 __u64 size; 421 /** 422 * Pointer to write the data into. 423 * 424 * This is a fixed-size type for 32/64 compatibility. 425 */ 426 __u64 data_ptr; 427 }; 428 429 struct drm_i915_gem_pwrite { 430 /** Handle for the object being written to. */ 431 __u32 handle; 432 __u32 pad; 433 /** Offset into the object to write to */ 434 __u64 offset; 435 /** Length of data to write */ 436 __u64 size; 437 /** 438 * Pointer to read the data from. 439 * 440 * This is a fixed-size type for 32/64 compatibility. 441 */ 442 __u64 data_ptr; 443 }; 444 445 struct drm_i915_gem_mmap { 446 /** Handle for the object being mapped. */ 447 __u32 handle; 448 __u32 pad; 449 /** Offset in the object to map. */ 450 __u64 offset; 451 /** 452 * Length of data to map. 453 * 454 * The value will be page-aligned. 455 */ 456 __u64 size; 457 /** 458 * Returned pointer the data was mapped at. 459 * 460 * This is a fixed-size type for 32/64 compatibility. 461 */ 462 __u64 addr_ptr; 463 }; 464 465 struct drm_i915_gem_mmap_gtt { 466 /** Handle for the object being mapped. */ 467 __u32 handle; 468 __u32 pad; 469 /** 470 * Fake offset to use for subsequent mmap call 471 * 472 * This is a fixed-size type for 32/64 compatibility. 
473 */ 474 __u64 offset; 475 }; 476 477 struct drm_i915_gem_set_domain { 478 /** Handle for the object */ 479 __u32 handle; 480 481 /** New read domains */ 482 __u32 read_domains; 483 484 /** New write domain */ 485 __u32 write_domain; 486 }; 487 488 struct drm_i915_gem_sw_finish { 489 /** Handle for the object */ 490 __u32 handle; 491 }; 492 493 struct drm_i915_gem_relocation_entry { 494 /** 495 * Handle of the buffer being pointed to by this relocation entry. 496 * 497 * It's appealing to make this be an index into the mm_validate_entry 498 * list to refer to the buffer, but this allows the driver to create 499 * a relocation list for state buffers and not re-write it per 500 * exec using the buffer. 501 */ 502 __u32 target_handle; 503 504 /** 505 * Value to be added to the offset of the target buffer to make up 506 * the relocation entry. 507 */ 508 __u32 delta; 509 510 /** Offset in the buffer the relocation entry will be written into */ 511 __u64 offset; 512 513 /** 514 * Offset value of the target buffer that the relocation entry was last 515 * written as. 516 * 517 * If the buffer has the same offset as last time, we can skip syncing 518 * and writing the relocation. This value is written back out by 519 * the execbuffer ioctl when the relocation is written. 520 */ 521 __u64 presumed_offset; 522 523 /** 524 * Target memory domains read by this operation. 525 */ 526 __u32 read_domains; 527 528 /** 529 * Target memory domains written by this operation. 530 * 531 * Note that only one domain may be written by the whole 532 * execbuffer operation, so that where there are conflicts, 533 * the application will get -EINVAL back. 534 */ 535 __u32 write_domain; 536 }; 537 538 /** @{ 539 * Intel memory domains 540 * 541 * Most of these just align with the various caches in 542 * the system and are used to flush and invalidate as 543 * objects end up cached in different domains. 
544 */ 545 /** CPU cache */ 546 #define I915_GEM_DOMAIN_CPU 0x00000001 547 /** Render cache, used by 2D and 3D drawing */ 548 #define I915_GEM_DOMAIN_RENDER 0x00000002 549 /** Sampler cache, used by texture engine */ 550 #define I915_GEM_DOMAIN_SAMPLER 0x00000004 551 /** Command queue, used to load batch buffers */ 552 #define I915_GEM_DOMAIN_COMMAND 0x00000008 553 /** Instruction cache, used by shader programs */ 554 #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 555 /** Vertex address cache */ 556 #define I915_GEM_DOMAIN_VERTEX 0x00000020 557 /** GTT domain - aperture and scanout */ 558 #define I915_GEM_DOMAIN_GTT 0x00000040 559 /** @} */ 560 561 struct drm_i915_gem_exec_object { 562 /** 563 * User's handle for a buffer to be bound into the GTT for this 564 * operation. 565 */ 566 __u32 handle; 567 568 /** Number of relocations to be performed on this buffer */ 569 __u32 relocation_count; 570 /** 571 * Pointer to array of struct drm_i915_gem_relocation_entry containing 572 * the relocations to be performed in this buffer. 573 */ 574 __u64 relocs_ptr; 575 576 /** Required alignment in graphics aperture */ 577 __u64 alignment; 578 579 /** 580 * Returned value of the updated offset of the object, for future 581 * presumed_offset writes. 582 */ 583 __u64 offset; 584 }; 585 586 struct drm_i915_gem_execbuffer { 587 /** 588 * List of buffers to be validated with their relocations to be 589 * performend on them. 590 * 591 * This is a pointer to an array of struct drm_i915_gem_validate_entry. 592 * 593 * These buffers must be listed in an order such that all relocations 594 * a buffer is performing refer to buffers that have already appeared 595 * in the validate list. 596 */ 597 __u64 buffers_ptr; 598 __u32 buffer_count; 599 600 /** Offset in the batchbuffer to start execution from. 
*/ 601 __u32 batch_start_offset; 602 /** Bytes used in batchbuffer from batch_start_offset */ 603 __u32 batch_len; 604 __u32 DR1; 605 __u32 DR4; 606 __u32 num_cliprects; 607 /** This is a struct drm_clip_rect *cliprects */ 608 __u64 cliprects_ptr; 609 }; 610 611 struct drm_i915_gem_exec_object2 { 612 /** 613 * User's handle for a buffer to be bound into the GTT for this 614 * operation. 615 */ 616 __u32 handle; 617 618 /** Number of relocations to be performed on this buffer */ 619 __u32 relocation_count; 620 /** 621 * Pointer to array of struct drm_i915_gem_relocation_entry containing 622 * the relocations to be performed in this buffer. 623 */ 624 __u64 relocs_ptr; 625 626 /** Required alignment in graphics aperture */ 627 __u64 alignment; 628 629 /** 630 * Returned value of the updated offset of the object, for future 631 * presumed_offset writes. 632 */ 633 __u64 offset; 634 635 #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 636 __u64 flags; 637 __u64 rsvd1; 638 __u64 rsvd2; 639 }; 640 641 struct drm_i915_gem_execbuffer2 { 642 /** 643 * List of gem_exec_object2 structs 644 */ 645 __u64 buffers_ptr; 646 __u32 buffer_count; 647 648 /** Offset in the batchbuffer to start execution from. */ 649 __u32 batch_start_offset; 650 /** Bytes used in batchbuffer from batch_start_offset */ 651 __u32 batch_len; 652 __u32 DR1; 653 __u32 DR4; 654 __u32 num_cliprects; 655 /** This is a struct drm_clip_rect *cliprects */ 656 __u64 cliprects_ptr; 657 #define I915_EXEC_RING_MASK (7<<0) 658 #define I915_EXEC_DEFAULT (0<<0) 659 #define I915_EXEC_RENDER (1<<0) 660 #define I915_EXEC_BSD (2<<0) 661 #define I915_EXEC_BLT (3<<0) 662 663 /* Used for switching the constants addressing mode on gen4+ RENDER ring. 664 * Gen6+ only supports relative addressing to dynamic state (default) and 665 * absolute addressing. 666 * 667 * These flags are ignored for the BSD and BLT rings. 
668 */ 669 #define I915_EXEC_CONSTANTS_MASK (3<<6) 670 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 671 #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 672 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 673 __u64 flags; 674 __u64 rsvd1; /* now used for context info */ 675 __u64 rsvd2; 676 }; 677 678 /** Resets the SO write offset registers for transform feedback on gen7. */ 679 #define I915_EXEC_GEN7_SOL_RESET (1<<8) 680 681 #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 682 #define i915_execbuffer2_set_context_id(eb2, context) \ 683 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 684 #define i915_execbuffer2_get_context_id(eb2) \ 685 ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) 686 687 struct drm_i915_gem_pin { 688 /** Handle of the buffer to be pinned. */ 689 __u32 handle; 690 __u32 pad; 691 692 /** alignment required within the aperture */ 693 __u64 alignment; 694 695 /** Returned GTT offset of the buffer. */ 696 __u64 offset; 697 }; 698 699 struct drm_i915_gem_unpin { 700 /** Handle of the buffer to be unpinned. */ 701 __u32 handle; 702 __u32 pad; 703 }; 704 705 struct drm_i915_gem_busy { 706 /** Handle of the buffer to check for busy */ 707 __u32 handle; 708 709 /** Return busy status (1 if busy, 0 if idle). 710 * The high word is used to indicate on which rings the object 711 * currently resides: 712 * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) 713 */ 714 __u32 busy; 715 }; 716 717 #define I915_CACHEING_NONE 0 718 #define I915_CACHEING_CACHED 1 719 720 struct drm_i915_gem_cacheing { 721 /** 722 * Handle of the buffer to set/get the cacheing level of. */ 723 __u32 handle; 724 725 /** 726 * Cacheing level to apply or return value 727 * 728 * bits0-15 are for generic cacheing control (i.e. the above defined 729 * values). bits16-31 are reserved for platform-specific variations 730 * (e.g. l3$ caching on gen7). 
*/ 731 __u32 cacheing; 732 }; 733 734 #define I915_TILING_NONE 0 735 #define I915_TILING_X 1 736 #define I915_TILING_Y 2 737 738 #define I915_BIT_6_SWIZZLE_NONE 0 739 #define I915_BIT_6_SWIZZLE_9 1 740 #define I915_BIT_6_SWIZZLE_9_10 2 741 #define I915_BIT_6_SWIZZLE_9_11 3 742 #define I915_BIT_6_SWIZZLE_9_10_11 4 743 /* Not seen by userland */ 744 #define I915_BIT_6_SWIZZLE_UNKNOWN 5 745 /* Seen by userland. */ 746 #define I915_BIT_6_SWIZZLE_9_17 6 747 #define I915_BIT_6_SWIZZLE_9_10_17 7 748 749 struct drm_i915_gem_set_tiling { 750 /** Handle of the buffer to have its tiling state updated */ 751 __u32 handle; 752 753 /** 754 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 755 * I915_TILING_Y). 756 * 757 * This value is to be set on request, and will be updated by the 758 * kernel on successful return with the actual chosen tiling layout. 759 * 760 * The tiling mode may be demoted to I915_TILING_NONE when the system 761 * has bit 6 swizzling that can't be managed correctly by GEM. 762 * 763 * Buffer contents become undefined when changing tiling_mode. 764 */ 765 __u32 tiling_mode; 766 767 /** 768 * Stride in bytes for the object when in I915_TILING_X or 769 * I915_TILING_Y. 770 */ 771 __u32 stride; 772 773 /** 774 * Returned address bit 6 swizzling required for CPU access through 775 * mmap mapping. 776 */ 777 __u32 swizzle_mode; 778 }; 779 780 struct drm_i915_gem_get_tiling { 781 /** Handle of the buffer to get tiling state for. */ 782 __u32 handle; 783 784 /** 785 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 786 * I915_TILING_Y). 787 */ 788 __u32 tiling_mode; 789 790 /** 791 * Returned address bit 6 swizzling required for CPU access through 792 * mmap mapping. 
793 */ 794 __u32 swizzle_mode; 795 }; 796 797 struct drm_i915_gem_get_aperture { 798 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 799 __u64 aper_size; 800 801 /** 802 * Available space in the aperture used by i915_gem_execbuffer, in 803 * bytes 804 */ 805 __u64 aper_available_size; 806 }; 807 808 struct drm_i915_get_pipe_from_crtc_id { 809 /** ID of CRTC being requested **/ 810 __u32 crtc_id; 811 812 /** pipe of requested CRTC **/ 813 __u32 pipe; 814 }; 815 816 #define I915_MADV_WILLNEED 0 817 #define I915_MADV_DONTNEED 1 818 #define __I915_MADV_PURGED 2 /* internal state */ 819 820 struct drm_i915_gem_madvise { 821 /** Handle of the buffer to change the backing store advice */ 822 __u32 handle; 823 824 /* Advice: either the buffer will be needed again in the near future, 825 * or wont be and could be discarded under memory pressure. 826 */ 827 __u32 madv; 828 829 /** Whether the backing store still exists. */ 830 __u32 retained; 831 }; 832 833 /* flags */ 834 #define I915_OVERLAY_TYPE_MASK 0xff 835 #define I915_OVERLAY_YUV_PLANAR 0x01 836 #define I915_OVERLAY_YUV_PACKED 0x02 837 #define I915_OVERLAY_RGB 0x03 838 839 #define I915_OVERLAY_DEPTH_MASK 0xff00 840 #define I915_OVERLAY_RGB24 0x1000 841 #define I915_OVERLAY_RGB16 0x2000 842 #define I915_OVERLAY_RGB15 0x3000 843 #define I915_OVERLAY_YUV422 0x0100 844 #define I915_OVERLAY_YUV411 0x0200 845 #define I915_OVERLAY_YUV420 0x0300 846 #define I915_OVERLAY_YUV410 0x0400 847 848 #define I915_OVERLAY_SWAP_MASK 0xff0000 849 #define I915_OVERLAY_NO_SWAP 0x000000 850 #define I915_OVERLAY_UV_SWAP 0x010000 851 #define I915_OVERLAY_Y_SWAP 0x020000 852 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 853 854 #define I915_OVERLAY_FLAGS_MASK 0xff000000 855 #define I915_OVERLAY_ENABLE 0x01000000 856 857 struct drm_intel_overlay_put_image { 858 /* various flags and src format description */ 859 __u32 flags; 860 /* source picture description */ 861 __u32 bo_handle; 862 /* stride values and offsets are in 
bytes, buffer relative */ 863 __u16 stride_Y; /* stride for packed formats */ 864 __u16 stride_UV; 865 __u32 offset_Y; /* offset for packet formats */ 866 __u32 offset_U; 867 __u32 offset_V; 868 /* in pixels */ 869 __u16 src_width; 870 __u16 src_height; 871 /* to compensate the scaling factors for partially covered surfaces */ 872 __u16 src_scan_width; 873 __u16 src_scan_height; 874 /* output crtc description */ 875 __u32 crtc_id; 876 __u16 dst_x; 877 __u16 dst_y; 878 __u16 dst_width; 879 __u16 dst_height; 880 }; 881 882 /* flags */ 883 #define I915_OVERLAY_UPDATE_ATTRS (1<<0) 884 #define I915_OVERLAY_UPDATE_GAMMA (1<<1) 885 struct drm_intel_overlay_attrs { 886 __u32 flags; 887 __u32 color_key; 888 __s32 brightness; 889 __u32 contrast; 890 __u32 saturation; 891 __u32 gamma0; 892 __u32 gamma1; 893 __u32 gamma2; 894 __u32 gamma3; 895 __u32 gamma4; 896 __u32 gamma5; 897 }; 898 899 /* 900 * Intel sprite handling 901 * 902 * Color keying works with a min/mask/max tuple. Both source and destination 903 * color keying is allowed. 904 * 905 * Source keying: 906 * Sprite pixels within the min & max values, masked against the color channels 907 * specified in the mask field, will be transparent. All other pixels will 908 * be displayed on top of the primary plane. For RGB surfaces, only the min 909 * and mask fields will be used; ranged compares are not allowed. 910 * 911 * Destination keying: 912 * Primary plane pixels that match the min value, masked against the color 913 * channels specified in the mask field, will be replaced by corresponding 914 * pixels from the sprite plane. 915 * 916 * Note that source & destination keying are exclusive; only one can be 917 * active on a given plane. 
918 */ 919 920 #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 921 #define I915_SET_COLORKEY_DESTINATION (1<<1) 922 #define I915_SET_COLORKEY_SOURCE (1<<2) 923 struct drm_intel_sprite_colorkey { 924 __u32 plane_id; 925 __u32 min_value; 926 __u32 channel_mask; 927 __u32 max_value; 928 __u32 flags; 929 }; 930 931 struct drm_i915_gem_wait { 932 /** Handle of BO we shall wait on */ 933 __u32 bo_handle; 934 __u32 flags; 935 /** Number of nanoseconds to wait, Returns time remaining. */ 936 __s64 timeout_ns; 937 }; 938 939 struct drm_i915_gem_context_create { 940 /* output: id of new context*/ 941 __u32 ctx_id; 942 __u32 pad; 943 }; 944 945 struct drm_i915_gem_context_destroy { 946 __u32 ctx_id; 947 __u32 pad; 948 }; 949 950 struct drm_i915_reg_read { 951 __u64 offset; 952 __u64 val; /* Return value */ 953 }; 954 #endif /* _I915_DRM_H_ */ 955