1 /* 2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sub license, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial portions 15 * of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * 25 */ 26 27 #ifndef _I915_DRM_H_ 28 #define _I915_DRM_H_ 29 30 #include "drm.h" 31 32 /* Please note that modifications to all structs defined here are 33 * subject to backwards-compatibility constraints. 34 */ 35 36 #ifdef __KERNEL__ 37 /* For use by IPS driver */ 38 extern unsigned long i915_read_mch_val(void); 39 extern bool i915_gpu_raise(void); 40 extern bool i915_gpu_lower(void); 41 extern bool i915_gpu_busy(void); 42 extern bool i915_gpu_turbo_disable(void); 43 #endif 44 45 /* Each region is a minimum of 16k, and there are at most 255 of them. 
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
					 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

/* Argument for the legacy DMA init/cleanup/resume ioctl; 'func' selects
 * which of the three operations to perform.
 */
typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

/* Legacy shared-memory area (SAREA) layout.
 * NOTE(review): this layout is userspace ABI (see the compatibility note at
 * the top of the file) — field order, types and sizes must never change.
 */
typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/* I915 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
/* Per-driver ioctl function numbers (added to DRM_COMMAND_BASE below).
 * These values are userspace ABI: never renumber or reuse them.
 * (0x10 and 0x12 are intentionally unassigned gaps.)
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHEING	0x2f
#define DRM_I915_GEM_GET_CACHEING	0x30
#define DRM_I915_REG_READ	0x31

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
/* NOTE(review): HWS_ADDR carries struct drm_i915_gem_init in its ioctl
 * encoding although a drm_i915_hws_addr struct exists below — looks like a
 * copy-paste from GEM_INIT, but the encoded size is userspace ABI, so it is
 * kept as-is. Confirm against the driver's ioctl table before touching.
 */
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHEING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
#define DRM_IOCTL_I915_GEM_GET_CACHEING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
/* NOTE(review): GET_SPRITE_COLORKEY deliberately(?) reuses the
 * SET_SPRITE_COLORKEY function number here — looks like a copy-paste, but
 * the encoded value is userspace ABI; verify against the driver's ioctl
 * table before changing.
 */
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects?
 */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;	/* written by the kernel through this pointer */
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC		 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23

typedef struct drm_i915_getparam {
	int param;		/* in: one of the I915_PARAM_* values above */
	int __user *value;	/* out: result written through this pointer */
} drm_i915_getparam_t;

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4

typedef struct drm_i915_setparam {
	int param;		/* one of the I915_SETPARAM_* values above */
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

/* Hardware status page address (NOTE(review): the HWS_ADDR ioctl macro
 * above is encoded with struct drm_i915_gem_init, not this struct).
 */
typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
	__u64 flags;
	__u64 rsvd1;
	__u64 rsvd2;
};

struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
/* Ring selection: low three bits of the flags field. */
#define I915_EXEC_RING_MASK              (7<<0)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/* The context id lives in the low 32 bits of execbuffer2's rsvd1 field. */
#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)

struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status (1 if busy, 0 if idle).
	 * The high word is used to indicate on which rings the object
	 * currently resides:
	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
	 */
	__u32 busy;
};

/* NOTE(review): "CACHEING" is a misspelling of "CACHING", but these
 * identifiers are userspace ABI and cannot be renamed without keeping
 * compatibility aliases.
 */
#define I915_CACHEING_NONE		0
#define I915_CACHEING_CACHED		1

struct drm_i915_gem_cacheing {
	/**
	 * Handle of the buffer to set/get the cacheing level of. */
	__u32 handle;

	/**
	 * Cacheing level to apply or return value
	 *
	 * bits0-15 are for generic cacheing control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 cacheing;
};

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};

struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};

/* flags */
#define I915_OVERLAY_TYPE_MASK		0xff
#define I915_OVERLAY_YUV_PLANAR		0x01
#define I915_OVERLAY_YUV_PACKED		0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y;		/* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y;		/* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple.  Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent.  All other pixels will
 * be displayed on top of the primary plane.  For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;		/* I915_SET_COLORKEY_* */
};

struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait; returns time remaining. */
	__s64 timeout_ns;
};

struct drm_i915_gem_context_create {
	/* output: id of new context */
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_reg_read {
	__u64 offset;
	__u64 val; /* Return value */
};
#endif				/* _I915_DRM_H_ */