/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Generated just before an attempt to reset the GPU. The
 *	value supplied with the event is always 1. NOTE: Disabling reset via
 *	module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"
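
/*
 * Example (illustrative sketch, not part of this header): listening for the
 * uevents above with libudev. The "drm" subsystem filter and the property
 * names follow the definitions above; the variable names are placeholders,
 * and a real program would poll() udev_monitor_get_fd() instead of spinning.
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon =
 *		udev_monitor_new_from_netlink(udev, "kernel");
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *
 *	for (;;) {
 *		struct udev_device *dev = udev_monitor_receive_device(mon);
 *		const char *val;
 *
 *		if (!dev)
 *			continue;
 *		val = udev_device_get_property_value(dev, I915_ERROR_UEVENT);
 *		if (val)
 *			fprintf(stderr, "i915 error state: %s\n", val);
 *		udev_device_unref(dev);
 *	}
 */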

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1
#define I915_BOX_FLIP		0x2
#define I915_BOX_WAIT		0x4
#define I915_BOX_TEXTURE_LOAD	0x8
#define I915_BOX_LOST_CONTEXT	0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR	0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN	0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE		1
#define I915_PARAM_ALLOW_BATCHBUFFER	2
#define I915_PARAM_LAST_DISPATCH	3
#define I915_PARAM_CHIPSET_ID		4
#define I915_PARAM_HAS_GEM		5
#define I915_PARAM_NUM_FENCES_AVAIL	6
#define I915_PARAM_HAS_OVERLAY		7
#define I915_PARAM_HAS_PAGEFLIPPING	8
#define I915_PARAM_HAS_EXECBUF2		9
#define I915_PARAM_HAS_BSD		10
#define I915_PARAM_HAS_BLT		11
#define I915_PARAM_HAS_RELAXED_FENCING	12
#define I915_PARAM_HAS_COHERENT_RINGS	13
#define I915_PARAM_HAS_EXEC_CONSTANTS	14
#define I915_PARAM_HAS_RELAXED_DELTA	15
#define I915_PARAM_HAS_GEN7_SOL_RESET	16
#define I915_PARAM_HAS_LLC		17
#define I915_PARAM_HAS_ALIASING_PPGTT	18
#define I915_PARAM_HAS_WAIT_TIMEOUT	19
#define I915_PARAM_HAS_SEMAPHORES	20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	21
#define I915_PARAM_HAS_VEBOX		22
#define I915_PARAM_HAS_SECURE_BATCHES	23
#define I915_PARAM_HAS_PINNED_BATCHES	24
#define I915_PARAM_HAS_EXEC_NO_RELOC	25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT	26
#define I915_PARAM_HAS_WT		27
#define I915_PARAM_CMD_PARSER_VERSION	28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION		30
#define I915_PARAM_HAS_BSD2		31
#define I915_PARAM_REVISION		32
#define I915_PARAM_SUBSLICE_TOTAL	33
#define I915_PARAM_EU_TOTAL		34
#define I915_PARAM_HAS_GPU_RESET	35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	37
#define I915_PARAM_HAS_POOLED_EU	38
#define I915_PARAM_MIN_EU_IN_POOL	39
#define I915_PARAM_MMAP_GTT_VERSION	40

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 */
#define I915_PARAM_HAS_SCHEDULER	41
#define I915_PARAM_HUC_STATUS		42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user-specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	45

#define I915_PARAM_SLICE_MASK		46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY	49

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
} drm_i915_getparam_t;
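
/*
 * Example (illustrative, not part of the uAPI): querying one of the
 * I915_PARAM_* values above. Assumes "fd" is an open i915 DRM file
 * descriptor; plain ioctl(2) is shown, libdrm's drmIoctl() is the usual
 * wrapper. Unknown parameters fail with EINVAL, which is how userspace
 * probes for features on older kernels.
 *
 *	int has_exec_async = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_EXEC_ASYNC,
 *		.value = &has_exec_async,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_exec_async)
 *		use_exec_object_async = true;
 */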
/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START	1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY	2
#define I915_SETPARAM_ALLOW_BATCHBUFFER		3
#define I915_SETPARAM_NUM_USED_FENCES		4

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
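
/*
 * Example (illustrative): creating a buffer object and uploading data with
 * DRM_IOCTL_I915_GEM_CREATE and DRM_IOCTL_I915_GEM_PWRITE. "fd", "data" and
 * "len" are placeholders. The allocated size is rounded up to a page and
 * written back into create.size; handles are per-fd and are released with
 * the generic DRM_IOCTL_GEM_CLOSE.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	struct drm_i915_gem_pwrite pwrite;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		return -errno;
 *
 *	pwrite = (struct drm_i915_gem_pwrite) {
 *		.handle = create.handle,
 *		.offset = 0,
 *		.size = len,
 *		.data_ptr = (__u64)(uintptr_t)data,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		return -errno;
 */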
struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation. This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */
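
/*
 * Example (illustrative): direct CPU access to an object through
 * DRM_IOCTL_I915_GEM_MMAP, bracketed by SET_DOMAIN and SW_FINISH so the
 * kernel can flush caches appropriately. "fd" and "handle" are placeholders.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size = 4096,
 *	};
 *	struct drm_i915_gem_set_domain set_domain = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	struct drm_i915_gem_sw_finish sw_finish = { .handle = handle };
 *	void *ptr;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
 *		return -errno;
 *	ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 *	memset(ptr, 0, 4096);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sw_finish);
 */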
struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};
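
/*
 * Example (illustrative): how a relocation entry ties a batch to a target
 * buffer. Userspace emits the last-known address into the batch (assuming a
 * 32-bit address on older GENs) and records where it did so; if the target
 * has moved, the kernel rewrites that dword with target->offset + delta
 * during execbuf. All names here are placeholders.
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_handle,
 *		.delta = 0,
 *		.offset = batch_dword * 4,
 *		.presumed_offset = presumed_target_offset,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = 0,
 *	};
 *
 *	batch[batch_dword] = presumed_target_offset + reloc.delta;
 *
 * The relocation array is then referenced from the execobject's relocs_ptr
 * with relocation_count set accordingly.
 */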
struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC		(1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE		(1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};
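
/*
 * Example (illustrative): a softpinned execobject. With EXEC_OBJECT_PINNED
 * the offset field is an input chosen by userspace rather than an output,
 * and EXEC_OBJECT_SUPPORTS_48B_ADDRESS allows the address to lie above 4GiB.
 * Support is reported through I915_PARAM_HAS_EXEC_SOFTPIN; "handle" is a
 * placeholder.
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = handle,
 *		.offset = 0x100000000ull,
 *		.flags = EXEC_OBJECT_PINNED |
 *			 EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
 *	};
 */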
struct drm_i915_gem_exec_fence {
	/**
	 * User's handle for a drm_syncobj to wait on or signal.
	 */
	__u32 handle;

#define I915_EXEC_FENCE_WAIT		(1<<0)
#define I915_EXEC_FENCE_SIGNAL		(1<<1)
	__u32 flags;
};

struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/**
	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
	 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
	 * struct drm_i915_gem_exec_fence *fences.
	 */
	__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK		(7<<0)
#define I915_EXEC_DEFAULT		(0<<0)
#define I915_EXEC_RENDER		(1<<0)
#define I915_EXEC_BSD			(2<<0)
#define I915_EXEC_BLT			(3<<0)
#define I915_EXEC_VEBOX			(4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL	(0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE	(2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses, and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 *  the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER	(1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_file fds and passed
 * either to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that
 * a flip will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of the
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction, and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST		(1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of struct drm_i915_gem_exec_fence structures which specify
 * a set of dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY		(1<<19)

#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
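
/*
 * Example (illustrative): submitting a single batch buffer with
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, requesting an out-fence. "fd",
 * "batch_handle", "batch_bytes" and "ctx_id" are placeholders. The returned
 * sync_file fd must eventually be close()d; availability of the in/out
 * fence flags is advertised by I915_PARAM_HAS_EXEC_FENCE.
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (__u64)(uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_FENCE_OUT,
 *	};
 *	int out_fence;
 *
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf))
 *		return -errno;
 *	out_fence = execbuf.rsvd2 >> 32;
 */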
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide. There are race conditions which prevent the
	 * report of which engines are busy from being always accurate.
	 * However, the converse is not true. If the object is idle, the
	 * result of the ioctl, that all engines are idle, is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engines on which the object is being read, and the
	 * engine on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicates if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine for the last write is reported.
	 *
	 * The high word (bits 16:31) are a bitmask of which engines are
	 * currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine is the same as specified in the
	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
	 * the I915_EXEC_RENDER engine for execution, and so it is never
	 * reported as active itself. Some hardware may have parallel
	 * execution engines, e.g. multiple media engines, which are
	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
	 * so are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should only be used as a heuristic.
	 */
	__u32 busy;
};
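
/*
 * Example (illustrative): decoding drm_i915_gem_busy.busy as described
 * above. "fd" and "handle" are placeholders. A zero value means the object
 * is idle; write_engine is the I915_EXEC_* id of the engine performing the
 * last write, and read_engines has a bit set for every engine id currently
 * reading from the object.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		__u32 write_engine = busy.busy & 0xffff;
 *		__u32 read_engines = busy.busy >> 16;
 *
 *		object_is_idle = !busy.busy;
 *	}
 */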
/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with CPU caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with CPU caches and furthermore the data is cached in
 * last-level caches shared between CPU cores and the GPU GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits 0-15 are for generic caching control (i.e. the above defined
	 * values). bits 16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
#define I915_TILING_LAST	I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
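
/*
 * Example (illustrative): requesting X tiling for an object. The kernel may
 * demote the request to I915_TILING_NONE (see above), so the returned
 * tiling_mode and swizzle_mode should be inspected. "fd", "handle" and
 * "stride" are placeholders; the stride must match the tiled layout.
 *
 *	struct drm_i915_gem_set_tiling set_tiling = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling))
 *		return -errno;
 *	if (set_tiling.tiling_mode != I915_TILING_X)
 *		fall_back_to_linear();
 */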
struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};

/* flags */
#define I915_OVERLAY_TYPE_MASK		0xff
#define I915_OVERLAY_YUV_PLANAR		0x01
#define I915_OVERLAY_YUV_PACKED		0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple. Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent. All other pixels will
 * be displayed on top of the primary plane. For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};

struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait. Returns the time remaining. */
	__s64 timeout_ns;
};
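
/*
 * Example (illustrative): waiting for rendering to an object to complete,
 * with a one second timeout. A negative timeout_ns waits indefinitely, and
 * on timeout the ioctl is assumed to fail with ETIME while writing the time
 * remaining back into timeout_ns. "fd" and "handle" are placeholders.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000 * 1000 * 1000,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 *	if (ret && errno == ETIME)
 *		object_still_busy = true;
 */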
struct drm_i915_gem_context_create {
	/* output: id of new context*/
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_reg_read {
	/*
	 * Register offset.
	 * For 64bit wide registers where the upper 32bits don't immediately
	 * follow the lower 32bits, the offset of the lower 32bits must
	 * be specified
	 */
	__u64 offset;
	__u64 val; /* Return value */
};
/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if using the default
 *   single-instruction 8-byte read; to work around that, use
 *   offset (0x2358 | 1) instead.
 *
 */

struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};

struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};

struct drm_i915_gem_context_param {
	__u32 ctx_id;
	__u32 size;
	__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
#define I915_CONTEXT_PARAM_BANNABLE	0x5
	__u64 value;
};
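
/*
 * Example (illustrative): creating a hardware context and adjusting it via
 * the context parameter interface above. Marking a context non-bannable may
 * require elevated privileges; "fd" is a placeholder for an open i915 DRM
 * fd. The resulting ctx_id is what i915_execbuffer2_set_context_id()
 * expects, and the context is destroyed with
 * DRM_IOCTL_I915_GEM_CONTEXT_DESTROY.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_param param;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
 *		return -errno;
 *
 *	param = (struct drm_i915_gem_context_param) {
 *		.ctx_id = create.ctx_id,
 *		.param = I915_CONTEXT_PARAM_BANNABLE,
 *		.value = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &param);
 */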
enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};

enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
#define I915_PERF_FLAG_DISABLED		(1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};

/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try to read a stream that is disabled.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)

/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the workhorse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 *     struct {
	 *         struct drm_i915_perf_record_header header;
	 *
	 *         { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 *     };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};

/**
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	__u64 __user mux_regs_ptr;
	__u64 __user boolean_regs_ptr;
	__u64 __user flex_regs_ptr;
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_I915_DRM_H_ */