/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24718dceddSDavid Howells * 25718dceddSDavid Howells */ 26718dceddSDavid Howells 27718dceddSDavid Howells #ifndef _UAPI_I915_DRM_H_ 28718dceddSDavid Howells #define _UAPI_I915_DRM_H_ 29718dceddSDavid Howells 301049102fSGabriel Laskar #include "drm.h" 31718dceddSDavid Howells 32b1c1f5c4SEmil Velikov #if defined(__cplusplus) 33b1c1f5c4SEmil Velikov extern "C" { 34b1c1f5c4SEmil Velikov #endif 35b1c1f5c4SEmil Velikov 36718dceddSDavid Howells /* Please note that modifications to all structs defined here are 37718dceddSDavid Howells * subject to backwards-compatibility constraints. 38718dceddSDavid Howells */ 39718dceddSDavid Howells 40cce723edSBen Widawsky /** 41cce723edSBen Widawsky * DOC: uevents generated by i915 on it's device node 42cce723edSBen Widawsky * 43cce723edSBen Widawsky * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch 44cce723edSBen Widawsky * event from the gpu l3 cache. Additional information supplied is ROW, 4535a85ac6SBen Widawsky * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep 4635a85ac6SBen Widawsky * track of these events and if a specific cache-line seems to have a 4735a85ac6SBen Widawsky * persistent error remap it with the l3 remapping tool supplied in 4835a85ac6SBen Widawsky * intel-gpu-tools. The value supplied with the event is always 1. 49cce723edSBen Widawsky * 50cce723edSBen Widawsky * I915_ERROR_UEVENT - Generated upon error detection, currently only via 51cce723edSBen Widawsky * hangcheck. The error detection event is a good indicator of when things 52cce723edSBen Widawsky * began to go badly. The value supplied with the event is a 1 upon error 53cce723edSBen Widawsky * detection, and a 0 upon reset completion, signifying no more error 54cce723edSBen Widawsky * exists. NOTE: Disabling hangcheck or reset via module parameter will 55cce723edSBen Widawsky * cause the related events to not be seen. 
56cce723edSBen Widawsky * 57cce723edSBen Widawsky * I915_RESET_UEVENT - Event is generated just before an attempt to reset the 58cce723edSBen Widawsky * the GPU. The value supplied with the event is always 1. NOTE: Disable 59cce723edSBen Widawsky * reset via module parameter will cause this event to not be seen. 60cce723edSBen Widawsky */ 61cce723edSBen Widawsky #define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" 62cce723edSBen Widawsky #define I915_ERROR_UEVENT "ERROR" 63cce723edSBen Widawsky #define I915_RESET_UEVENT "RESET" 64718dceddSDavid Howells 653373ce2eSImre Deak /* 663373ce2eSImre Deak * MOCS indexes used for GPU surfaces, defining the cacheability of the 673373ce2eSImre Deak * surface data and the coherency for this data wrt. CPU vs. GPU accesses. 683373ce2eSImre Deak */ 693373ce2eSImre Deak enum i915_mocs_table_index { 703373ce2eSImre Deak /* 713373ce2eSImre Deak * Not cached anywhere, coherency between CPU and GPU accesses is 723373ce2eSImre Deak * guaranteed. 733373ce2eSImre Deak */ 743373ce2eSImre Deak I915_MOCS_UNCACHED, 753373ce2eSImre Deak /* 763373ce2eSImre Deak * Cacheability and coherency controlled by the kernel automatically 773373ce2eSImre Deak * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current 783373ce2eSImre Deak * usage of the surface (used for display scanout or not). 793373ce2eSImre Deak */ 803373ce2eSImre Deak I915_MOCS_PTE, 813373ce2eSImre Deak /* 823373ce2eSImre Deak * Cached in all GPU caches available on the platform. 833373ce2eSImre Deak * Coherency between CPU and GPU accesses to the surface is not 843373ce2eSImre Deak * guaranteed without extra synchronization. 853373ce2eSImre Deak */ 863373ce2eSImre Deak I915_MOCS_CACHED, 873373ce2eSImre Deak }; 883373ce2eSImre Deak 891803fcbcSTvrtko Ursulin /* 901803fcbcSTvrtko Ursulin * Different engines serve different roles, and there may be more than one 911803fcbcSTvrtko Ursulin * engine serving each role. 
enum drm_i915_gem_engine_class provides a 921803fcbcSTvrtko Ursulin * classification of the role of the engine, which may be used when requesting 931803fcbcSTvrtko Ursulin * operations to be performed on a certain subset of engines, or for providing 941803fcbcSTvrtko Ursulin * information about that group. 951803fcbcSTvrtko Ursulin */ 961803fcbcSTvrtko Ursulin enum drm_i915_gem_engine_class { 971803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_RENDER = 0, 981803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_COPY = 1, 991803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_VIDEO = 2, 1001803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, 1011803fcbcSTvrtko Ursulin 1021803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_INVALID = -1 1031803fcbcSTvrtko Ursulin }; 1041803fcbcSTvrtko Ursulin 105b46a33e2STvrtko Ursulin /** 106b46a33e2STvrtko Ursulin * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915 107b46a33e2STvrtko Ursulin * 108b46a33e2STvrtko Ursulin */ 109b46a33e2STvrtko Ursulin 110b46a33e2STvrtko Ursulin enum drm_i915_pmu_engine_sample { 111b46a33e2STvrtko Ursulin I915_SAMPLE_BUSY = 0, 112b46a33e2STvrtko Ursulin I915_SAMPLE_WAIT = 1, 113b552ae44STvrtko Ursulin I915_SAMPLE_SEMA = 2 114b46a33e2STvrtko Ursulin }; 115b46a33e2STvrtko Ursulin 116b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_BITS (4) 117b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_MASK (0xf) 118b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_INSTANCE_BITS (8) 119b46a33e2STvrtko Ursulin #define I915_PMU_CLASS_SHIFT \ 120b46a33e2STvrtko Ursulin (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS) 121b46a33e2STvrtko Ursulin 122b46a33e2STvrtko Ursulin #define __I915_PMU_ENGINE(class, instance, sample) \ 123b46a33e2STvrtko Ursulin ((class) << I915_PMU_CLASS_SHIFT | \ 124b46a33e2STvrtko Ursulin (instance) << I915_PMU_SAMPLE_BITS | \ 125b46a33e2STvrtko Ursulin (sample)) 126b46a33e2STvrtko Ursulin 127b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_BUSY(class, instance) \ 128b46a33e2STvrtko Ursulin 
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY) 129b46a33e2STvrtko Ursulin 130b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_WAIT(class, instance) \ 131b46a33e2STvrtko Ursulin __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT) 132b46a33e2STvrtko Ursulin 133b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_SEMA(class, instance) \ 134b46a33e2STvrtko Ursulin __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA) 135b46a33e2STvrtko Ursulin 136b46a33e2STvrtko Ursulin #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) 137b46a33e2STvrtko Ursulin 138b46a33e2STvrtko Ursulin #define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0) 139b46a33e2STvrtko Ursulin #define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1) 1400cd4684dSTvrtko Ursulin #define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2) 1416060b6aeSTvrtko Ursulin #define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3) 1426060b6aeSTvrtko Ursulin 1433452fa30STvrtko Ursulin #define I915_PMU_LAST I915_PMU_RC6_RESIDENCY 144b46a33e2STvrtko Ursulin 145718dceddSDavid Howells /* Each region is a minimum of 16k, and there are at most 255 of them. 
146718dceddSDavid Howells */ 147718dceddSDavid Howells #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 148718dceddSDavid Howells * of chars for next/prev indices */ 149718dceddSDavid Howells #define I915_LOG_MIN_TEX_REGION_SIZE 14 150718dceddSDavid Howells 151718dceddSDavid Howells typedef struct _drm_i915_init { 152718dceddSDavid Howells enum { 153718dceddSDavid Howells I915_INIT_DMA = 0x01, 154718dceddSDavid Howells I915_CLEANUP_DMA = 0x02, 155718dceddSDavid Howells I915_RESUME_DMA = 0x03 156718dceddSDavid Howells } func; 157718dceddSDavid Howells unsigned int mmio_offset; 158718dceddSDavid Howells int sarea_priv_offset; 159718dceddSDavid Howells unsigned int ring_start; 160718dceddSDavid Howells unsigned int ring_end; 161718dceddSDavid Howells unsigned int ring_size; 162718dceddSDavid Howells unsigned int front_offset; 163718dceddSDavid Howells unsigned int back_offset; 164718dceddSDavid Howells unsigned int depth_offset; 165718dceddSDavid Howells unsigned int w; 166718dceddSDavid Howells unsigned int h; 167718dceddSDavid Howells unsigned int pitch; 168718dceddSDavid Howells unsigned int pitch_bits; 169718dceddSDavid Howells unsigned int back_pitch; 170718dceddSDavid Howells unsigned int depth_pitch; 171718dceddSDavid Howells unsigned int cpp; 172718dceddSDavid Howells unsigned int chipset; 173718dceddSDavid Howells } drm_i915_init_t; 174718dceddSDavid Howells 175718dceddSDavid Howells typedef struct _drm_i915_sarea { 176718dceddSDavid Howells struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 177718dceddSDavid Howells int last_upload; /* last time texture was uploaded */ 178718dceddSDavid Howells int last_enqueue; /* last time a buffer was enqueued */ 179718dceddSDavid Howells int last_dispatch; /* age of the most recently dispatched buffer */ 180718dceddSDavid Howells int ctxOwner; /* last context to upload state */ 181718dceddSDavid Howells int texAge; 182718dceddSDavid Howells int pf_enabled; /* is pageflipping allowed? 
*/ 183718dceddSDavid Howells int pf_active; 184718dceddSDavid Howells int pf_current_page; /* which buffer is being displayed? */ 185718dceddSDavid Howells int perf_boxes; /* performance boxes to be displayed */ 186718dceddSDavid Howells int width, height; /* screen size in pixels */ 187718dceddSDavid Howells 188718dceddSDavid Howells drm_handle_t front_handle; 189718dceddSDavid Howells int front_offset; 190718dceddSDavid Howells int front_size; 191718dceddSDavid Howells 192718dceddSDavid Howells drm_handle_t back_handle; 193718dceddSDavid Howells int back_offset; 194718dceddSDavid Howells int back_size; 195718dceddSDavid Howells 196718dceddSDavid Howells drm_handle_t depth_handle; 197718dceddSDavid Howells int depth_offset; 198718dceddSDavid Howells int depth_size; 199718dceddSDavid Howells 200718dceddSDavid Howells drm_handle_t tex_handle; 201718dceddSDavid Howells int tex_offset; 202718dceddSDavid Howells int tex_size; 203718dceddSDavid Howells int log_tex_granularity; 204718dceddSDavid Howells int pitch; 205718dceddSDavid Howells int rotation; /* 0, 90, 180 or 270 */ 206718dceddSDavid Howells int rotated_offset; 207718dceddSDavid Howells int rotated_size; 208718dceddSDavid Howells int rotated_pitch; 209718dceddSDavid Howells int virtualX, virtualY; 210718dceddSDavid Howells 211718dceddSDavid Howells unsigned int front_tiled; 212718dceddSDavid Howells unsigned int back_tiled; 213718dceddSDavid Howells unsigned int depth_tiled; 214718dceddSDavid Howells unsigned int rotated_tiled; 215718dceddSDavid Howells unsigned int rotated2_tiled; 216718dceddSDavid Howells 217718dceddSDavid Howells int pipeA_x; 218718dceddSDavid Howells int pipeA_y; 219718dceddSDavid Howells int pipeA_w; 220718dceddSDavid Howells int pipeA_h; 221718dceddSDavid Howells int pipeB_x; 222718dceddSDavid Howells int pipeB_y; 223718dceddSDavid Howells int pipeB_w; 224718dceddSDavid Howells int pipeB_h; 225718dceddSDavid Howells 226718dceddSDavid Howells /* fill out some space for old userspace 
triple buffer */ 227718dceddSDavid Howells drm_handle_t unused_handle; 228718dceddSDavid Howells __u32 unused1, unused2, unused3; 229718dceddSDavid Howells 230718dceddSDavid Howells /* buffer object handles for static buffers. May change 231718dceddSDavid Howells * over the lifetime of the client. 232718dceddSDavid Howells */ 233718dceddSDavid Howells __u32 front_bo_handle; 234718dceddSDavid Howells __u32 back_bo_handle; 235718dceddSDavid Howells __u32 unused_bo_handle; 236718dceddSDavid Howells __u32 depth_bo_handle; 237718dceddSDavid Howells 238718dceddSDavid Howells } drm_i915_sarea_t; 239718dceddSDavid Howells 240718dceddSDavid Howells /* due to userspace building against these headers we need some compat here */ 241718dceddSDavid Howells #define planeA_x pipeA_x 242718dceddSDavid Howells #define planeA_y pipeA_y 243718dceddSDavid Howells #define planeA_w pipeA_w 244718dceddSDavid Howells #define planeA_h pipeA_h 245718dceddSDavid Howells #define planeB_x pipeB_x 246718dceddSDavid Howells #define planeB_y pipeB_y 247718dceddSDavid Howells #define planeB_w pipeB_w 248718dceddSDavid Howells #define planeB_h pipeB_h 249718dceddSDavid Howells 250718dceddSDavid Howells /* Flags for perf_boxes 251718dceddSDavid Howells */ 252718dceddSDavid Howells #define I915_BOX_RING_EMPTY 0x1 253718dceddSDavid Howells #define I915_BOX_FLIP 0x2 254718dceddSDavid Howells #define I915_BOX_WAIT 0x4 255718dceddSDavid Howells #define I915_BOX_TEXTURE_LOAD 0x8 256718dceddSDavid Howells #define I915_BOX_LOST_CONTEXT 0x10 257718dceddSDavid Howells 25821631f10SDamien Lespiau /* 25921631f10SDamien Lespiau * i915 specific ioctls. 26021631f10SDamien Lespiau * 26121631f10SDamien Lespiau * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie 26221631f10SDamien Lespiau * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset 26321631f10SDamien Lespiau * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 
264718dceddSDavid Howells */ 265718dceddSDavid Howells #define DRM_I915_INIT 0x00 266718dceddSDavid Howells #define DRM_I915_FLUSH 0x01 267718dceddSDavid Howells #define DRM_I915_FLIP 0x02 268718dceddSDavid Howells #define DRM_I915_BATCHBUFFER 0x03 269718dceddSDavid Howells #define DRM_I915_IRQ_EMIT 0x04 270718dceddSDavid Howells #define DRM_I915_IRQ_WAIT 0x05 271718dceddSDavid Howells #define DRM_I915_GETPARAM 0x06 272718dceddSDavid Howells #define DRM_I915_SETPARAM 0x07 273718dceddSDavid Howells #define DRM_I915_ALLOC 0x08 274718dceddSDavid Howells #define DRM_I915_FREE 0x09 275718dceddSDavid Howells #define DRM_I915_INIT_HEAP 0x0a 276718dceddSDavid Howells #define DRM_I915_CMDBUFFER 0x0b 277718dceddSDavid Howells #define DRM_I915_DESTROY_HEAP 0x0c 278718dceddSDavid Howells #define DRM_I915_SET_VBLANK_PIPE 0x0d 279718dceddSDavid Howells #define DRM_I915_GET_VBLANK_PIPE 0x0e 280718dceddSDavid Howells #define DRM_I915_VBLANK_SWAP 0x0f 281718dceddSDavid Howells #define DRM_I915_HWS_ADDR 0x11 282718dceddSDavid Howells #define DRM_I915_GEM_INIT 0x13 283718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER 0x14 284718dceddSDavid Howells #define DRM_I915_GEM_PIN 0x15 285718dceddSDavid Howells #define DRM_I915_GEM_UNPIN 0x16 286718dceddSDavid Howells #define DRM_I915_GEM_BUSY 0x17 287718dceddSDavid Howells #define DRM_I915_GEM_THROTTLE 0x18 288718dceddSDavid Howells #define DRM_I915_GEM_ENTERVT 0x19 289718dceddSDavid Howells #define DRM_I915_GEM_LEAVEVT 0x1a 290718dceddSDavid Howells #define DRM_I915_GEM_CREATE 0x1b 291718dceddSDavid Howells #define DRM_I915_GEM_PREAD 0x1c 292718dceddSDavid Howells #define DRM_I915_GEM_PWRITE 0x1d 293718dceddSDavid Howells #define DRM_I915_GEM_MMAP 0x1e 294718dceddSDavid Howells #define DRM_I915_GEM_SET_DOMAIN 0x1f 295718dceddSDavid Howells #define DRM_I915_GEM_SW_FINISH 0x20 296718dceddSDavid Howells #define DRM_I915_GEM_SET_TILING 0x21 297718dceddSDavid Howells #define DRM_I915_GEM_GET_TILING 0x22 298718dceddSDavid Howells #define 
DRM_I915_GEM_GET_APERTURE 0x23 299718dceddSDavid Howells #define DRM_I915_GEM_MMAP_GTT 0x24 300718dceddSDavid Howells #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 301718dceddSDavid Howells #define DRM_I915_GEM_MADVISE 0x26 302718dceddSDavid Howells #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 303718dceddSDavid Howells #define DRM_I915_OVERLAY_ATTRS 0x28 304718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER2 0x29 305fec0445cSChris Wilson #define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2 306718dceddSDavid Howells #define DRM_I915_GET_SPRITE_COLORKEY 0x2a 307718dceddSDavid Howells #define DRM_I915_SET_SPRITE_COLORKEY 0x2b 308718dceddSDavid Howells #define DRM_I915_GEM_WAIT 0x2c 309718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_CREATE 0x2d 310718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e 311718dceddSDavid Howells #define DRM_I915_GEM_SET_CACHING 0x2f 312718dceddSDavid Howells #define DRM_I915_GEM_GET_CACHING 0x30 313718dceddSDavid Howells #define DRM_I915_REG_READ 0x31 314b6359918SMika Kuoppala #define DRM_I915_GET_RESET_STATS 0x32 3155cc9ed4bSChris Wilson #define DRM_I915_GEM_USERPTR 0x33 316c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 317c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 318eec688e1SRobert Bragg #define DRM_I915_PERF_OPEN 0x36 319f89823c2SLionel Landwerlin #define DRM_I915_PERF_ADD_CONFIG 0x37 320f89823c2SLionel Landwerlin #define DRM_I915_PERF_REMOVE_CONFIG 0x38 321718dceddSDavid Howells 322718dceddSDavid Howells #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 323718dceddSDavid Howells #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 324718dceddSDavid Howells #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 325718dceddSDavid Howells #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 326718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_EMIT 
DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 327718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 328718dceddSDavid Howells #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 329718dceddSDavid Howells #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 330718dceddSDavid Howells #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 331718dceddSDavid Howells #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 332718dceddSDavid Howells #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 333718dceddSDavid Howells #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 334718dceddSDavid Howells #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 335718dceddSDavid Howells #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 336718dceddSDavid Howells #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 337718dceddSDavid Howells #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 338718dceddSDavid Howells #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 339718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 340718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 341718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER2 
DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 342fec0445cSChris Wilson #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2) 343718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 344718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 345718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 346718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) 347718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) 348718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 349718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 350718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 351718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 352718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 353718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 354718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 355718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 356718dceddSDavid Howells #define 
DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 357718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 358718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 359718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 360718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 361718dceddSDavid Howells #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 362718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 363718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 364718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 365718dceddSDavid Howells #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 3662c60fae1STommi Rantala #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 367718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 368718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 369718dceddSDavid Howells #define 
DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 370718dceddSDavid Howells #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 371b6359918SMika Kuoppala #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) 3725cc9ed4bSChris Wilson #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) 373c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 374c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 375eec688e1SRobert Bragg #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 376f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) 377f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) 378718dceddSDavid Howells 379718dceddSDavid Howells /* Allow drivers to submit batchbuffers directly to hardware, relying 380718dceddSDavid Howells * on the security mechanisms provided by hardware. 381718dceddSDavid Howells */ 382718dceddSDavid Howells typedef struct drm_i915_batchbuffer { 383718dceddSDavid Howells int start; /* agp offset */ 384718dceddSDavid Howells int used; /* nr bytes in use */ 385718dceddSDavid Howells int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 386718dceddSDavid Howells int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 387718dceddSDavid Howells int num_cliprects; /* mulitpass with multiple cliprects? 
*/ 388718dceddSDavid Howells struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 389718dceddSDavid Howells } drm_i915_batchbuffer_t; 390718dceddSDavid Howells 391718dceddSDavid Howells /* As above, but pass a pointer to userspace buffer which can be 392718dceddSDavid Howells * validated by the kernel prior to sending to hardware. 393718dceddSDavid Howells */ 394718dceddSDavid Howells typedef struct _drm_i915_cmdbuffer { 395718dceddSDavid Howells char __user *buf; /* pointer to userspace command buffer */ 396718dceddSDavid Howells int sz; /* nr bytes in buf */ 397718dceddSDavid Howells int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 398718dceddSDavid Howells int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 399718dceddSDavid Howells int num_cliprects; /* mulitpass with multiple cliprects? */ 400718dceddSDavid Howells struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 401718dceddSDavid Howells } drm_i915_cmdbuffer_t; 402718dceddSDavid Howells 403718dceddSDavid Howells /* Userspace can request & wait on irq's: 404718dceddSDavid Howells */ 405718dceddSDavid Howells typedef struct drm_i915_irq_emit { 406718dceddSDavid Howells int __user *irq_seq; 407718dceddSDavid Howells } drm_i915_irq_emit_t; 408718dceddSDavid Howells 409718dceddSDavid Howells typedef struct drm_i915_irq_wait { 410718dceddSDavid Howells int irq_seq; 411718dceddSDavid Howells } drm_i915_irq_wait_t; 412718dceddSDavid Howells 413718dceddSDavid Howells /* Ioctl to query kernel params: 414718dceddSDavid Howells */ 415718dceddSDavid Howells #define I915_PARAM_IRQ_ACTIVE 1 416718dceddSDavid Howells #define I915_PARAM_ALLOW_BATCHBUFFER 2 417718dceddSDavid Howells #define I915_PARAM_LAST_DISPATCH 3 418718dceddSDavid Howells #define I915_PARAM_CHIPSET_ID 4 419718dceddSDavid Howells #define I915_PARAM_HAS_GEM 5 420718dceddSDavid Howells #define I915_PARAM_NUM_FENCES_AVAIL 6 421718dceddSDavid Howells #define I915_PARAM_HAS_OVERLAY 7 422718dceddSDavid Howells 
#define I915_PARAM_HAS_PAGEFLIPPING 8 423718dceddSDavid Howells #define I915_PARAM_HAS_EXECBUF2 9 424718dceddSDavid Howells #define I915_PARAM_HAS_BSD 10 425718dceddSDavid Howells #define I915_PARAM_HAS_BLT 11 426718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_FENCING 12 427718dceddSDavid Howells #define I915_PARAM_HAS_COHERENT_RINGS 13 428718dceddSDavid Howells #define I915_PARAM_HAS_EXEC_CONSTANTS 14 429718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_DELTA 15 430718dceddSDavid Howells #define I915_PARAM_HAS_GEN7_SOL_RESET 16 431718dceddSDavid Howells #define I915_PARAM_HAS_LLC 17 432718dceddSDavid Howells #define I915_PARAM_HAS_ALIASING_PPGTT 18 433718dceddSDavid Howells #define I915_PARAM_HAS_WAIT_TIMEOUT 19 434718dceddSDavid Howells #define I915_PARAM_HAS_SEMAPHORES 20 435718dceddSDavid Howells #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 436a1f2cc73SXiang, Haihao #define I915_PARAM_HAS_VEBOX 22 437c2fb7916SDaniel Vetter #define I915_PARAM_HAS_SECURE_BATCHES 23 438b45305fcSDaniel Vetter #define I915_PARAM_HAS_PINNED_BATCHES 24 439ed5982e6SDaniel Vetter #define I915_PARAM_HAS_EXEC_NO_RELOC 25 440eef90ccbSChris Wilson #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 441651d794fSChris Wilson #define I915_PARAM_HAS_WT 27 442d728c8efSBrad Volkin #define I915_PARAM_CMD_PARSER_VERSION 28 4436a2c4232SChris Wilson #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 4441816f923SAkash Goel #define I915_PARAM_MMAP_VERSION 30 44508e16dc8SZhipeng Gong #define I915_PARAM_HAS_BSD2 31 44627cd4461SNeil Roberts #define I915_PARAM_REVISION 32 447a1559ffeSJeff McGee #define I915_PARAM_SUBSLICE_TOTAL 33 448a1559ffeSJeff McGee #define I915_PARAM_EU_TOTAL 34 44949e4d842SChris Wilson #define I915_PARAM_HAS_GPU_RESET 35 450a9ed33caSAbdiel Janulgue #define I915_PARAM_HAS_RESOURCE_STREAMER 36 451506a8e87SChris Wilson #define I915_PARAM_HAS_EXEC_SOFTPIN 37 45237f501afSarun.siluvery@linux.intel.com #define I915_PARAM_HAS_POOLED_EU 38 45337f501afSarun.siluvery@linux.intel.com #define 
I915_PARAM_MIN_EU_IN_POOL 39 4544cc69075SChris Wilson #define I915_PARAM_MMAP_GTT_VERSION 40 455718dceddSDavid Howells 456bf64e0b0SChris Wilson /* 457bf64e0b0SChris Wilson * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 4580de9136dSChris Wilson * priorities and the driver will attempt to execute batches in priority order. 459bf64e0b0SChris Wilson * The param returns a capability bitmask, nonzero implies that the scheduler 460bf64e0b0SChris Wilson * is enabled, with different features present according to the mask. 461ac14fbd4SChris Wilson * 462ac14fbd4SChris Wilson * The initial priority for each batch is supplied by the context and is 463ac14fbd4SChris Wilson * controlled via I915_CONTEXT_PARAM_PRIORITY. 4640de9136dSChris Wilson */ 4650de9136dSChris Wilson #define I915_PARAM_HAS_SCHEDULER 41 466bf64e0b0SChris Wilson #define I915_SCHEDULER_CAP_ENABLED (1ul << 0) 467bf64e0b0SChris Wilson #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) 468bf64e0b0SChris Wilson #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) 469bf64e0b0SChris Wilson 4705464cd65SAnusha Srivatsa #define I915_PARAM_HUC_STATUS 42 4710de9136dSChris Wilson 47277ae9957SChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 47377ae9957SChris Wilson * synchronisation with implicit fencing on individual objects. 47477ae9957SChris Wilson * See EXEC_OBJECT_ASYNC. 47577ae9957SChris Wilson */ 47677ae9957SChris Wilson #define I915_PARAM_HAS_EXEC_ASYNC 43 47777ae9957SChris Wilson 478fec0445cSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support - 479fec0445cSChris Wilson * both being able to pass in a sync_file fd to wait upon before executing, 480fec0445cSChris Wilson * and being able to return a new sync_file fd that is signaled when the 481fec0445cSChris Wilson * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT. 
482fec0445cSChris Wilson */ 483fec0445cSChris Wilson #define I915_PARAM_HAS_EXEC_FENCE 44 484fec0445cSChris Wilson 485b0fd47adSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture 486b0fd47adSChris Wilson * user specified buffers for post-mortem debugging of GPU hangs. See 487b0fd47adSChris Wilson * EXEC_OBJECT_CAPTURE. 488b0fd47adSChris Wilson */ 489b0fd47adSChris Wilson #define I915_PARAM_HAS_EXEC_CAPTURE 45 490b0fd47adSChris Wilson 4917fed555cSRobert Bragg #define I915_PARAM_SLICE_MASK 46 4927fed555cSRobert Bragg 493f5320233SRobert Bragg /* Assuming it's uniform for each slice, this queries the mask of subslices 494f5320233SRobert Bragg * per-slice for this system. 495f5320233SRobert Bragg */ 496f5320233SRobert Bragg #define I915_PARAM_SUBSLICE_MASK 47 497f5320233SRobert Bragg 4981a71cf2fSChris Wilson /* 4991a71cf2fSChris Wilson * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer 5001a71cf2fSChris Wilson * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST. 5011a71cf2fSChris Wilson */ 5021a71cf2fSChris Wilson #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 5031a71cf2fSChris Wilson 504cf6e7bacSJason Ekstrand /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of 505cf6e7bacSJason Ekstrand * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. 506cf6e7bacSJason Ekstrand */ 507cf6e7bacSJason Ekstrand #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 508cf6e7bacSJason Ekstrand 509d2b4b979SChris Wilson /* 510d2b4b979SChris Wilson * Query whether every context (both per-file default and user created) is 511d2b4b979SChris Wilson * isolated (insofar as HW supports). If this parameter is not true, then 512d2b4b979SChris Wilson * freshly created contexts may inherit values from an existing context, 513d2b4b979SChris Wilson * rather than default HW values.
If true, it also ensures (insofar as HW 514d2b4b979SChris Wilson * supports) that all state set by this context will not leak to any other 515d2b4b979SChris Wilson * context. 516d2b4b979SChris Wilson * 517d2b4b979SChris Wilson * As not every engine across every gen supports contexts, the returned 518d2b4b979SChris Wilson * value reports the support of context isolation for individual engines by 519d2b4b979SChris Wilson * returning a bitmask of each engine class set to true if that class supports 520d2b4b979SChris Wilson * isolation. 521d2b4b979SChris Wilson */ 522d2b4b979SChris Wilson #define I915_PARAM_HAS_CONTEXT_ISOLATION 50 523d2b4b979SChris Wilson 524dab91783SLionel Landwerlin /* Frequency of the command streamer timestamps given by the *_TIMESTAMP 525dab91783SLionel Landwerlin * registers. This used to be fixed per platform but from CNL onwards, this 526dab91783SLionel Landwerlin * might vary depending on the parts. 527dab91783SLionel Landwerlin */ 528dab91783SLionel Landwerlin #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 529dab91783SLionel Landwerlin 530718dceddSDavid Howells typedef struct drm_i915_getparam { 53116f7249dSArtem Savkov __s32 param; 532346add78SDaniel Vetter /* 533346add78SDaniel Vetter * WARNING: Using pointers instead of fixed-size u64 means we need to write 534346add78SDaniel Vetter * compat32 code. Don't repeat this mistake.
535346add78SDaniel Vetter */ 536718dceddSDavid Howells int __user *value; 537718dceddSDavid Howells } drm_i915_getparam_t; 538718dceddSDavid Howells 539718dceddSDavid Howells /* Ioctl to set kernel params: 540718dceddSDavid Howells */ 541718dceddSDavid Howells #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 542718dceddSDavid Howells #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 543718dceddSDavid Howells #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 544718dceddSDavid Howells #define I915_SETPARAM_NUM_USED_FENCES 4 545718dceddSDavid Howells 546718dceddSDavid Howells typedef struct drm_i915_setparam { 547718dceddSDavid Howells int param; 548718dceddSDavid Howells int value; 549718dceddSDavid Howells } drm_i915_setparam_t; 550718dceddSDavid Howells 551718dceddSDavid Howells /* A memory manager for regions of shared memory: 552718dceddSDavid Howells */ 553718dceddSDavid Howells #define I915_MEM_REGION_AGP 1 554718dceddSDavid Howells 555718dceddSDavid Howells typedef struct drm_i915_mem_alloc { 556718dceddSDavid Howells int region; 557718dceddSDavid Howells int alignment; 558718dceddSDavid Howells int size; 559718dceddSDavid Howells int __user *region_offset; /* offset from start of fb or agp */ 560718dceddSDavid Howells } drm_i915_mem_alloc_t; 561718dceddSDavid Howells 562718dceddSDavid Howells typedef struct drm_i915_mem_free { 563718dceddSDavid Howells int region; 564718dceddSDavid Howells int region_offset; 565718dceddSDavid Howells } drm_i915_mem_free_t; 566718dceddSDavid Howells 567718dceddSDavid Howells typedef struct drm_i915_mem_init_heap { 568718dceddSDavid Howells int region; 569718dceddSDavid Howells int size; 570718dceddSDavid Howells int start; 571718dceddSDavid Howells } drm_i915_mem_init_heap_t; 572718dceddSDavid Howells 573718dceddSDavid Howells /* Allow memory manager to be torn down and re-initialized (eg on 574718dceddSDavid Howells * rotate): 575718dceddSDavid Howells */ 576718dceddSDavid Howells typedef struct drm_i915_mem_destroy_heap { 
577718dceddSDavid Howells int region; 578718dceddSDavid Howells } drm_i915_mem_destroy_heap_t; 579718dceddSDavid Howells 580718dceddSDavid Howells /* Allow X server to configure which pipes to monitor for vblank signals 581718dceddSDavid Howells */ 582718dceddSDavid Howells #define DRM_I915_VBLANK_PIPE_A 1 583718dceddSDavid Howells #define DRM_I915_VBLANK_PIPE_B 2 584718dceddSDavid Howells 585718dceddSDavid Howells typedef struct drm_i915_vblank_pipe { 586718dceddSDavid Howells int pipe; 587718dceddSDavid Howells } drm_i915_vblank_pipe_t; 588718dceddSDavid Howells 589718dceddSDavid Howells /* Schedule buffer swap at given vertical blank: 590718dceddSDavid Howells */ 591718dceddSDavid Howells typedef struct drm_i915_vblank_swap { 592718dceddSDavid Howells drm_drawable_t drawable; 593718dceddSDavid Howells enum drm_vblank_seq_type seqtype; 594718dceddSDavid Howells unsigned int sequence; 595718dceddSDavid Howells } drm_i915_vblank_swap_t; 596718dceddSDavid Howells 597718dceddSDavid Howells typedef struct drm_i915_hws_addr { 598718dceddSDavid Howells __u64 addr; 599718dceddSDavid Howells } drm_i915_hws_addr_t; 600718dceddSDavid Howells 601718dceddSDavid Howells struct drm_i915_gem_init { 602718dceddSDavid Howells /** 603718dceddSDavid Howells * Beginning offset in the GTT to be managed by the DRM memory 604718dceddSDavid Howells * manager. 605718dceddSDavid Howells */ 606718dceddSDavid Howells __u64 gtt_start; 607718dceddSDavid Howells /** 608718dceddSDavid Howells * Ending offset in the GTT to be managed by the DRM memory 609718dceddSDavid Howells * manager. 610718dceddSDavid Howells */ 611718dceddSDavid Howells __u64 gtt_end; 612718dceddSDavid Howells }; 613718dceddSDavid Howells 614718dceddSDavid Howells struct drm_i915_gem_create { 615718dceddSDavid Howells /** 616718dceddSDavid Howells * Requested size for the object. 617718dceddSDavid Howells * 618718dceddSDavid Howells * The (page-aligned) allocated size for the object will be returned. 
619718dceddSDavid Howells */ 620718dceddSDavid Howells __u64 size; 621718dceddSDavid Howells /** 622718dceddSDavid Howells * Returned handle for the object. 623718dceddSDavid Howells * 624718dceddSDavid Howells * Object handles are nonzero. 625718dceddSDavid Howells */ 626718dceddSDavid Howells __u32 handle; 627718dceddSDavid Howells __u32 pad; 628718dceddSDavid Howells }; 629718dceddSDavid Howells 630718dceddSDavid Howells struct drm_i915_gem_pread { 631718dceddSDavid Howells /** Handle for the object being read. */ 632718dceddSDavid Howells __u32 handle; 633718dceddSDavid Howells __u32 pad; 634718dceddSDavid Howells /** Offset into the object to read from */ 635718dceddSDavid Howells __u64 offset; 636718dceddSDavid Howells /** Length of data to read */ 637718dceddSDavid Howells __u64 size; 638718dceddSDavid Howells /** 639718dceddSDavid Howells * Pointer to write the data into. 640718dceddSDavid Howells * 641718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 642718dceddSDavid Howells */ 643718dceddSDavid Howells __u64 data_ptr; 644718dceddSDavid Howells }; 645718dceddSDavid Howells 646718dceddSDavid Howells struct drm_i915_gem_pwrite { 647718dceddSDavid Howells /** Handle for the object being written to. */ 648718dceddSDavid Howells __u32 handle; 649718dceddSDavid Howells __u32 pad; 650718dceddSDavid Howells /** Offset into the object to write to */ 651718dceddSDavid Howells __u64 offset; 652718dceddSDavid Howells /** Length of data to write */ 653718dceddSDavid Howells __u64 size; 654718dceddSDavid Howells /** 655718dceddSDavid Howells * Pointer to read the data from. 656718dceddSDavid Howells * 657718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 658718dceddSDavid Howells */ 659718dceddSDavid Howells __u64 data_ptr; 660718dceddSDavid Howells }; 661718dceddSDavid Howells 662718dceddSDavid Howells struct drm_i915_gem_mmap { 663718dceddSDavid Howells /** Handle for the object being mapped. 
*/ 664718dceddSDavid Howells __u32 handle; 665718dceddSDavid Howells __u32 pad; 666718dceddSDavid Howells /** Offset in the object to map. */ 667718dceddSDavid Howells __u64 offset; 668718dceddSDavid Howells /** 669718dceddSDavid Howells * Length of data to map. 670718dceddSDavid Howells * 671718dceddSDavid Howells * The value will be page-aligned. 672718dceddSDavid Howells */ 673718dceddSDavid Howells __u64 size; 674718dceddSDavid Howells /** 675718dceddSDavid Howells * Returned pointer the data was mapped at. 676718dceddSDavid Howells * 677718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 678718dceddSDavid Howells */ 679718dceddSDavid Howells __u64 addr_ptr; 6801816f923SAkash Goel 6811816f923SAkash Goel /** 6821816f923SAkash Goel * Flags for extended behaviour. 6831816f923SAkash Goel * 6841816f923SAkash Goel * Added in version 2. 6851816f923SAkash Goel */ 6861816f923SAkash Goel __u64 flags; 6871816f923SAkash Goel #define I915_MMAP_WC 0x1 688718dceddSDavid Howells }; 689718dceddSDavid Howells 690718dceddSDavid Howells struct drm_i915_gem_mmap_gtt { 691718dceddSDavid Howells /** Handle for the object being mapped. */ 692718dceddSDavid Howells __u32 handle; 693718dceddSDavid Howells __u32 pad; 694718dceddSDavid Howells /** 695718dceddSDavid Howells * Fake offset to use for subsequent mmap call 696718dceddSDavid Howells * 697718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 
698718dceddSDavid Howells */ 699718dceddSDavid Howells __u64 offset; 700718dceddSDavid Howells }; 701718dceddSDavid Howells 702718dceddSDavid Howells struct drm_i915_gem_set_domain { 703718dceddSDavid Howells /** Handle for the object */ 704718dceddSDavid Howells __u32 handle; 705718dceddSDavid Howells 706718dceddSDavid Howells /** New read domains */ 707718dceddSDavid Howells __u32 read_domains; 708718dceddSDavid Howells 709718dceddSDavid Howells /** New write domain */ 710718dceddSDavid Howells __u32 write_domain; 711718dceddSDavid Howells }; 712718dceddSDavid Howells 713718dceddSDavid Howells struct drm_i915_gem_sw_finish { 714718dceddSDavid Howells /** Handle for the object */ 715718dceddSDavid Howells __u32 handle; 716718dceddSDavid Howells }; 717718dceddSDavid Howells 718718dceddSDavid Howells struct drm_i915_gem_relocation_entry { 719718dceddSDavid Howells /** 720718dceddSDavid Howells * Handle of the buffer being pointed to by this relocation entry. 721718dceddSDavid Howells * 722718dceddSDavid Howells * It's appealing to make this be an index into the mm_validate_entry 723718dceddSDavid Howells * list to refer to the buffer, but this allows the driver to create 724718dceddSDavid Howells * a relocation list for state buffers and not re-write it per 725718dceddSDavid Howells * exec using the buffer. 726718dceddSDavid Howells */ 727718dceddSDavid Howells __u32 target_handle; 728718dceddSDavid Howells 729718dceddSDavid Howells /** 730718dceddSDavid Howells * Value to be added to the offset of the target buffer to make up 731718dceddSDavid Howells * the relocation entry. 
732718dceddSDavid Howells */ 733718dceddSDavid Howells __u32 delta; 734718dceddSDavid Howells 735718dceddSDavid Howells /** Offset in the buffer the relocation entry will be written into */ 736718dceddSDavid Howells __u64 offset; 737718dceddSDavid Howells 738718dceddSDavid Howells /** 739718dceddSDavid Howells * Offset value of the target buffer that the relocation entry was last 740718dceddSDavid Howells * written as. 741718dceddSDavid Howells * 742718dceddSDavid Howells * If the buffer has the same offset as last time, we can skip syncing 743718dceddSDavid Howells * and writing the relocation. This value is written back out by 744718dceddSDavid Howells * the execbuffer ioctl when the relocation is written. 745718dceddSDavid Howells */ 746718dceddSDavid Howells __u64 presumed_offset; 747718dceddSDavid Howells 748718dceddSDavid Howells /** 749718dceddSDavid Howells * Target memory domains read by this operation. 750718dceddSDavid Howells */ 751718dceddSDavid Howells __u32 read_domains; 752718dceddSDavid Howells 753718dceddSDavid Howells /** 754718dceddSDavid Howells * Target memory domains written by this operation. 755718dceddSDavid Howells * 756718dceddSDavid Howells * Note that only one domain may be written by the whole 757718dceddSDavid Howells * execbuffer operation, so that where there are conflicts, 758718dceddSDavid Howells * the application will get -EINVAL back. 759718dceddSDavid Howells */ 760718dceddSDavid Howells __u32 write_domain; 761718dceddSDavid Howells }; 762718dceddSDavid Howells 763718dceddSDavid Howells /** @{ 764718dceddSDavid Howells * Intel memory domains 765718dceddSDavid Howells * 766718dceddSDavid Howells * Most of these just align with the various caches in 767718dceddSDavid Howells * the system and are used to flush and invalidate as 768718dceddSDavid Howells * objects end up cached in different domains. 
769718dceddSDavid Howells */ 770718dceddSDavid Howells /** CPU cache */ 771718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU 0x00000001 772718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */ 773718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER 0x00000002 774718dceddSDavid Howells /** Sampler cache, used by texture engine */ 775718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER 0x00000004 776718dceddSDavid Howells /** Command queue, used to load batch buffers */ 777718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND 0x00000008 778718dceddSDavid Howells /** Instruction cache, used by shader programs */ 779718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 780718dceddSDavid Howells /** Vertex address cache */ 781718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX 0x00000020 782718dceddSDavid Howells /** GTT domain - aperture and scanout */ 783718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT 0x00000040 784e22d8e3cSChris Wilson /** WC domain - uncached access */ 785e22d8e3cSChris Wilson #define I915_GEM_DOMAIN_WC 0x00000080 786718dceddSDavid Howells /** @} */ 787718dceddSDavid Howells 788718dceddSDavid Howells struct drm_i915_gem_exec_object { 789718dceddSDavid Howells /** 790718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this 791718dceddSDavid Howells * operation. 792718dceddSDavid Howells */ 793718dceddSDavid Howells __u32 handle; 794718dceddSDavid Howells 795718dceddSDavid Howells /** Number of relocations to be performed on this buffer */ 796718dceddSDavid Howells __u32 relocation_count; 797718dceddSDavid Howells /** 798718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing 799718dceddSDavid Howells * the relocations to be performed in this buffer. 
800718dceddSDavid Howells */ 801718dceddSDavid Howells __u64 relocs_ptr; 802718dceddSDavid Howells 803718dceddSDavid Howells /** Required alignment in graphics aperture */ 804718dceddSDavid Howells __u64 alignment; 805718dceddSDavid Howells 806718dceddSDavid Howells /** 807718dceddSDavid Howells * Returned value of the updated offset of the object, for future 808718dceddSDavid Howells * presumed_offset writes. 809718dceddSDavid Howells */ 810718dceddSDavid Howells __u64 offset; 811718dceddSDavid Howells }; 812718dceddSDavid Howells 813718dceddSDavid Howells struct drm_i915_gem_execbuffer { 814718dceddSDavid Howells /** 815718dceddSDavid Howells * List of buffers to be validated with their relocations to be 816718dceddSDavid Howells * performed on them. 817718dceddSDavid Howells * 818718dceddSDavid Howells * This is a pointer to an array of struct drm_i915_gem_validate_entry. 819718dceddSDavid Howells * 820718dceddSDavid Howells * These buffers must be listed in an order such that all relocations 821718dceddSDavid Howells * a buffer is performing refer to buffers that have already appeared 822718dceddSDavid Howells * in the validate list. 823718dceddSDavid Howells */ 824718dceddSDavid Howells __u64 buffers_ptr; 825718dceddSDavid Howells __u32 buffer_count; 826718dceddSDavid Howells 827718dceddSDavid Howells /** Offset in the batchbuffer to start execution from.
*/ 828718dceddSDavid Howells __u32 batch_start_offset; 829718dceddSDavid Howells /** Bytes used in batchbuffer from batch_start_offset */ 830718dceddSDavid Howells __u32 batch_len; 831718dceddSDavid Howells __u32 DR1; 832718dceddSDavid Howells __u32 DR4; 833718dceddSDavid Howells __u32 num_cliprects; 834718dceddSDavid Howells /** This is a struct drm_clip_rect *cliprects */ 835718dceddSDavid Howells __u64 cliprects_ptr; 836718dceddSDavid Howells }; 837718dceddSDavid Howells 838718dceddSDavid Howells struct drm_i915_gem_exec_object2 { 839718dceddSDavid Howells /** 840718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this 841718dceddSDavid Howells * operation. 842718dceddSDavid Howells */ 843718dceddSDavid Howells __u32 handle; 844718dceddSDavid Howells 845718dceddSDavid Howells /** Number of relocations to be performed on this buffer */ 846718dceddSDavid Howells __u32 relocation_count; 847718dceddSDavid Howells /** 848718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing 849718dceddSDavid Howells * the relocations to be performed in this buffer. 850718dceddSDavid Howells */ 851718dceddSDavid Howells __u64 relocs_ptr; 852718dceddSDavid Howells 853718dceddSDavid Howells /** Required alignment in graphics aperture */ 854718dceddSDavid Howells __u64 alignment; 855718dceddSDavid Howells 856718dceddSDavid Howells /** 857506a8e87SChris Wilson * When the EXEC_OBJECT_PINNED flag is specified this is populated by 858506a8e87SChris Wilson * the user with the GTT offset at which this object will be pinned. 859506a8e87SChris Wilson * When the I915_EXEC_NO_RELOC flag is specified this must contain the 860506a8e87SChris Wilson * presumed_offset of the object. 861506a8e87SChris Wilson * During execbuffer2 the kernel populates it with the value of the 862506a8e87SChris Wilson * current GTT offset of the object, for future presumed_offset writes. 
863718dceddSDavid Howells */ 864718dceddSDavid Howells __u64 offset; 865718dceddSDavid Howells 866718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 867ed5982e6SDaniel Vetter #define EXEC_OBJECT_NEEDS_GTT (1<<1) 868ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE (1<<2) 869101b506aSMichel Thierry #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 870506a8e87SChris Wilson #define EXEC_OBJECT_PINNED (1<<4) 87191b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE (1<<5) 87277ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and 87377ae9957SChris Wilson * synchronises operations with outstanding rendering. This includes 87477ae9957SChris Wilson * rendering on other devices if exported via dma-buf. However, sometimes 87577ae9957SChris Wilson * this tracking is too coarse and the user knows better. For example, 87677ae9957SChris Wilson * if the object is split into non-overlapping ranges shared between different 87777ae9957SChris Wilson * clients or engines (i.e. suballocating objects), the implicit tracking 87877ae9957SChris Wilson * by kernel assumes that each operation affects the whole object rather 87977ae9957SChris Wilson * than an individual range, causing needless synchronisation between clients. 88077ae9957SChris Wilson * The kernel will also forgo any CPU cache flushes prior to rendering from 88177ae9957SChris Wilson * the object as the client is expected to be also handling such domain 88277ae9957SChris Wilson * tracking. 88377ae9957SChris Wilson * 88477ae9957SChris Wilson * The kernel maintains the implicit tracking in order to manage resources 88577ae9957SChris Wilson * used by the GPU - this flag only disables the synchronisation prior to 88677ae9957SChris Wilson * rendering with this object in this execbuf. 88777ae9957SChris Wilson * 88877ae9957SChris Wilson * Opting out of implicit synchronisation requires the user to do its own 88977ae9957SChris Wilson * explicit tracking to avoid rendering corruption.
See, for example, 89077ae9957SChris Wilson * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 89177ae9957SChris Wilson */ 89277ae9957SChris Wilson #define EXEC_OBJECT_ASYNC (1<<6) 893b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error 894b0fd47adSChris Wilson * state upon a GPU hang involving this batch for post-mortem debugging. 895b0fd47adSChris Wilson * These buffers are recorded in no particular order as "user" in 896b0fd47adSChris Wilson * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see 897b0fd47adSChris Wilson * if the kernel supports this flag. 898b0fd47adSChris Wilson */ 899b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE (1<<7) 9009e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 901b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1) 902718dceddSDavid Howells __u64 flags; 903ed5982e6SDaniel Vetter 90491b2db6fSChris Wilson union { 905718dceddSDavid Howells __u64 rsvd1; 90691b2db6fSChris Wilson __u64 pad_to_size; 90791b2db6fSChris Wilson }; 908718dceddSDavid Howells __u64 rsvd2; 909718dceddSDavid Howells }; 910718dceddSDavid Howells 911cf6e7bacSJason Ekstrand struct drm_i915_gem_exec_fence { 912cf6e7bacSJason Ekstrand /** 913cf6e7bacSJason Ekstrand * User's handle for a drm_syncobj to wait on or signal. 
914cf6e7bacSJason Ekstrand */ 915cf6e7bacSJason Ekstrand __u32 handle; 916cf6e7bacSJason Ekstrand 917cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_WAIT (1<<0) 918cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_SIGNAL (1<<1) 919ebcaa1ffSTvrtko Ursulin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) 920cf6e7bacSJason Ekstrand __u32 flags; 921cf6e7bacSJason Ekstrand }; 922cf6e7bacSJason Ekstrand 923718dceddSDavid Howells struct drm_i915_gem_execbuffer2 { 924718dceddSDavid Howells /** 925718dceddSDavid Howells * List of gem_exec_object2 structs 926718dceddSDavid Howells */ 927718dceddSDavid Howells __u64 buffers_ptr; 928718dceddSDavid Howells __u32 buffer_count; 929718dceddSDavid Howells 930718dceddSDavid Howells /** Offset in the batchbuffer to start execution from. */ 931718dceddSDavid Howells __u32 batch_start_offset; 932718dceddSDavid Howells /** Bytes used in batchbuffer from batch_start_offset */ 933718dceddSDavid Howells __u32 batch_len; 934718dceddSDavid Howells __u32 DR1; 935718dceddSDavid Howells __u32 DR4; 936718dceddSDavid Howells __u32 num_cliprects; 937cf6e7bacSJason Ekstrand /** 938cf6e7bacSJason Ekstrand * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY 939cf6e7bacSJason Ekstrand * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a 940cf6e7bacSJason Ekstrand * struct drm_i915_gem_exec_fence *fences. 941cf6e7bacSJason Ekstrand */ 942718dceddSDavid Howells __u64 cliprects_ptr; 943718dceddSDavid Howells #define I915_EXEC_RING_MASK (7<<0) 944718dceddSDavid Howells #define I915_EXEC_DEFAULT (0<<0) 945718dceddSDavid Howells #define I915_EXEC_RENDER (1<<0) 946718dceddSDavid Howells #define I915_EXEC_BSD (2<<0) 947718dceddSDavid Howells #define I915_EXEC_BLT (3<<0) 94882f91b6eSXiang, Haihao #define I915_EXEC_VEBOX (4<<0) 949718dceddSDavid Howells 950718dceddSDavid Howells /* Used for switching the constants addressing mode on gen4+ RENDER ring. 
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 *  the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_file and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST		(1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of i915_gem_exec_fence structures which specify a set of
 * dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY   (1<<19)

/* All flags above this one are unknown/unsupported by the kernel. */
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)

struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide. There are race conditions which prevent the
	 * report of which engines are busy from being always accurate.
	 * However, the converse is not true. If the object is idle, the
	 * result of the ioctl, that all engines are idle, is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engines on which the object is being read, and the
	 * engine on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicate if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine for the last write is reported.
	 *
	 * The high word (bits 16:31) are a bitmask of which engines are
	 * currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine is the same as specified in the
	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
	 * the I915_EXEC_RENDER engine for execution, and so it is never
	 * reported as active itself. Some hardware may have parallel
	 * execution engines, e.g. multiple media engines, which are
	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
	 * so are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should be only used as a heuristic.
	 */
	__u32 busy;
};

/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
#define I915_TILING_LAST	I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};

struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};

/* flags */
#define I915_OVERLAY_TYPE_MASK		0xff
#define I915_OVERLAY_YUV_PLANAR		0x01
#define I915_OVERLAY_YUV_PACKED		0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple. Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent. All other pixels will
 * be displayed on top of the primary plane. For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};

struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait, Returns time remaining. */
	__s64 timeout_ns;
};

struct drm_i915_gem_context_create {
	/* output: id of new context*/
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_reg_read {
	/*
	 * Register offset.
	 * For 64bit wide registers where the upper 32bits don't immediately
	 * follow the lower 32bits, the offset of the lower 32bits must
	 * be specified
	 */
	__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)

	__u64 val; /* Return value */
};
/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if using the default
 *   single instruction 8byte read, in order to workaround that pass
 *   flag I915_REG_READ_8B_WA in offset field.
 *
 */

struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};

struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};

struct drm_i915_gem_context_param {
	__u32 ctx_id;
	__u32 size;
	__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
#define I915_CONTEXT_PARAM_BANNABLE	0x5
#define I915_CONTEXT_PARAM_PRIORITY	0x6
#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
#define   I915_CONTEXT_DEFAULT_PRIORITY		0
#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
	__u64 value;
};

enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};

enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
#define I915_PERF_FLAG_DISABLED		(1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};

/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try and read a stream that is disabled.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)

/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the work horse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 * struct {
	 *     struct drm_i915_perf_record_header header;
	 *
	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 * };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};

/**
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register
	 * address, value). For example the expected length of the buffer
	 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_I915_DRM_H_ */