1718dceddSDavid Howells /* 2718dceddSDavid Howells * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 3718dceddSDavid Howells * All Rights Reserved. 4718dceddSDavid Howells * 5718dceddSDavid Howells * Permission is hereby granted, free of charge, to any person obtaining a 6718dceddSDavid Howells * copy of this software and associated documentation files (the 7718dceddSDavid Howells * "Software"), to deal in the Software without restriction, including 8718dceddSDavid Howells * without limitation the rights to use, copy, modify, merge, publish, 9718dceddSDavid Howells * distribute, sub license, and/or sell copies of the Software, and to 10718dceddSDavid Howells * permit persons to whom the Software is furnished to do so, subject to 11718dceddSDavid Howells * the following conditions: 12718dceddSDavid Howells * 13718dceddSDavid Howells * The above copyright notice and this permission notice (including the 14718dceddSDavid Howells * next paragraph) shall be included in all copies or substantial portions 15718dceddSDavid Howells * of the Software. 16718dceddSDavid Howells * 17718dceddSDavid Howells * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18718dceddSDavid Howells * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19718dceddSDavid Howells * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 20718dceddSDavid Howells * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 21718dceddSDavid Howells * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22718dceddSDavid Howells * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23718dceddSDavid Howells * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24718dceddSDavid Howells * 25718dceddSDavid Howells */ 26718dceddSDavid Howells 27718dceddSDavid Howells #ifndef _UAPI_I915_DRM_H_ 28718dceddSDavid Howells #define _UAPI_I915_DRM_H_ 29718dceddSDavid Howells 301049102fSGabriel Laskar #include "drm.h" 31718dceddSDavid Howells 32b1c1f5c4SEmil Velikov #if defined(__cplusplus) 33b1c1f5c4SEmil Velikov extern "C" { 34b1c1f5c4SEmil Velikov #endif 35b1c1f5c4SEmil Velikov 36718dceddSDavid Howells /* Please note that modifications to all structs defined here are 37718dceddSDavid Howells * subject to backwards-compatibility constraints. 38718dceddSDavid Howells */ 39718dceddSDavid Howells 40cce723edSBen Widawsky /** 41cce723edSBen Widawsky * DOC: uevents generated by i915 on it's device node 42cce723edSBen Widawsky * 43cce723edSBen Widawsky * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch 44cce723edSBen Widawsky * event from the gpu l3 cache. Additional information supplied is ROW, 4535a85ac6SBen Widawsky * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep 4635a85ac6SBen Widawsky * track of these events and if a specific cache-line seems to have a 4735a85ac6SBen Widawsky * persistent error remap it with the l3 remapping tool supplied in 4835a85ac6SBen Widawsky * intel-gpu-tools. The value supplied with the event is always 1. 49cce723edSBen Widawsky * 50cce723edSBen Widawsky * I915_ERROR_UEVENT - Generated upon error detection, currently only via 51cce723edSBen Widawsky * hangcheck. The error detection event is a good indicator of when things 52cce723edSBen Widawsky * began to go badly. The value supplied with the event is a 1 upon error 53cce723edSBen Widawsky * detection, and a 0 upon reset completion, signifying no more error 54cce723edSBen Widawsky * exists. NOTE: Disabling hangcheck or reset via module parameter will 55cce723edSBen Widawsky * cause the related events to not be seen. 56cce723edSBen Widawsky * 57cce723edSBen Widawsky * I915_RESET_UEVENT - Event is generated just before an attempt to reset the 5866137f54SRandy Dunlap * GPU. The value supplied with the event is always 1. NOTE: Disable 59cce723edSBen Widawsky * reset via module parameter will cause this event to not be seen. 60cce723edSBen Widawsky */ 61cce723edSBen Widawsky #define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" 62cce723edSBen Widawsky #define I915_ERROR_UEVENT "ERROR" 63cce723edSBen Widawsky #define I915_RESET_UEVENT "RESET" 64718dceddSDavid Howells 6519d053d4SMatthew Auld /** 6619d053d4SMatthew Auld * struct i915_user_extension - Base class for defining a chain of extensions 679d1305efSChris Wilson * 689d1305efSChris Wilson * Many interfaces need to grow over time. In most cases we can simply 699d1305efSChris Wilson * extend the struct and have userspace pass in more data. Another option, 709d1305efSChris Wilson * as demonstrated by Vulkan's approach to providing extensions for forward 719d1305efSChris Wilson * and backward compatibility, is to use a list of optional structs to 729d1305efSChris Wilson * provide those extra details. 739d1305efSChris Wilson * 749d1305efSChris Wilson * The key advantage to using an extension chain is that it allows us to 759d1305efSChris Wilson * redefine the interface more easily than an ever growing struct of 769d1305efSChris Wilson * increasing complexity, and for large parts of that interface to be 779d1305efSChris Wilson * entirely optional. The downside is more pointer chasing; chasing across 789d1305efSChris Wilson * the __user boundary with pointers encapsulated inside u64. 7919d053d4SMatthew Auld * 8019d053d4SMatthew Auld * Example chaining: 8119d053d4SMatthew Auld * 8219d053d4SMatthew Auld * .. code-block:: C 8319d053d4SMatthew Auld * 8419d053d4SMatthew Auld * struct i915_user_extension ext3 { 8519d053d4SMatthew Auld * .next_extension = 0, // end 8619d053d4SMatthew Auld * .name = ..., 8719d053d4SMatthew Auld * }; 8819d053d4SMatthew Auld * struct i915_user_extension ext2 { 8919d053d4SMatthew Auld * .next_extension = (uintptr_t)&ext3, 9019d053d4SMatthew Auld * .name = ..., 9119d053d4SMatthew Auld * }; 9219d053d4SMatthew Auld * struct i915_user_extension ext1 { 9319d053d4SMatthew Auld * .next_extension = (uintptr_t)&ext2, 9419d053d4SMatthew Auld * .name = ..., 9519d053d4SMatthew Auld * }; 9619d053d4SMatthew Auld * 9719d053d4SMatthew Auld * Typically the struct i915_user_extension would be embedded in some uAPI 9819d053d4SMatthew Auld * struct, and in this case we would feed it the head of the chain(i.e ext1), 9919d053d4SMatthew Auld * which would then apply all of the above extensions. 10019d053d4SMatthew Auld * 1019d1305efSChris Wilson */ 1029d1305efSChris Wilson struct i915_user_extension { 10319d053d4SMatthew Auld /** 10419d053d4SMatthew Auld * @next_extension: 10519d053d4SMatthew Auld * 10619d053d4SMatthew Auld * Pointer to the next struct i915_user_extension, or zero if the end. 10719d053d4SMatthew Auld */ 1089d1305efSChris Wilson __u64 next_extension; 10919d053d4SMatthew Auld /** 11019d053d4SMatthew Auld * @name: Name of the extension. 11119d053d4SMatthew Auld * 11219d053d4SMatthew Auld * Note that the name here is just some integer. 11319d053d4SMatthew Auld * 11419d053d4SMatthew Auld * Also note that the name space for this is not global for the whole 11519d053d4SMatthew Auld * driver, but rather its scope/meaning is limited to the specific piece 11619d053d4SMatthew Auld * of uAPI which has embedded the struct i915_user_extension. 11719d053d4SMatthew Auld */ 1189d1305efSChris Wilson __u32 name; 11919d053d4SMatthew Auld /** 12019d053d4SMatthew Auld * @flags: MBZ 12119d053d4SMatthew Auld * 12219d053d4SMatthew Auld * All undefined bits must be zero. 12319d053d4SMatthew Auld */ 12419d053d4SMatthew Auld __u32 flags; 12519d053d4SMatthew Auld /** 12619d053d4SMatthew Auld * @rsvd: MBZ 12719d053d4SMatthew Auld * 12819d053d4SMatthew Auld * Reserved for future use; must be zero. 12919d053d4SMatthew Auld */ 13019d053d4SMatthew Auld __u32 rsvd[4]; 1319d1305efSChris Wilson }; 1329d1305efSChris Wilson 1339d1305efSChris Wilson /* 1343373ce2eSImre Deak * MOCS indexes used for GPU surfaces, defining the cacheability of the 1353373ce2eSImre Deak * surface data and the coherency for this data wrt. CPU vs. GPU accesses. 1363373ce2eSImre Deak */ 1373373ce2eSImre Deak enum i915_mocs_table_index { 1383373ce2eSImre Deak /* 1393373ce2eSImre Deak * Not cached anywhere, coherency between CPU and GPU accesses is 1403373ce2eSImre Deak * guaranteed. 1413373ce2eSImre Deak */ 1423373ce2eSImre Deak I915_MOCS_UNCACHED, 1433373ce2eSImre Deak /* 1443373ce2eSImre Deak * Cacheability and coherency controlled by the kernel automatically 1453373ce2eSImre Deak * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current 1463373ce2eSImre Deak * usage of the surface (used for display scanout or not). 1473373ce2eSImre Deak */ 1483373ce2eSImre Deak I915_MOCS_PTE, 1493373ce2eSImre Deak /* 1503373ce2eSImre Deak * Cached in all GPU caches available on the platform. 1513373ce2eSImre Deak * Coherency between CPU and GPU accesses to the surface is not 1523373ce2eSImre Deak * guaranteed without extra synchronization. 1533373ce2eSImre Deak */ 1543373ce2eSImre Deak I915_MOCS_CACHED, 1553373ce2eSImre Deak }; 1563373ce2eSImre Deak 157991b4de3SMatt Roper /** 158991b4de3SMatt Roper * enum drm_i915_gem_engine_class - uapi engine type enumeration 159991b4de3SMatt Roper * 1601803fcbcSTvrtko Ursulin * Different engines serve different roles, and there may be more than one 161991b4de3SMatt Roper * engine serving each role. This enum provides a classification of the role 162991b4de3SMatt Roper * of the engine, which may be used when requesting operations to be performed 163991b4de3SMatt Roper * on a certain subset of engines, or for providing information about that 164991b4de3SMatt Roper * group. 1651803fcbcSTvrtko Ursulin */ 1661803fcbcSTvrtko Ursulin enum drm_i915_gem_engine_class { 167991b4de3SMatt Roper /** 168991b4de3SMatt Roper * @I915_ENGINE_CLASS_RENDER: 169991b4de3SMatt Roper * 170991b4de3SMatt Roper * Render engines support instructions used for 3D, Compute (GPGPU), 171991b4de3SMatt Roper * and programmable media workloads. These instructions fetch data and 172991b4de3SMatt Roper * dispatch individual work items to threads that operate in parallel. 173991b4de3SMatt Roper * The threads run small programs (called "kernels" or "shaders") on 174991b4de3SMatt Roper * the GPU's execution units (EUs). 175991b4de3SMatt Roper */ 1761803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_RENDER = 0, 177991b4de3SMatt Roper 178991b4de3SMatt Roper /** 179991b4de3SMatt Roper * @I915_ENGINE_CLASS_COPY: 180991b4de3SMatt Roper * 181991b4de3SMatt Roper * Copy engines (also referred to as "blitters") support instructions 182991b4de3SMatt Roper * that move blocks of data from one location in memory to another, 183991b4de3SMatt Roper * or that fill a specified location of memory with fixed data. 184991b4de3SMatt Roper * Copy engines can perform pre-defined logical or bitwise operations 185991b4de3SMatt Roper * on the source, destination, or pattern data. 186991b4de3SMatt Roper */ 1871803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_COPY = 1, 188991b4de3SMatt Roper 189991b4de3SMatt Roper /** 190991b4de3SMatt Roper * @I915_ENGINE_CLASS_VIDEO: 191991b4de3SMatt Roper * 192991b4de3SMatt Roper * Video engines (also referred to as "bit stream decode" (BSD) or 193991b4de3SMatt Roper * "vdbox") support instructions that perform fixed-function media 194991b4de3SMatt Roper * decode and encode. 195991b4de3SMatt Roper */ 1961803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_VIDEO = 2, 197991b4de3SMatt Roper 198991b4de3SMatt Roper /** 199991b4de3SMatt Roper * @I915_ENGINE_CLASS_VIDEO_ENHANCE: 200991b4de3SMatt Roper * 201991b4de3SMatt Roper * Video enhancement engines (also referred to as "vebox") support 202991b4de3SMatt Roper * instructions related to image enhancement. 203991b4de3SMatt Roper */ 2041803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, 2051803fcbcSTvrtko Ursulin 206*ecf8eca5SMatt Roper /** 207*ecf8eca5SMatt Roper * @I915_ENGINE_CLASS_COMPUTE: 208*ecf8eca5SMatt Roper * 209*ecf8eca5SMatt Roper * Compute engines support a subset of the instructions available 210*ecf8eca5SMatt Roper * on render engines: compute engines support Compute (GPGPU) and 211*ecf8eca5SMatt Roper * programmable media workloads, but do not support the 3D pipeline. 212*ecf8eca5SMatt Roper */ 213*ecf8eca5SMatt Roper I915_ENGINE_CLASS_COMPUTE = 4, 214*ecf8eca5SMatt Roper 215991b4de3SMatt Roper /* Values in this enum should be kept compact. */ 216be03564bSChris Wilson 217991b4de3SMatt Roper /** 218991b4de3SMatt Roper * @I915_ENGINE_CLASS_INVALID: 219991b4de3SMatt Roper * 220991b4de3SMatt Roper * Placeholder value to represent an invalid engine class assignment. 221991b4de3SMatt Roper */ 2221803fcbcSTvrtko Ursulin I915_ENGINE_CLASS_INVALID = -1 2231803fcbcSTvrtko Ursulin }; 2241803fcbcSTvrtko Ursulin 225c94fde8fSMatt Atwood /** 226c94fde8fSMatt Atwood * struct i915_engine_class_instance - Engine class/instance identifier 227c94fde8fSMatt Atwood * 228d1172ab3SChris Wilson * There may be more than one engine fulfilling any role within the system. 229d1172ab3SChris Wilson * Each engine of a class is given a unique instance number and therefore 230d1172ab3SChris Wilson * any engine can be specified by its class:instance tuplet. APIs that allow 231d1172ab3SChris Wilson * access to any engine in the system will use struct i915_engine_class_instance 232d1172ab3SChris Wilson * for this identification. 233d1172ab3SChris Wilson */ 234d1172ab3SChris Wilson struct i915_engine_class_instance { 235c94fde8fSMatt Atwood /** 236c94fde8fSMatt Atwood * @engine_class: 237c94fde8fSMatt Atwood * 238c94fde8fSMatt Atwood * Engine class from enum drm_i915_gem_engine_class 239c94fde8fSMatt Atwood */ 240c94fde8fSMatt Atwood __u16 engine_class; 241976b55f0SChris Wilson #define I915_ENGINE_CLASS_INVALID_NONE -1 2426d06779eSChris Wilson #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 243c94fde8fSMatt Atwood 244c94fde8fSMatt Atwood /** 245c94fde8fSMatt Atwood * @engine_instance: 246c94fde8fSMatt Atwood * 247c94fde8fSMatt Atwood * Engine instance. 248c94fde8fSMatt Atwood */ 249c94fde8fSMatt Atwood __u16 engine_instance; 250d1172ab3SChris Wilson }; 251d1172ab3SChris Wilson 252b46a33e2STvrtko Ursulin /** 253b46a33e2STvrtko Ursulin * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915 254b46a33e2STvrtko Ursulin * 255b46a33e2STvrtko Ursulin */ 256b46a33e2STvrtko Ursulin 257b46a33e2STvrtko Ursulin enum drm_i915_pmu_engine_sample { 258b46a33e2STvrtko Ursulin I915_SAMPLE_BUSY = 0, 259b46a33e2STvrtko Ursulin I915_SAMPLE_WAIT = 1, 260b552ae44STvrtko Ursulin I915_SAMPLE_SEMA = 2 261b46a33e2STvrtko Ursulin }; 262b46a33e2STvrtko Ursulin 263b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_BITS (4) 264b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_MASK (0xf) 265b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_INSTANCE_BITS (8) 266b46a33e2STvrtko Ursulin #define I915_PMU_CLASS_SHIFT \ 267b46a33e2STvrtko Ursulin (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS) 268b46a33e2STvrtko Ursulin 269b46a33e2STvrtko Ursulin #define __I915_PMU_ENGINE(class, instance, sample) \ 270b46a33e2STvrtko Ursulin ((class) << I915_PMU_CLASS_SHIFT | \ 271b46a33e2STvrtko Ursulin (instance) << I915_PMU_SAMPLE_BITS | \ 272b46a33e2STvrtko Ursulin (sample)) 273b46a33e2STvrtko Ursulin 274b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_BUSY(class, instance) \ 275b46a33e2STvrtko Ursulin __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY) 276b46a33e2STvrtko Ursulin 277b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_WAIT(class, instance) \ 278b46a33e2STvrtko Ursulin __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT) 279b46a33e2STvrtko Ursulin 280b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_SEMA(class, instance) \ 281b46a33e2STvrtko Ursulin __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA) 282b46a33e2STvrtko Ursulin 283b46a33e2STvrtko Ursulin #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) 284b46a33e2STvrtko Ursulin 285b46a33e2STvrtko Ursulin #define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0) 286b46a33e2STvrtko Ursulin #define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1) 2870cd4684dSTvrtko Ursulin #define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2) 2886060b6aeSTvrtko Ursulin #define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3) 2898c3b1ba0SChris Wilson #define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4) 2906060b6aeSTvrtko Ursulin 291348fb0cbSTvrtko Ursulin #define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY 292b46a33e2STvrtko Ursulin 293718dceddSDavid Howells /* Each region is a minimum of 16k, and there are at most 255 of them. 294718dceddSDavid Howells */ 295718dceddSDavid Howells #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 296718dceddSDavid Howells * of chars for next/prev indices */ 297718dceddSDavid Howells #define I915_LOG_MIN_TEX_REGION_SIZE 14 298718dceddSDavid Howells 299718dceddSDavid Howells typedef struct _drm_i915_init { 300718dceddSDavid Howells enum { 301718dceddSDavid Howells I915_INIT_DMA = 0x01, 302718dceddSDavid Howells I915_CLEANUP_DMA = 0x02, 303718dceddSDavid Howells I915_RESUME_DMA = 0x03 304718dceddSDavid Howells } func; 305718dceddSDavid Howells unsigned int mmio_offset; 306718dceddSDavid Howells int sarea_priv_offset; 307718dceddSDavid Howells unsigned int ring_start; 308718dceddSDavid Howells unsigned int ring_end; 309718dceddSDavid Howells unsigned int ring_size; 310718dceddSDavid Howells unsigned int front_offset; 311718dceddSDavid Howells unsigned int back_offset; 312718dceddSDavid Howells unsigned int depth_offset; 313718dceddSDavid Howells unsigned int w; 314718dceddSDavid Howells unsigned int h; 315718dceddSDavid Howells unsigned int pitch; 316718dceddSDavid Howells unsigned int pitch_bits; 317718dceddSDavid Howells unsigned int back_pitch; 318718dceddSDavid Howells unsigned int depth_pitch; 319718dceddSDavid Howells unsigned int cpp; 320718dceddSDavid Howells unsigned int chipset; 321718dceddSDavid Howells } drm_i915_init_t; 322718dceddSDavid Howells 323718dceddSDavid Howells typedef struct _drm_i915_sarea { 324718dceddSDavid Howells struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 325718dceddSDavid Howells int last_upload; /* last time texture was uploaded */ 326718dceddSDavid Howells int last_enqueue; /* last time a buffer was enqueued */ 327718dceddSDavid Howells int last_dispatch; /* age of the most recently dispatched buffer */ 328718dceddSDavid Howells int ctxOwner; /* last context to upload state */ 329718dceddSDavid Howells int texAge; 330718dceddSDavid Howells int pf_enabled; /* is pageflipping allowed? */ 331718dceddSDavid Howells int pf_active; 332718dceddSDavid Howells int pf_current_page; /* which buffer is being displayed? */ 333718dceddSDavid Howells int perf_boxes; /* performance boxes to be displayed */ 334718dceddSDavid Howells int width, height; /* screen size in pixels */ 335718dceddSDavid Howells 336718dceddSDavid Howells drm_handle_t front_handle; 337718dceddSDavid Howells int front_offset; 338718dceddSDavid Howells int front_size; 339718dceddSDavid Howells 340718dceddSDavid Howells drm_handle_t back_handle; 341718dceddSDavid Howells int back_offset; 342718dceddSDavid Howells int back_size; 343718dceddSDavid Howells 344718dceddSDavid Howells drm_handle_t depth_handle; 345718dceddSDavid Howells int depth_offset; 346718dceddSDavid Howells int depth_size; 347718dceddSDavid Howells 348718dceddSDavid Howells drm_handle_t tex_handle; 349718dceddSDavid Howells int tex_offset; 350718dceddSDavid Howells int tex_size; 351718dceddSDavid Howells int log_tex_granularity; 352718dceddSDavid Howells int pitch; 353718dceddSDavid Howells int rotation; /* 0, 90, 180 or 270 */ 354718dceddSDavid Howells int rotated_offset; 355718dceddSDavid Howells int rotated_size; 356718dceddSDavid Howells int rotated_pitch; 357718dceddSDavid Howells int virtualX, virtualY; 358718dceddSDavid Howells 359718dceddSDavid Howells unsigned int front_tiled; 360718dceddSDavid Howells unsigned int back_tiled; 361718dceddSDavid Howells unsigned int depth_tiled; 362718dceddSDavid Howells unsigned int rotated_tiled; 363718dceddSDavid Howells unsigned int rotated2_tiled; 364718dceddSDavid Howells 365718dceddSDavid Howells int pipeA_x; 366718dceddSDavid Howells int pipeA_y; 367718dceddSDavid Howells int pipeA_w; 368718dceddSDavid Howells int pipeA_h; 369718dceddSDavid Howells int pipeB_x; 370718dceddSDavid Howells int pipeB_y; 371718dceddSDavid Howells int pipeB_w; 372718dceddSDavid Howells int pipeB_h; 373718dceddSDavid Howells 374718dceddSDavid Howells /* fill out some space for old userspace triple buffer */ 375718dceddSDavid Howells drm_handle_t unused_handle; 376718dceddSDavid Howells __u32 unused1, unused2, unused3; 377718dceddSDavid Howells 378718dceddSDavid Howells /* buffer object handles for static buffers. May change 379718dceddSDavid Howells * over the lifetime of the client. 380718dceddSDavid Howells */ 381718dceddSDavid Howells __u32 front_bo_handle; 382718dceddSDavid Howells __u32 back_bo_handle; 383718dceddSDavid Howells __u32 unused_bo_handle; 384718dceddSDavid Howells __u32 depth_bo_handle; 385718dceddSDavid Howells 386718dceddSDavid Howells } drm_i915_sarea_t; 387718dceddSDavid Howells 388718dceddSDavid Howells /* due to userspace building against these headers we need some compat here */ 389718dceddSDavid Howells #define planeA_x pipeA_x 390718dceddSDavid Howells #define planeA_y pipeA_y 391718dceddSDavid Howells #define planeA_w pipeA_w 392718dceddSDavid Howells #define planeA_h pipeA_h 393718dceddSDavid Howells #define planeB_x pipeB_x 394718dceddSDavid Howells #define planeB_y pipeB_y 395718dceddSDavid Howells #define planeB_w pipeB_w 396718dceddSDavid Howells #define planeB_h pipeB_h 397718dceddSDavid Howells 398718dceddSDavid Howells /* Flags for perf_boxes 399718dceddSDavid Howells */ 400718dceddSDavid Howells #define I915_BOX_RING_EMPTY 0x1 401718dceddSDavid Howells #define I915_BOX_FLIP 0x2 402718dceddSDavid Howells #define I915_BOX_WAIT 0x4 403718dceddSDavid Howells #define I915_BOX_TEXTURE_LOAD 0x8 404718dceddSDavid Howells #define I915_BOX_LOST_CONTEXT 0x10 405718dceddSDavid Howells 40621631f10SDamien Lespiau /* 40721631f10SDamien Lespiau * i915 specific ioctls. 40821631f10SDamien Lespiau * 40921631f10SDamien Lespiau * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie 41021631f10SDamien Lespiau * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset 41121631f10SDamien Lespiau * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 412718dceddSDavid Howells */ 413718dceddSDavid Howells #define DRM_I915_INIT 0x00 414718dceddSDavid Howells #define DRM_I915_FLUSH 0x01 415718dceddSDavid Howells #define DRM_I915_FLIP 0x02 416718dceddSDavid Howells #define DRM_I915_BATCHBUFFER 0x03 417718dceddSDavid Howells #define DRM_I915_IRQ_EMIT 0x04 418718dceddSDavid Howells #define DRM_I915_IRQ_WAIT 0x05 419718dceddSDavid Howells #define DRM_I915_GETPARAM 0x06 420718dceddSDavid Howells #define DRM_I915_SETPARAM 0x07 421718dceddSDavid Howells #define DRM_I915_ALLOC 0x08 422718dceddSDavid Howells #define DRM_I915_FREE 0x09 423718dceddSDavid Howells #define DRM_I915_INIT_HEAP 0x0a 424718dceddSDavid Howells #define DRM_I915_CMDBUFFER 0x0b 425718dceddSDavid Howells #define DRM_I915_DESTROY_HEAP 0x0c 426718dceddSDavid Howells #define DRM_I915_SET_VBLANK_PIPE 0x0d 427718dceddSDavid Howells #define DRM_I915_GET_VBLANK_PIPE 0x0e 428718dceddSDavid Howells #define DRM_I915_VBLANK_SWAP 0x0f 429718dceddSDavid Howells #define DRM_I915_HWS_ADDR 0x11 430718dceddSDavid Howells #define DRM_I915_GEM_INIT 0x13 431718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER 0x14 432718dceddSDavid Howells #define DRM_I915_GEM_PIN 0x15 433718dceddSDavid Howells #define DRM_I915_GEM_UNPIN 0x16 434718dceddSDavid Howells #define DRM_I915_GEM_BUSY 0x17 435718dceddSDavid Howells #define DRM_I915_GEM_THROTTLE 0x18 436718dceddSDavid Howells #define DRM_I915_GEM_ENTERVT 0x19 437718dceddSDavid Howells #define DRM_I915_GEM_LEAVEVT 0x1a 438718dceddSDavid Howells #define DRM_I915_GEM_CREATE 0x1b 439718dceddSDavid Howells #define DRM_I915_GEM_PREAD 0x1c 440718dceddSDavid Howells #define DRM_I915_GEM_PWRITE 0x1d 441718dceddSDavid Howells #define DRM_I915_GEM_MMAP 0x1e 442718dceddSDavid Howells #define DRM_I915_GEM_SET_DOMAIN 0x1f 443718dceddSDavid Howells #define DRM_I915_GEM_SW_FINISH 0x20 444718dceddSDavid Howells #define DRM_I915_GEM_SET_TILING 0x21 445718dceddSDavid Howells #define DRM_I915_GEM_GET_TILING 0x22 446718dceddSDavid Howells #define DRM_I915_GEM_GET_APERTURE 0x23 447718dceddSDavid Howells #define DRM_I915_GEM_MMAP_GTT 0x24 448718dceddSDavid Howells #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 449718dceddSDavid Howells #define DRM_I915_GEM_MADVISE 0x26 450718dceddSDavid Howells #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 451718dceddSDavid Howells #define DRM_I915_OVERLAY_ATTRS 0x28 452718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER2 0x29 453fec0445cSChris Wilson #define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2 454718dceddSDavid Howells #define DRM_I915_GET_SPRITE_COLORKEY 0x2a 455718dceddSDavid Howells #define DRM_I915_SET_SPRITE_COLORKEY 0x2b 456718dceddSDavid Howells #define DRM_I915_GEM_WAIT 0x2c 457718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_CREATE 0x2d 458718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e 459718dceddSDavid Howells #define DRM_I915_GEM_SET_CACHING 0x2f 460718dceddSDavid Howells #define DRM_I915_GEM_GET_CACHING 0x30 461718dceddSDavid Howells #define DRM_I915_REG_READ 0x31 462b6359918SMika Kuoppala #define DRM_I915_GET_RESET_STATS 0x32 4635cc9ed4bSChris Wilson #define DRM_I915_GEM_USERPTR 0x33 464c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 465c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 466eec688e1SRobert Bragg #define DRM_I915_PERF_OPEN 0x36 467f89823c2SLionel Landwerlin #define DRM_I915_PERF_ADD_CONFIG 0x37 468f89823c2SLionel Landwerlin #define DRM_I915_PERF_REMOVE_CONFIG 0x38 469a446ae2cSLionel Landwerlin #define DRM_I915_QUERY 0x39 4707f3f317aSChris Wilson #define DRM_I915_GEM_VM_CREATE 0x3a 4717f3f317aSChris Wilson #define DRM_I915_GEM_VM_DESTROY 0x3b 472ebcb4029SMatthew Auld #define DRM_I915_GEM_CREATE_EXT 0x3c 473be03564bSChris Wilson /* Must be kept compact -- no holes */ 474718dceddSDavid Howells 475718dceddSDavid Howells #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 476718dceddSDavid Howells #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 477718dceddSDavid Howells #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 478718dceddSDavid Howells #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 479718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 480718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 481718dceddSDavid Howells #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 482718dceddSDavid Howells #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 483718dceddSDavid Howells #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 484718dceddSDavid Howells #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 485718dceddSDavid Howells #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 486718dceddSDavid Howells #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 487718dceddSDavid Howells #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 488718dceddSDavid Howells #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 489718dceddSDavid Howells #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 490718dceddSDavid Howells #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 491718dceddSDavid Howells #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 492718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 493718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 494718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 495fec0445cSChris Wilson #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2) 496718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 497718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 498718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 499718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) 500718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) 501718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 502718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 503718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 504718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 505ebcb4029SMatthew Auld #define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext) 506718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 507718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 508718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 509718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 510cc662126SAbdiel Janulgue #define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset) 511718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 512718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 513718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 514718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 515718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 516718dceddSDavid Howells #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 517718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 518718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 519718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 520718dceddSDavid Howells #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 5212c60fae1STommi Rantala #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 522718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 523718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 524b9171541SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext) 525718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 526718dceddSDavid Howells #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 527b6359918SMika Kuoppala #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) 5285cc9ed4bSChris Wilson #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) 529c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 530c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 531eec688e1SRobert Bragg #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 532f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) 533f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) 534a446ae2cSLionel Landwerlin #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) 5357f3f317aSChris Wilson #define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control) 5367f3f317aSChris Wilson #define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control) 537718dceddSDavid Howells 538718dceddSDavid Howells /* Allow drivers to submit batchbuffers directly to hardware, relying 539718dceddSDavid Howells * on the security mechanisms provided by hardware. 540718dceddSDavid Howells */ 541718dceddSDavid Howells typedef struct drm_i915_batchbuffer { 542718dceddSDavid Howells int start; /* agp offset */ 543718dceddSDavid Howells int used; /* nr bytes in use */ 544718dceddSDavid Howells int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 545718dceddSDavid Howells int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 546718dceddSDavid Howells int num_cliprects; /* mulitpass with multiple cliprects? */ 547718dceddSDavid Howells struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 548718dceddSDavid Howells } drm_i915_batchbuffer_t; 549718dceddSDavid Howells 550718dceddSDavid Howells /* As above, but pass a pointer to userspace buffer which can be 551718dceddSDavid Howells * validated by the kernel prior to sending to hardware. 552718dceddSDavid Howells */ 553718dceddSDavid Howells typedef struct _drm_i915_cmdbuffer { 554718dceddSDavid Howells char __user *buf; /* pointer to userspace command buffer */ 555718dceddSDavid Howells int sz; /* nr bytes in buf */ 556718dceddSDavid Howells int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 557718dceddSDavid Howells int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 558718dceddSDavid Howells int num_cliprects; /* mulitpass with multiple cliprects? */ 559718dceddSDavid Howells struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 560718dceddSDavid Howells } drm_i915_cmdbuffer_t; 561718dceddSDavid Howells 562718dceddSDavid Howells /* Userspace can request & wait on irq's: 563718dceddSDavid Howells */ 564718dceddSDavid Howells typedef struct drm_i915_irq_emit { 565718dceddSDavid Howells int __user *irq_seq; 566718dceddSDavid Howells } drm_i915_irq_emit_t; 567718dceddSDavid Howells 568718dceddSDavid Howells typedef struct drm_i915_irq_wait { 569718dceddSDavid Howells int irq_seq; 570718dceddSDavid Howells } drm_i915_irq_wait_t; 571718dceddSDavid Howells 5724bdafb9dSChris Wilson /* 5734bdafb9dSChris Wilson * Different modes of per-process Graphics Translation Table, 5744bdafb9dSChris Wilson * see I915_PARAM_HAS_ALIASING_PPGTT 5754bdafb9dSChris Wilson */ 5764bdafb9dSChris Wilson #define I915_GEM_PPGTT_NONE 0 5774bdafb9dSChris Wilson #define I915_GEM_PPGTT_ALIASING 1 5784bdafb9dSChris Wilson #define I915_GEM_PPGTT_FULL 2 5794bdafb9dSChris Wilson 580718dceddSDavid Howells /* Ioctl to query kernel params: 581718dceddSDavid Howells */ 582718dceddSDavid Howells #define I915_PARAM_IRQ_ACTIVE 1 583718dceddSDavid Howells #define I915_PARAM_ALLOW_BATCHBUFFER 2 584718dceddSDavid Howells #define I915_PARAM_LAST_DISPATCH 3 585718dceddSDavid Howells #define I915_PARAM_CHIPSET_ID 4 586718dceddSDavid Howells #define I915_PARAM_HAS_GEM 5 587718dceddSDavid Howells #define I915_PARAM_NUM_FENCES_AVAIL 6 588718dceddSDavid Howells #define I915_PARAM_HAS_OVERLAY 7 589718dceddSDavid Howells #define I915_PARAM_HAS_PAGEFLIPPING 8 590718dceddSDavid Howells #define I915_PARAM_HAS_EXECBUF2 9 591718dceddSDavid Howells #define I915_PARAM_HAS_BSD 10 592718dceddSDavid Howells #define I915_PARAM_HAS_BLT 11 593718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_FENCING 12 594718dceddSDavid Howells #define I915_PARAM_HAS_COHERENT_RINGS 13 595718dceddSDavid Howells #define I915_PARAM_HAS_EXEC_CONSTANTS 14 596718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_DELTA 15 597718dceddSDavid Howells #define I915_PARAM_HAS_GEN7_SOL_RESET 16 598718dceddSDavid Howells #define I915_PARAM_HAS_LLC 17 599718dceddSDavid Howells #define I915_PARAM_HAS_ALIASING_PPGTT 18 600718dceddSDavid Howells #define I915_PARAM_HAS_WAIT_TIMEOUT 19 601718dceddSDavid Howells #define I915_PARAM_HAS_SEMAPHORES 20 602718dceddSDavid Howells #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 603a1f2cc73SXiang, Haihao #define I915_PARAM_HAS_VEBOX 22 604c2fb7916SDaniel Vetter #define I915_PARAM_HAS_SECURE_BATCHES 23 605b45305fcSDaniel Vetter #define I915_PARAM_HAS_PINNED_BATCHES 24 606ed5982e6SDaniel Vetter #define I915_PARAM_HAS_EXEC_NO_RELOC 25 607eef90ccbSChris Wilson #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 608651d794fSChris Wilson #define I915_PARAM_HAS_WT 27 609d728c8efSBrad Volkin #define I915_PARAM_CMD_PARSER_VERSION 28 6106a2c4232SChris Wilson #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 6111816f923SAkash Goel #define I915_PARAM_MMAP_VERSION 30 61208e16dc8SZhipeng Gong #define I915_PARAM_HAS_BSD2 31 61327cd4461SNeil Roberts #define I915_PARAM_REVISION 32 614a1559ffeSJeff McGee #define I915_PARAM_SUBSLICE_TOTAL 33 615a1559ffeSJeff McGee #define I915_PARAM_EU_TOTAL 34 61649e4d842SChris Wilson #define I915_PARAM_HAS_GPU_RESET 35 617a9ed33caSAbdiel Janulgue #define I915_PARAM_HAS_RESOURCE_STREAMER 36 618506a8e87SChris Wilson #define I915_PARAM_HAS_EXEC_SOFTPIN 37 61937f501afSarun.siluvery@linux.intel.com #define I915_PARAM_HAS_POOLED_EU 38 62037f501afSarun.siluvery@linux.intel.com #define I915_PARAM_MIN_EU_IN_POOL 39 6214cc69075SChris Wilson #define I915_PARAM_MMAP_GTT_VERSION 40 622718dceddSDavid Howells 623bf64e0b0SChris Wilson /* 624bf64e0b0SChris Wilson * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 6250de9136dSChris Wilson * priorities and the driver will attempt to execute batches in priority order. 626bf64e0b0SChris Wilson * The param returns a capability bitmask, nonzero implies that the scheduler 627bf64e0b0SChris Wilson * is enabled, with different features present according to the mask. 628ac14fbd4SChris Wilson * 629ac14fbd4SChris Wilson * The initial priority for each batch is supplied by the context and is 630ac14fbd4SChris Wilson * controlled via I915_CONTEXT_PARAM_PRIORITY. 6310de9136dSChris Wilson */ 6320de9136dSChris Wilson #define I915_PARAM_HAS_SCHEDULER 41 633bf64e0b0SChris Wilson #define I915_SCHEDULER_CAP_ENABLED (1ul << 0) 634bf64e0b0SChris Wilson #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) 635bf64e0b0SChris Wilson #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) 636e8861964SChris Wilson #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) 637bf73fc0fSChris Wilson #define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) 638ee242ca7SMatthew Brost /* 639ee242ca7SMatthew Brost * Indicates the 2k user priority levels are statically mapped into 3 buckets as 640ee242ca7SMatthew Brost * follows: 641ee242ca7SMatthew Brost * 642ee242ca7SMatthew Brost * -1k to -1 Low priority 643ee242ca7SMatthew Brost * 0 Normal priority 644ee242ca7SMatthew Brost * 1 to 1k Highest priority 645ee242ca7SMatthew Brost */ 646ee242ca7SMatthew Brost #define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5) 647bf64e0b0SChris Wilson 6485464cd65SAnusha Srivatsa #define I915_PARAM_HUC_STATUS 42 6490de9136dSChris Wilson 65077ae9957SChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 65177ae9957SChris Wilson * synchronisation with implicit fencing on individual objects. 65277ae9957SChris Wilson * See EXEC_OBJECT_ASYNC. 65377ae9957SChris Wilson */ 65477ae9957SChris Wilson #define I915_PARAM_HAS_EXEC_ASYNC 43 65577ae9957SChris Wilson 656fec0445cSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support - 657fec0445cSChris Wilson * both being able to pass in a sync_file fd to wait upon before executing, 658fec0445cSChris Wilson * and being able to return a new sync_file fd that is signaled when the 659fec0445cSChris Wilson * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT. 660fec0445cSChris Wilson */ 661fec0445cSChris Wilson #define I915_PARAM_HAS_EXEC_FENCE 44 662fec0445cSChris Wilson 663b0fd47adSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture 664b0fd47adSChris Wilson * user specified bufffers for post-mortem debugging of GPU hangs. See 665b0fd47adSChris Wilson * EXEC_OBJECT_CAPTURE. 666b0fd47adSChris Wilson */ 667b0fd47adSChris Wilson #define I915_PARAM_HAS_EXEC_CAPTURE 45 668b0fd47adSChris Wilson 6697fed555cSRobert Bragg #define I915_PARAM_SLICE_MASK 46 6707fed555cSRobert Bragg 671f5320233SRobert Bragg /* Assuming it's uniform for each slice, this queries the mask of subslices 672f5320233SRobert Bragg * per-slice for this system. 673f5320233SRobert Bragg */ 674f5320233SRobert Bragg #define I915_PARAM_SUBSLICE_MASK 47 675f5320233SRobert Bragg 6761a71cf2fSChris Wilson /* 6771a71cf2fSChris Wilson * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer 6781a71cf2fSChris Wilson * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST. 6791a71cf2fSChris Wilson */ 6801a71cf2fSChris Wilson #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 6811a71cf2fSChris Wilson 682cf6e7bacSJason Ekstrand /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of 683cf6e7bacSJason Ekstrand * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. 684cf6e7bacSJason Ekstrand */ 685cf6e7bacSJason Ekstrand #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 686cf6e7bacSJason Ekstrand 687d2b4b979SChris Wilson /* 688d2b4b979SChris Wilson * Query whether every context (both per-file default and user created) is 689d2b4b979SChris Wilson * isolated (insofar as HW supports). If this parameter is not true, then 690d2b4b979SChris Wilson * freshly created contexts may inherit values from an existing context, 691d2b4b979SChris Wilson * rather than default HW values. If true, it also ensures (insofar as HW 692d2b4b979SChris Wilson * supports) that all state set by this context will not leak to any other 693d2b4b979SChris Wilson * context. 694d2b4b979SChris Wilson * 695d2b4b979SChris Wilson * As not every engine across every gen support contexts, the returned 696d2b4b979SChris Wilson * value reports the support of context isolation for individual engines by 697d2b4b979SChris Wilson * returning a bitmask of each engine class set to true if that class supports 698d2b4b979SChris Wilson * isolation. 699d2b4b979SChris Wilson */ 700d2b4b979SChris Wilson #define I915_PARAM_HAS_CONTEXT_ISOLATION 50 701d2b4b979SChris Wilson 702dab91783SLionel Landwerlin /* Frequency of the command streamer timestamps given by the *_TIMESTAMP 703dab91783SLionel Landwerlin * registers. This used to be fixed per platform but from CNL onwards, this 704dab91783SLionel Landwerlin * might vary depending on the parts. 705dab91783SLionel Landwerlin */ 706dab91783SLionel Landwerlin #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 707dab91783SLionel Landwerlin 708900ccf30SChris Wilson /* 709900ccf30SChris Wilson * Once upon a time we supposed that writes through the GGTT would be 710900ccf30SChris Wilson * immediately in physical memory (once flushed out of the CPU path). However, 711900ccf30SChris Wilson * on a few different processors and chipsets, this is not necessarily the case 712900ccf30SChris Wilson * as the writes appear to be buffered internally. Thus a read of the backing 713900ccf30SChris Wilson * storage (physical memory) via a different path (with different physical tags 714900ccf30SChris Wilson * to the indirect write via the GGTT) will see stale values from before 715900ccf30SChris Wilson * the GGTT write. Inside the kernel, we can for the most part keep track of 716900ccf30SChris Wilson * the different read/write domains in use (e.g. set-domain), but the assumption 717900ccf30SChris Wilson * of coherency is baked into the ABI, hence reporting its true state in this 718900ccf30SChris Wilson * parameter. 719900ccf30SChris Wilson * 720900ccf30SChris Wilson * Reports true when writes via mmap_gtt are immediately visible following an 721900ccf30SChris Wilson * lfence to flush the WCB. 722900ccf30SChris Wilson * 723900ccf30SChris Wilson * Reports false when writes via mmap_gtt are indeterminately delayed in an in 724900ccf30SChris Wilson * internal buffer and are _not_ immediately visible to third parties accessing 725900ccf30SChris Wilson * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC 726900ccf30SChris Wilson * communications channel when reporting false is strongly disadvised. 727900ccf30SChris Wilson */ 728900ccf30SChris Wilson #define I915_PARAM_MMAP_GTT_COHERENT 52 729900ccf30SChris Wilson 730a88b6e4cSChris Wilson /* 731a88b6e4cSChris Wilson * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel 732a88b6e4cSChris Wilson * execution through use of explicit fence support. 733a88b6e4cSChris Wilson * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT. 734a88b6e4cSChris Wilson */ 735a88b6e4cSChris Wilson #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53 736b8d49f28SLionel Landwerlin 737b8d49f28SLionel Landwerlin /* 738b8d49f28SLionel Landwerlin * Revision of the i915-perf uAPI. The value returned helps determine what 739b8d49f28SLionel Landwerlin * i915-perf features are available. See drm_i915_perf_property_id. 740b8d49f28SLionel Landwerlin */ 741b8d49f28SLionel Landwerlin #define I915_PARAM_PERF_REVISION 54 742b8d49f28SLionel Landwerlin 74313149e8bSLionel Landwerlin /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of 74413149e8bSLionel Landwerlin * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See 74513149e8bSLionel Landwerlin * I915_EXEC_USE_EXTENSIONS. 74613149e8bSLionel Landwerlin */ 74713149e8bSLionel Landwerlin #define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55 74813149e8bSLionel Landwerlin 749b65a9489SChris Wilson /* Query if the kernel supports the I915_USERPTR_PROBE flag. */ 750b65a9489SChris Wilson #define I915_PARAM_HAS_USERPTR_PROBE 56 751b65a9489SChris Wilson 752be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */ 753be03564bSChris Wilson 754718dceddSDavid Howells typedef struct drm_i915_getparam { 75516f7249dSArtem Savkov __s32 param; 756346add78SDaniel Vetter /* 757346add78SDaniel Vetter * WARNING: Using pointers instead of fixed-size u64 means we need to write 758346add78SDaniel Vetter * compat32 code. Don't repeat this mistake. 759346add78SDaniel Vetter */ 760718dceddSDavid Howells int __user *value; 761718dceddSDavid Howells } drm_i915_getparam_t; 762718dceddSDavid Howells 763718dceddSDavid Howells /* Ioctl to set kernel params: 764718dceddSDavid Howells */ 765718dceddSDavid Howells #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 766718dceddSDavid Howells #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 767718dceddSDavid Howells #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 768718dceddSDavid Howells #define I915_SETPARAM_NUM_USED_FENCES 4 769be03564bSChris Wilson /* Must be kept compact -- no holes */ 770718dceddSDavid Howells 771718dceddSDavid Howells typedef struct drm_i915_setparam { 772718dceddSDavid Howells int param; 773718dceddSDavid Howells int value; 774718dceddSDavid Howells } drm_i915_setparam_t; 775718dceddSDavid Howells 776718dceddSDavid Howells /* A memory manager for regions of shared memory: 777718dceddSDavid Howells */ 778718dceddSDavid Howells #define I915_MEM_REGION_AGP 1 779718dceddSDavid Howells 780718dceddSDavid Howells typedef struct drm_i915_mem_alloc { 781718dceddSDavid Howells int region; 782718dceddSDavid Howells int alignment; 783718dceddSDavid Howells int size; 784718dceddSDavid Howells int __user *region_offset; /* offset from start of fb or agp */ 785718dceddSDavid Howells } drm_i915_mem_alloc_t; 786718dceddSDavid Howells 787718dceddSDavid Howells typedef struct drm_i915_mem_free { 788718dceddSDavid Howells int region; 789718dceddSDavid Howells int region_offset; 790718dceddSDavid Howells } drm_i915_mem_free_t; 791718dceddSDavid Howells 792718dceddSDavid Howells typedef struct drm_i915_mem_init_heap { 793718dceddSDavid Howells int region; 794718dceddSDavid Howells int size; 795718dceddSDavid Howells int start; 796718dceddSDavid Howells } drm_i915_mem_init_heap_t; 797718dceddSDavid Howells 798718dceddSDavid Howells /* Allow memory manager to be torn down and re-initialized (eg on 799718dceddSDavid Howells * rotate): 800718dceddSDavid Howells */ 801718dceddSDavid Howells typedef struct drm_i915_mem_destroy_heap { 802718dceddSDavid Howells int region; 803718dceddSDavid Howells } drm_i915_mem_destroy_heap_t; 804718dceddSDavid Howells 805718dceddSDavid Howells /* Allow X server to configure which pipes to monitor for vblank signals 806718dceddSDavid Howells */ 807718dceddSDavid Howells #define DRM_I915_VBLANK_PIPE_A 1 808718dceddSDavid Howells #define DRM_I915_VBLANK_PIPE_B 2 809718dceddSDavid Howells 810718dceddSDavid Howells typedef struct drm_i915_vblank_pipe { 811718dceddSDavid Howells int pipe; 812718dceddSDavid Howells } drm_i915_vblank_pipe_t; 813718dceddSDavid Howells 814718dceddSDavid Howells /* Schedule buffer swap at given vertical blank: 815718dceddSDavid Howells */ 816718dceddSDavid Howells typedef struct drm_i915_vblank_swap { 817718dceddSDavid Howells drm_drawable_t drawable; 818718dceddSDavid Howells enum drm_vblank_seq_type seqtype; 819718dceddSDavid Howells unsigned int sequence; 820718dceddSDavid Howells } drm_i915_vblank_swap_t; 821718dceddSDavid Howells 822718dceddSDavid Howells typedef struct drm_i915_hws_addr { 823718dceddSDavid Howells __u64 addr; 824718dceddSDavid Howells } drm_i915_hws_addr_t; 825718dceddSDavid Howells 826718dceddSDavid Howells struct drm_i915_gem_init { 827718dceddSDavid Howells /** 828718dceddSDavid Howells * Beginning offset in the GTT to be managed by the DRM memory 829718dceddSDavid Howells * manager. 830718dceddSDavid Howells */ 831718dceddSDavid Howells __u64 gtt_start; 832718dceddSDavid Howells /** 833718dceddSDavid Howells * Ending offset in the GTT to be managed by the DRM memory 834718dceddSDavid Howells * manager. 835718dceddSDavid Howells */ 836718dceddSDavid Howells __u64 gtt_end; 837718dceddSDavid Howells }; 838718dceddSDavid Howells 839718dceddSDavid Howells struct drm_i915_gem_create { 840718dceddSDavid Howells /** 841718dceddSDavid Howells * Requested size for the object. 842718dceddSDavid Howells * 843718dceddSDavid Howells * The (page-aligned) allocated size for the object will be returned. 844718dceddSDavid Howells */ 845718dceddSDavid Howells __u64 size; 846718dceddSDavid Howells /** 847718dceddSDavid Howells * Returned handle for the object. 848718dceddSDavid Howells * 849718dceddSDavid Howells * Object handles are nonzero. 850718dceddSDavid Howells */ 851718dceddSDavid Howells __u32 handle; 852718dceddSDavid Howells __u32 pad; 853718dceddSDavid Howells }; 854718dceddSDavid Howells 855718dceddSDavid Howells struct drm_i915_gem_pread { 856718dceddSDavid Howells /** Handle for the object being read. */ 857718dceddSDavid Howells __u32 handle; 858718dceddSDavid Howells __u32 pad; 859718dceddSDavid Howells /** Offset into the object to read from */ 860718dceddSDavid Howells __u64 offset; 861718dceddSDavid Howells /** Length of data to read */ 862718dceddSDavid Howells __u64 size; 863718dceddSDavid Howells /** 864718dceddSDavid Howells * Pointer to write the data into. 865718dceddSDavid Howells * 866718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 867718dceddSDavid Howells */ 868718dceddSDavid Howells __u64 data_ptr; 869718dceddSDavid Howells }; 870718dceddSDavid Howells 871718dceddSDavid Howells struct drm_i915_gem_pwrite { 872718dceddSDavid Howells /** Handle for the object being written to. */ 873718dceddSDavid Howells __u32 handle; 874718dceddSDavid Howells __u32 pad; 875718dceddSDavid Howells /** Offset into the object to write to */ 876718dceddSDavid Howells __u64 offset; 877718dceddSDavid Howells /** Length of data to write */ 878718dceddSDavid Howells __u64 size; 879718dceddSDavid Howells /** 880718dceddSDavid Howells * Pointer to read the data from. 881718dceddSDavid Howells * 882718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 883718dceddSDavid Howells */ 884718dceddSDavid Howells __u64 data_ptr; 885718dceddSDavid Howells }; 886718dceddSDavid Howells 887718dceddSDavid Howells struct drm_i915_gem_mmap { 888718dceddSDavid Howells /** Handle for the object being mapped. */ 889718dceddSDavid Howells __u32 handle; 890718dceddSDavid Howells __u32 pad; 891718dceddSDavid Howells /** Offset in the object to map. */ 892718dceddSDavid Howells __u64 offset; 893718dceddSDavid Howells /** 894718dceddSDavid Howells * Length of data to map. 895718dceddSDavid Howells * 896718dceddSDavid Howells * The value will be page-aligned. 897718dceddSDavid Howells */ 898718dceddSDavid Howells __u64 size; 899718dceddSDavid Howells /** 900718dceddSDavid Howells * Returned pointer the data was mapped at. 901718dceddSDavid Howells * 902718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 903718dceddSDavid Howells */ 904718dceddSDavid Howells __u64 addr_ptr; 9051816f923SAkash Goel 9061816f923SAkash Goel /** 9071816f923SAkash Goel * Flags for extended behaviour. 9081816f923SAkash Goel * 9091816f923SAkash Goel * Added in version 2. 9101816f923SAkash Goel */ 9111816f923SAkash Goel __u64 flags; 9121816f923SAkash Goel #define I915_MMAP_WC 0x1 913718dceddSDavid Howells }; 914718dceddSDavid Howells 915718dceddSDavid Howells struct drm_i915_gem_mmap_gtt { 916718dceddSDavid Howells /** Handle for the object being mapped. */ 917718dceddSDavid Howells __u32 handle; 918718dceddSDavid Howells __u32 pad; 919718dceddSDavid Howells /** 920718dceddSDavid Howells * Fake offset to use for subsequent mmap call 921718dceddSDavid Howells * 922718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 923718dceddSDavid Howells */ 924718dceddSDavid Howells __u64 offset; 925718dceddSDavid Howells }; 926718dceddSDavid Howells 9277961c5b6SMaarten Lankhorst /** 9287961c5b6SMaarten Lankhorst * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object. 9297961c5b6SMaarten Lankhorst * 9307961c5b6SMaarten Lankhorst * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl, 9317961c5b6SMaarten Lankhorst * and is used to retrieve the fake offset to mmap an object specified by &handle. 9327961c5b6SMaarten Lankhorst * 9337961c5b6SMaarten Lankhorst * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+. 9347961c5b6SMaarten Lankhorst * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave 9357961c5b6SMaarten Lankhorst * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`. 9367961c5b6SMaarten Lankhorst */ 937cc662126SAbdiel Janulgue struct drm_i915_gem_mmap_offset { 9387961c5b6SMaarten Lankhorst /** @handle: Handle for the object being mapped. */ 939cc662126SAbdiel Janulgue __u32 handle; 9407961c5b6SMaarten Lankhorst /** @pad: Must be zero */ 941cc662126SAbdiel Janulgue __u32 pad; 942cc662126SAbdiel Janulgue /** 9437961c5b6SMaarten Lankhorst * @offset: The fake offset to use for subsequent mmap call 944cc662126SAbdiel Janulgue * 945cc662126SAbdiel Janulgue * This is a fixed-size type for 32/64 compatibility. 946cc662126SAbdiel Janulgue */ 947cc662126SAbdiel Janulgue __u64 offset; 948cc662126SAbdiel Janulgue 949cc662126SAbdiel Janulgue /** 9507961c5b6SMaarten Lankhorst * @flags: Flags for extended behaviour. 951cc662126SAbdiel Janulgue * 9527961c5b6SMaarten Lankhorst * It is mandatory that one of the `MMAP_OFFSET` types 9537961c5b6SMaarten Lankhorst * should be included: 9547961c5b6SMaarten Lankhorst * 9557961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined) 9567961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching. 9577961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching. 9587961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching. 9597961c5b6SMaarten Lankhorst * 9607961c5b6SMaarten Lankhorst * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid 9617961c5b6SMaarten Lankhorst * type. On devices without local memory, this caching mode is invalid. 9627961c5b6SMaarten Lankhorst * 9637961c5b6SMaarten Lankhorst * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will 9647961c5b6SMaarten Lankhorst * be used, depending on the object placement on creation. WB will be used 9657961c5b6SMaarten Lankhorst * when the object can only exist in system memory, WC otherwise. 966cc662126SAbdiel Janulgue */ 967cc662126SAbdiel Janulgue __u64 flags; 9687961c5b6SMaarten Lankhorst 969cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_GTT 0 970cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WC 1 971cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WB 2 972cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_UC 3 9737961c5b6SMaarten Lankhorst #define I915_MMAP_OFFSET_FIXED 4 974cc662126SAbdiel Janulgue 9757961c5b6SMaarten Lankhorst /** 9767961c5b6SMaarten Lankhorst * @extensions: Zero-terminated chain of extensions. 977cc662126SAbdiel Janulgue * 978cc662126SAbdiel Janulgue * No current extensions defined; mbz. 979cc662126SAbdiel Janulgue */ 980cc662126SAbdiel Janulgue __u64 extensions; 981cc662126SAbdiel Janulgue }; 982cc662126SAbdiel Janulgue 9833aa8c57fSMatthew Auld /** 9843aa8c57fSMatthew Auld * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in 9853aa8c57fSMatthew Auld * preparation for accessing the pages via some CPU domain. 9863aa8c57fSMatthew Auld * 9873aa8c57fSMatthew Auld * Specifying a new write or read domain will flush the object out of the 9883aa8c57fSMatthew Auld * previous domain(if required), before then updating the objects domain 9893aa8c57fSMatthew Auld * tracking with the new domain. 9903aa8c57fSMatthew Auld * 9913aa8c57fSMatthew Auld * Note this might involve waiting for the object first if it is still active on 9923aa8c57fSMatthew Auld * the GPU. 9933aa8c57fSMatthew Auld * 9943aa8c57fSMatthew Auld * Supported values for @read_domains and @write_domain: 9953aa8c57fSMatthew Auld * 9963aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_WC: Uncached write-combined domain 9973aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_CPU: CPU cache domain 9983aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_GTT: Mappable aperture domain 9993aa8c57fSMatthew Auld * 10003aa8c57fSMatthew Auld * All other domains are rejected. 100181340cf3SMatthew Auld * 100281340cf3SMatthew Auld * Note that for discrete, starting from DG1, this is no longer supported, and 100381340cf3SMatthew Auld * is instead rejected. On such platforms the CPU domain is effectively static, 100481340cf3SMatthew Auld * where we also only support a single &drm_i915_gem_mmap_offset cache mode, 100581340cf3SMatthew Auld * which can't be set explicitly and instead depends on the object placements, 100681340cf3SMatthew Auld * as per the below. 100781340cf3SMatthew Auld * 100881340cf3SMatthew Auld * Implicit caching rules, starting from DG1: 100981340cf3SMatthew Auld * 101081340cf3SMatthew Auld * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions) 101181340cf3SMatthew Auld * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and 101281340cf3SMatthew Auld * mapped as write-combined only. 101381340cf3SMatthew Auld * 101481340cf3SMatthew Auld * - Everything else is always allocated and mapped as write-back, with the 101581340cf3SMatthew Auld * guarantee that everything is also coherent with the GPU. 101681340cf3SMatthew Auld * 101781340cf3SMatthew Auld * Note that this is likely to change in the future again, where we might need 101881340cf3SMatthew Auld * more flexibility on future devices, so making this all explicit as part of a 101981340cf3SMatthew Auld * new &drm_i915_gem_create_ext extension is probable. 10203aa8c57fSMatthew Auld */ 1021718dceddSDavid Howells struct drm_i915_gem_set_domain { 10223aa8c57fSMatthew Auld /** @handle: Handle for the object. */ 1023718dceddSDavid Howells __u32 handle; 1024718dceddSDavid Howells 10253aa8c57fSMatthew Auld /** @read_domains: New read domains. */ 1026718dceddSDavid Howells __u32 read_domains; 1027718dceddSDavid Howells 10283aa8c57fSMatthew Auld /** 10293aa8c57fSMatthew Auld * @write_domain: New write domain. 10303aa8c57fSMatthew Auld * 10313aa8c57fSMatthew Auld * Note that having something in the write domain implies it's in the 10323aa8c57fSMatthew Auld * read domain, and only that read domain. 10333aa8c57fSMatthew Auld */ 1034718dceddSDavid Howells __u32 write_domain; 1035718dceddSDavid Howells }; 1036718dceddSDavid Howells 1037718dceddSDavid Howells struct drm_i915_gem_sw_finish { 1038718dceddSDavid Howells /** Handle for the object */ 1039718dceddSDavid Howells __u32 handle; 1040718dceddSDavid Howells }; 1041718dceddSDavid Howells 1042718dceddSDavid Howells struct drm_i915_gem_relocation_entry { 1043718dceddSDavid Howells /** 1044718dceddSDavid Howells * Handle of the buffer being pointed to by this relocation entry. 1045718dceddSDavid Howells * 1046718dceddSDavid Howells * It's appealing to make this be an index into the mm_validate_entry 1047718dceddSDavid Howells * list to refer to the buffer, but this allows the driver to create 1048718dceddSDavid Howells * a relocation list for state buffers and not re-write it per 1049718dceddSDavid Howells * exec using the buffer. 1050718dceddSDavid Howells */ 1051718dceddSDavid Howells __u32 target_handle; 1052718dceddSDavid Howells 1053718dceddSDavid Howells /** 1054718dceddSDavid Howells * Value to be added to the offset of the target buffer to make up 1055718dceddSDavid Howells * the relocation entry. 1056718dceddSDavid Howells */ 1057718dceddSDavid Howells __u32 delta; 1058718dceddSDavid Howells 1059718dceddSDavid Howells /** Offset in the buffer the relocation entry will be written into */ 1060718dceddSDavid Howells __u64 offset; 1061718dceddSDavid Howells 1062718dceddSDavid Howells /** 1063718dceddSDavid Howells * Offset value of the target buffer that the relocation entry was last 1064718dceddSDavid Howells * written as. 1065718dceddSDavid Howells * 1066718dceddSDavid Howells * If the buffer has the same offset as last time, we can skip syncing 1067718dceddSDavid Howells * and writing the relocation. This value is written back out by 1068718dceddSDavid Howells * the execbuffer ioctl when the relocation is written. 1069718dceddSDavid Howells */ 1070718dceddSDavid Howells __u64 presumed_offset; 1071718dceddSDavid Howells 1072718dceddSDavid Howells /** 1073718dceddSDavid Howells * Target memory domains read by this operation. 1074718dceddSDavid Howells */ 1075718dceddSDavid Howells __u32 read_domains; 1076718dceddSDavid Howells 1077718dceddSDavid Howells /** 1078718dceddSDavid Howells * Target memory domains written by this operation. 1079718dceddSDavid Howells * 1080718dceddSDavid Howells * Note that only one domain may be written by the whole 1081718dceddSDavid Howells * execbuffer operation, so that where there are conflicts, 1082718dceddSDavid Howells * the application will get -EINVAL back. 1083718dceddSDavid Howells */ 1084718dceddSDavid Howells __u32 write_domain; 1085718dceddSDavid Howells }; 1086718dceddSDavid Howells 1087718dceddSDavid Howells /** @{ 1088718dceddSDavid Howells * Intel memory domains 1089718dceddSDavid Howells * 1090718dceddSDavid Howells * Most of these just align with the various caches in 1091718dceddSDavid Howells * the system and are used to flush and invalidate as 1092718dceddSDavid Howells * objects end up cached in different domains. 1093718dceddSDavid Howells */ 1094718dceddSDavid Howells /** CPU cache */ 1095718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU 0x00000001 1096718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */ 1097718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER 0x00000002 1098718dceddSDavid Howells /** Sampler cache, used by texture engine */ 1099718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER 0x00000004 1100718dceddSDavid Howells /** Command queue, used to load batch buffers */ 1101718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND 0x00000008 1102718dceddSDavid Howells /** Instruction cache, used by shader programs */ 1103718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 1104718dceddSDavid Howells /** Vertex address cache */ 1105718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX 0x00000020 1106718dceddSDavid Howells /** GTT domain - aperture and scanout */ 1107718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT 0x00000040 1108e22d8e3cSChris Wilson /** WC domain - uncached access */ 1109e22d8e3cSChris Wilson #define I915_GEM_DOMAIN_WC 0x00000080 1110718dceddSDavid Howells /** @} */ 1111718dceddSDavid Howells 1112718dceddSDavid Howells struct drm_i915_gem_exec_object { 1113718dceddSDavid Howells /** 1114718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this 1115718dceddSDavid Howells * operation. 1116718dceddSDavid Howells */ 1117718dceddSDavid Howells __u32 handle; 1118718dceddSDavid Howells 1119718dceddSDavid Howells /** Number of relocations to be performed on this buffer */ 1120718dceddSDavid Howells __u32 relocation_count; 1121718dceddSDavid Howells /** 1122718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing 1123718dceddSDavid Howells * the relocations to be performed in this buffer. 1124718dceddSDavid Howells */ 1125718dceddSDavid Howells __u64 relocs_ptr; 1126718dceddSDavid Howells 1127718dceddSDavid Howells /** Required alignment in graphics aperture */ 1128718dceddSDavid Howells __u64 alignment; 1129718dceddSDavid Howells 1130718dceddSDavid Howells /** 1131718dceddSDavid Howells * Returned value of the updated offset of the object, for future 1132718dceddSDavid Howells * presumed_offset writes. 1133718dceddSDavid Howells */ 1134718dceddSDavid Howells __u64 offset; 1135718dceddSDavid Howells }; 1136718dceddSDavid Howells 1137b5b6f6a6SJason Ekstrand /* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */ 1138718dceddSDavid Howells struct drm_i915_gem_execbuffer { 1139718dceddSDavid Howells /** 1140718dceddSDavid Howells * List of buffers to be validated with their relocations to be 1141718dceddSDavid Howells * performend on them. 1142718dceddSDavid Howells * 1143718dceddSDavid Howells * This is a pointer to an array of struct drm_i915_gem_validate_entry. 1144718dceddSDavid Howells * 1145718dceddSDavid Howells * These buffers must be listed in an order such that all relocations 1146718dceddSDavid Howells * a buffer is performing refer to buffers that have already appeared 1147718dceddSDavid Howells * in the validate list. 1148718dceddSDavid Howells */ 1149718dceddSDavid Howells __u64 buffers_ptr; 1150718dceddSDavid Howells __u32 buffer_count; 1151718dceddSDavid Howells 1152718dceddSDavid Howells /** Offset in the batchbuffer to start execution from. */ 1153718dceddSDavid Howells __u32 batch_start_offset; 1154718dceddSDavid Howells /** Bytes used in batchbuffer from batch_start_offset */ 1155718dceddSDavid Howells __u32 batch_len; 1156718dceddSDavid Howells __u32 DR1; 1157718dceddSDavid Howells __u32 DR4; 1158718dceddSDavid Howells __u32 num_cliprects; 1159718dceddSDavid Howells /** This is a struct drm_clip_rect *cliprects */ 1160718dceddSDavid Howells __u64 cliprects_ptr; 1161718dceddSDavid Howells }; 1162718dceddSDavid Howells 1163718dceddSDavid Howells struct drm_i915_gem_exec_object2 { 1164718dceddSDavid Howells /** 1165718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this 1166718dceddSDavid Howells * operation. 1167718dceddSDavid Howells */ 1168718dceddSDavid Howells __u32 handle; 1169718dceddSDavid Howells 1170718dceddSDavid Howells /** Number of relocations to be performed on this buffer */ 1171718dceddSDavid Howells __u32 relocation_count; 1172718dceddSDavid Howells /** 1173718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing 1174718dceddSDavid Howells * the relocations to be performed in this buffer. 1175718dceddSDavid Howells */ 1176718dceddSDavid Howells __u64 relocs_ptr; 1177718dceddSDavid Howells 1178718dceddSDavid Howells /** Required alignment in graphics aperture */ 1179718dceddSDavid Howells __u64 alignment; 1180718dceddSDavid Howells 1181718dceddSDavid Howells /** 1182506a8e87SChris Wilson * When the EXEC_OBJECT_PINNED flag is specified this is populated by 1183506a8e87SChris Wilson * the user with the GTT offset at which this object will be pinned. 1184caa574ffSMatthew Auld * 1185506a8e87SChris Wilson * When the I915_EXEC_NO_RELOC flag is specified this must contain the 1186506a8e87SChris Wilson * presumed_offset of the object. 1187caa574ffSMatthew Auld * 1188506a8e87SChris Wilson * During execbuffer2 the kernel populates it with the value of the 1189506a8e87SChris Wilson * current GTT offset of the object, for future presumed_offset writes. 1190caa574ffSMatthew Auld * 1191caa574ffSMatthew Auld * See struct drm_i915_gem_create_ext for the rules when dealing with 1192caa574ffSMatthew Auld * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with 1193caa574ffSMatthew Auld * minimum page sizes, like DG2. 1194718dceddSDavid Howells */ 1195718dceddSDavid Howells __u64 offset; 1196718dceddSDavid Howells 1197718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 1198ed5982e6SDaniel Vetter #define EXEC_OBJECT_NEEDS_GTT (1<<1) 1199ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE (1<<2) 1200101b506aSMichel Thierry #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 1201506a8e87SChris Wilson #define EXEC_OBJECT_PINNED (1<<4) 120291b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE (1<<5) 120377ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and 120477ae9957SChris Wilson * synchronises operations with outstanding rendering. This includes 120577ae9957SChris Wilson * rendering on other devices if exported via dma-buf. However, sometimes 120677ae9957SChris Wilson * this tracking is too coarse and the user knows better. For example, 120777ae9957SChris Wilson * if the object is split into non-overlapping ranges shared between different 120877ae9957SChris Wilson * clients or engines (i.e. suballocating objects), the implicit tracking 120977ae9957SChris Wilson * by kernel assumes that each operation affects the whole object rather 121077ae9957SChris Wilson * than an individual range, causing needless synchronisation between clients. 121177ae9957SChris Wilson * The kernel will also forgo any CPU cache flushes prior to rendering from 121277ae9957SChris Wilson * the object as the client is expected to be also handling such domain 121377ae9957SChris Wilson * tracking. 121477ae9957SChris Wilson * 121577ae9957SChris Wilson * The kernel maintains the implicit tracking in order to manage resources 121677ae9957SChris Wilson * used by the GPU - this flag only disables the synchronisation prior to 121777ae9957SChris Wilson * rendering with this object in this execbuf. 121877ae9957SChris Wilson * 121977ae9957SChris Wilson * Opting out of implicit synhronisation requires the user to do its own 122077ae9957SChris Wilson * explicit tracking to avoid rendering corruption. See, for example, 122177ae9957SChris Wilson * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 122277ae9957SChris Wilson */ 122377ae9957SChris Wilson #define EXEC_OBJECT_ASYNC (1<<6) 1224b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error 1225b0fd47adSChris Wilson * state upon a GPU hang involving this batch for post-mortem debugging. 1226b0fd47adSChris Wilson * These buffers are recorded in no particular order as "user" in 1227b0fd47adSChris Wilson * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see 1228b0fd47adSChris Wilson * if the kernel supports this flag. 1229b0fd47adSChris Wilson */ 1230b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE (1<<7) 12319e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 1232b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1) 1233718dceddSDavid Howells __u64 flags; 1234ed5982e6SDaniel Vetter 123591b2db6fSChris Wilson union { 1236718dceddSDavid Howells __u64 rsvd1; 123791b2db6fSChris Wilson __u64 pad_to_size; 123891b2db6fSChris Wilson }; 1239718dceddSDavid Howells __u64 rsvd2; 1240718dceddSDavid Howells }; 1241718dceddSDavid Howells 1242cf6e7bacSJason Ekstrand struct drm_i915_gem_exec_fence { 1243cf6e7bacSJason Ekstrand /** 1244cf6e7bacSJason Ekstrand * User's handle for a drm_syncobj to wait on or signal. 1245cf6e7bacSJason Ekstrand */ 1246cf6e7bacSJason Ekstrand __u32 handle; 1247cf6e7bacSJason Ekstrand 1248cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_WAIT (1<<0) 1249cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_SIGNAL (1<<1) 1250ebcaa1ffSTvrtko Ursulin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) 1251cf6e7bacSJason Ekstrand __u32 flags; 1252cf6e7bacSJason Ekstrand }; 1253cf6e7bacSJason Ekstrand 12542ef6a01fSMatthew Auld /* 125513149e8bSLionel Landwerlin * See drm_i915_gem_execbuffer_ext_timeline_fences. 125613149e8bSLionel Landwerlin */ 125713149e8bSLionel Landwerlin #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 125813149e8bSLionel Landwerlin 12592ef6a01fSMatthew Auld /* 126013149e8bSLionel Landwerlin * This structure describes an array of drm_syncobj and associated points for 126113149e8bSLionel Landwerlin * timeline variants of drm_syncobj. It is invalid to append this structure to 126213149e8bSLionel Landwerlin * the execbuf if I915_EXEC_FENCE_ARRAY is set. 126313149e8bSLionel Landwerlin */ 126413149e8bSLionel Landwerlin struct drm_i915_gem_execbuffer_ext_timeline_fences { 126513149e8bSLionel Landwerlin struct i915_user_extension base; 126613149e8bSLionel Landwerlin 126713149e8bSLionel Landwerlin /** 126813149e8bSLionel Landwerlin * Number of element in the handles_ptr & value_ptr arrays. 126913149e8bSLionel Landwerlin */ 127013149e8bSLionel Landwerlin __u64 fence_count; 127113149e8bSLionel Landwerlin 127213149e8bSLionel Landwerlin /** 127313149e8bSLionel Landwerlin * Pointer to an array of struct drm_i915_gem_exec_fence of length 127413149e8bSLionel Landwerlin * fence_count. 127513149e8bSLionel Landwerlin */ 127613149e8bSLionel Landwerlin __u64 handles_ptr; 127713149e8bSLionel Landwerlin 127813149e8bSLionel Landwerlin /** 127913149e8bSLionel Landwerlin * Pointer to an array of u64 values of length fence_count. Values 128013149e8bSLionel Landwerlin * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline 128113149e8bSLionel Landwerlin * drm_syncobj is invalid as it turns a drm_syncobj into a binary one. 128213149e8bSLionel Landwerlin */ 128313149e8bSLionel Landwerlin __u64 values_ptr; 1284cda9edd0SLionel Landwerlin }; 1285cda9edd0SLionel Landwerlin 1286718dceddSDavid Howells struct drm_i915_gem_execbuffer2 { 1287718dceddSDavid Howells /** 1288718dceddSDavid Howells * List of gem_exec_object2 structs 1289718dceddSDavid Howells */ 1290718dceddSDavid Howells __u64 buffers_ptr; 1291718dceddSDavid Howells __u32 buffer_count; 1292718dceddSDavid Howells 1293718dceddSDavid Howells /** Offset in the batchbuffer to start execution from. */ 1294718dceddSDavid Howells __u32 batch_start_offset; 1295718dceddSDavid Howells /** Bytes used in batchbuffer from batch_start_offset */ 1296718dceddSDavid Howells __u32 batch_len; 1297718dceddSDavid Howells __u32 DR1; 1298718dceddSDavid Howells __u32 DR4; 1299718dceddSDavid Howells __u32 num_cliprects; 1300cf6e7bacSJason Ekstrand /** 1301cf6e7bacSJason Ekstrand * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY 1302cda9edd0SLionel Landwerlin * & I915_EXEC_USE_EXTENSIONS are not set. 1303cda9edd0SLionel Landwerlin * 1304cda9edd0SLionel Landwerlin * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array 1305cda9edd0SLionel Landwerlin * of struct drm_i915_gem_exec_fence and num_cliprects is the length 1306cda9edd0SLionel Landwerlin * of the array. 1307cda9edd0SLionel Landwerlin * 1308cda9edd0SLionel Landwerlin * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a 1309cda9edd0SLionel Landwerlin * single struct i915_user_extension and num_cliprects is 0. 1310cf6e7bacSJason Ekstrand */ 1311718dceddSDavid Howells __u64 cliprects_ptr; 1312d90c06d5SChris Wilson #define I915_EXEC_RING_MASK (0x3f) 1313718dceddSDavid Howells #define I915_EXEC_DEFAULT (0<<0) 1314718dceddSDavid Howells #define I915_EXEC_RENDER (1<<0) 1315718dceddSDavid Howells #define I915_EXEC_BSD (2<<0) 1316718dceddSDavid Howells #define I915_EXEC_BLT (3<<0) 131782f91b6eSXiang, Haihao #define I915_EXEC_VEBOX (4<<0) 1318718dceddSDavid Howells 1319718dceddSDavid Howells /* Used for switching the constants addressing mode on gen4+ RENDER ring. 1320718dceddSDavid Howells * Gen6+ only supports relative addressing to dynamic state (default) and 1321718dceddSDavid Howells * absolute addressing. 1322718dceddSDavid Howells * 1323718dceddSDavid Howells * These flags are ignored for the BSD and BLT rings. 1324718dceddSDavid Howells */ 1325718dceddSDavid Howells #define I915_EXEC_CONSTANTS_MASK (3<<6) 1326718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 1327718dceddSDavid Howells #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 1328718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 1329718dceddSDavid Howells __u64 flags; 1330718dceddSDavid Howells __u64 rsvd1; /* now used for context info */ 1331718dceddSDavid Howells __u64 rsvd2; 1332718dceddSDavid Howells }; 1333718dceddSDavid Howells 1334718dceddSDavid Howells /** Resets the SO write offset registers for transform feedback on gen7. */ 1335718dceddSDavid Howells #define I915_EXEC_GEN7_SOL_RESET (1<<8) 1336718dceddSDavid Howells 1337c2fb7916SDaniel Vetter /** Request a privileged ("secure") batch buffer. Note only available for 1338c2fb7916SDaniel Vetter * DRM_ROOT_ONLY | DRM_MASTER processes. 1339c2fb7916SDaniel Vetter */ 1340c2fb7916SDaniel Vetter #define I915_EXEC_SECURE (1<<9) 1341c2fb7916SDaniel Vetter 1342b45305fcSDaniel Vetter /** Inform the kernel that the batch is and will always be pinned. This 1343b45305fcSDaniel Vetter * negates the requirement for a workaround to be performed to avoid 1344b45305fcSDaniel Vetter * an incoherent CS (such as can be found on 830/845). If this flag is 1345b45305fcSDaniel Vetter * not passed, the kernel will endeavour to make sure the batch is 1346b45305fcSDaniel Vetter * coherent with the CS before execution. If this flag is passed, 1347b45305fcSDaniel Vetter * userspace assumes the responsibility for ensuring the same. 1348b45305fcSDaniel Vetter */ 1349b45305fcSDaniel Vetter #define I915_EXEC_IS_PINNED (1<<10) 1350b45305fcSDaniel Vetter 1351c3d19d3cSGeert Uytterhoeven /** Provide a hint to the kernel that the command stream and auxiliary 1352ed5982e6SDaniel Vetter * state buffers already holds the correct presumed addresses and so the 1353ed5982e6SDaniel Vetter * relocation process may be skipped if no buffers need to be moved in 1354ed5982e6SDaniel Vetter * preparation for the execbuffer. 1355ed5982e6SDaniel Vetter */ 1356ed5982e6SDaniel Vetter #define I915_EXEC_NO_RELOC (1<<11) 1357ed5982e6SDaniel Vetter 1358eef90ccbSChris Wilson /** Use the reloc.handle as an index into the exec object array rather 1359eef90ccbSChris Wilson * than as the per-file handle. 1360eef90ccbSChris Wilson */ 1361eef90ccbSChris Wilson #define I915_EXEC_HANDLE_LUT (1<<12) 1362eef90ccbSChris Wilson 13638d360dffSZhipeng Gong /** Used for switching BSD rings on the platforms with two BSD rings */ 1364d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_SHIFT (13) 1365d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT) 1366d9da6aa0STvrtko Ursulin /* default ping-pong mode */ 1367d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT) 1368d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT) 1369d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT) 13708d360dffSZhipeng Gong 1371a9ed33caSAbdiel Janulgue /** Tell the kernel that the batchbuffer is processed by 1372a9ed33caSAbdiel Janulgue * the resource streamer. 1373a9ed33caSAbdiel Janulgue */ 1374a9ed33caSAbdiel Janulgue #define I915_EXEC_RESOURCE_STREAMER (1<<15) 1375a9ed33caSAbdiel Janulgue 1376fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent 1377fec0445cSChris Wilson * a sync_file fd to wait upon (in a nonblocking manner) prior to executing 1378fec0445cSChris Wilson * the batch. 1379fec0445cSChris Wilson * 1380fec0445cSChris Wilson * Returns -EINVAL if the sync_file fd cannot be found. 1381fec0445cSChris Wilson */ 1382fec0445cSChris Wilson #define I915_EXEC_FENCE_IN (1<<16) 1383fec0445cSChris Wilson 1384fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd 1385fec0445cSChris Wilson * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given 1386fec0445cSChris Wilson * to the caller, and it should be close() after use. (The fd is a regular 1387fec0445cSChris Wilson * file descriptor and will be cleaned up on process termination. It holds 1388fec0445cSChris Wilson * a reference to the request, but nothing else.) 1389fec0445cSChris Wilson * 1390fec0445cSChris Wilson * The sync_file fd can be combined with other sync_file and passed either 1391fec0445cSChris Wilson * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip 1392fec0445cSChris Wilson * will only occur after this request completes), or to other devices. 1393fec0445cSChris Wilson * 1394fec0445cSChris Wilson * Using I915_EXEC_FENCE_OUT requires use of 1395fec0445cSChris Wilson * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written 1396fec0445cSChris Wilson * back to userspace. Failure to do so will cause the out-fence to always 1397fec0445cSChris Wilson * be reported as zero, and the real fence fd to be leaked. 1398fec0445cSChris Wilson */ 1399fec0445cSChris Wilson #define I915_EXEC_FENCE_OUT (1<<17) 1400fec0445cSChris Wilson 14011a71cf2fSChris Wilson /* 14021a71cf2fSChris Wilson * Traditionally the execbuf ioctl has only considered the final element in 14031a71cf2fSChris Wilson * the execobject[] to be the executable batch. Often though, the client 14041a71cf2fSChris Wilson * will known the batch object prior to construction and being able to place 14051a71cf2fSChris Wilson * it into the execobject[] array first can simplify the relocation tracking. 14061a71cf2fSChris Wilson * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the 14071a71cf2fSChris Wilson * execobject[] as the * batch instead (the default is to use the last 14081a71cf2fSChris Wilson * element). 14091a71cf2fSChris Wilson */ 14101a71cf2fSChris Wilson #define I915_EXEC_BATCH_FIRST (1<<18) 1411cf6e7bacSJason Ekstrand 1412cf6e7bacSJason Ekstrand /* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr 1413cf6e7bacSJason Ekstrand * define an array of i915_gem_exec_fence structures which specify a set of 1414cf6e7bacSJason Ekstrand * dma fences to wait upon or signal. 1415cf6e7bacSJason Ekstrand */ 1416cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_ARRAY (1<<19) 1417cf6e7bacSJason Ekstrand 1418a88b6e4cSChris Wilson /* 1419a88b6e4cSChris Wilson * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent 1420a88b6e4cSChris Wilson * a sync_file fd to wait upon (in a nonblocking manner) prior to executing 1421a88b6e4cSChris Wilson * the batch. 1422a88b6e4cSChris Wilson * 1423a88b6e4cSChris Wilson * Returns -EINVAL if the sync_file fd cannot be found. 1424a88b6e4cSChris Wilson */ 1425a88b6e4cSChris Wilson #define I915_EXEC_FENCE_SUBMIT (1 << 20) 1426a88b6e4cSChris Wilson 1427cda9edd0SLionel Landwerlin /* 1428cda9edd0SLionel Landwerlin * Setting I915_EXEC_USE_EXTENSIONS implies that 1429cda9edd0SLionel Landwerlin * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked 1430cda9edd0SLionel Landwerlin * list of i915_user_extension. Each i915_user_extension node is the base of a 1431cda9edd0SLionel Landwerlin * larger structure. The list of supported structures are listed in the 1432cda9edd0SLionel Landwerlin * drm_i915_gem_execbuffer_ext enum. 1433cda9edd0SLionel Landwerlin */ 1434cda9edd0SLionel Landwerlin #define I915_EXEC_USE_EXTENSIONS (1 << 21) 1435cda9edd0SLionel Landwerlin 1436cda9edd0SLionel Landwerlin #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1)) 1437ed5982e6SDaniel Vetter 1438718dceddSDavid Howells #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1439718dceddSDavid Howells #define i915_execbuffer2_set_context_id(eb2, context) \ 1440718dceddSDavid Howells (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 1441718dceddSDavid Howells #define i915_execbuffer2_get_context_id(eb2) \ 1442718dceddSDavid Howells ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) 1443718dceddSDavid Howells 1444718dceddSDavid Howells struct drm_i915_gem_pin { 1445718dceddSDavid Howells /** Handle of the buffer to be pinned. */ 1446718dceddSDavid Howells __u32 handle; 1447718dceddSDavid Howells __u32 pad; 1448718dceddSDavid Howells 1449718dceddSDavid Howells /** alignment required within the aperture */ 1450718dceddSDavid Howells __u64 alignment; 1451718dceddSDavid Howells 1452718dceddSDavid Howells /** Returned GTT offset of the buffer. */ 1453718dceddSDavid Howells __u64 offset; 1454718dceddSDavid Howells }; 1455718dceddSDavid Howells 1456718dceddSDavid Howells struct drm_i915_gem_unpin { 1457718dceddSDavid Howells /** Handle of the buffer to be unpinned. */ 1458718dceddSDavid Howells __u32 handle; 1459718dceddSDavid Howells __u32 pad; 1460718dceddSDavid Howells }; 1461718dceddSDavid Howells 1462718dceddSDavid Howells struct drm_i915_gem_busy { 1463718dceddSDavid Howells /** Handle of the buffer to check for busy */ 1464718dceddSDavid Howells __u32 handle; 1465718dceddSDavid Howells 1466426960beSChris Wilson /** Return busy status 1467426960beSChris Wilson * 1468426960beSChris Wilson * A return of 0 implies that the object is idle (after 1469426960beSChris Wilson * having flushed any pending activity), and a non-zero return that 1470426960beSChris Wilson * the object is still in-flight on the GPU. (The GPU has not yet 1471426960beSChris Wilson * signaled completion for all pending requests that reference the 14721255501dSChris Wilson * object.) An object is guaranteed to become idle eventually (so 14731255501dSChris Wilson * long as no new GPU commands are executed upon it). Due to the 14741255501dSChris Wilson * asynchronous nature of the hardware, an object reported 14751255501dSChris Wilson * as busy may become idle before the ioctl is completed. 14761255501dSChris Wilson * 14771255501dSChris Wilson * Furthermore, if the object is busy, which engine is busy is only 1478c8b50242SChris Wilson * provided as a guide and only indirectly by reporting its class 1479c8b50242SChris Wilson * (there may be more than one engine in each class). There are race 1480c8b50242SChris Wilson * conditions which prevent the report of which engines are busy from 1481c8b50242SChris Wilson * being always accurate. However, the converse is not true. If the 1482c8b50242SChris Wilson * object is idle, the result of the ioctl, that all engines are idle, 1483c8b50242SChris Wilson * is accurate. 1484426960beSChris Wilson * 1485426960beSChris Wilson * The returned dword is split into two fields to indicate both 1486c8b50242SChris Wilson * the engine classess on which the object is being read, and the 1487c8b50242SChris Wilson * engine class on which it is currently being written (if any). 1488426960beSChris Wilson * 1489426960beSChris Wilson * The low word (bits 0:15) indicate if the object is being written 1490426960beSChris Wilson * to by any engine (there can only be one, as the GEM implicit 1491426960beSChris Wilson * synchronisation rules force writes to be serialised). Only the 1492c8b50242SChris Wilson * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as 1493c8b50242SChris Wilson * 1 not 0 etc) for the last write is reported. 1494426960beSChris Wilson * 1495c8b50242SChris Wilson * The high word (bits 16:31) are a bitmask of which engines classes 1496c8b50242SChris Wilson * are currently reading from the object. Multiple engines may be 1497426960beSChris Wilson * reading from the object simultaneously. 1498426960beSChris Wilson * 1499c8b50242SChris Wilson * The value of each engine class is the same as specified in the 1500c649432eSTvrtko Ursulin * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e. 1501c8b50242SChris Wilson * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc. 1502c649432eSTvrtko Ursulin * Some hardware may have parallel execution engines, e.g. multiple 1503c649432eSTvrtko Ursulin * media engines, which are mapped to the same class identifier and so 1504c649432eSTvrtko Ursulin * are not separately reported for busyness. 15051255501dSChris Wilson * 15061255501dSChris Wilson * Caveat emptor: 15071255501dSChris Wilson * Only the boolean result of this query is reliable; that is whether 15081255501dSChris Wilson * the object is idle or busy. The report of which engines are busy 15091255501dSChris Wilson * should be only used as a heuristic. 1510718dceddSDavid Howells */ 1511718dceddSDavid Howells __u32 busy; 1512718dceddSDavid Howells }; 1513718dceddSDavid Howells 151435c7ab42SDaniel Vetter /** 1515289f5a72SMatthew Auld * struct drm_i915_gem_caching - Set or get the caching for given object 1516289f5a72SMatthew Auld * handle. 151735c7ab42SDaniel Vetter * 1518289f5a72SMatthew Auld * Allow userspace to control the GTT caching bits for a given object when the 1519289f5a72SMatthew Auld * object is later mapped through the ppGTT(or GGTT on older platforms lacking 1520289f5a72SMatthew Auld * ppGTT support, or if the object is used for scanout). Note that this might 1521289f5a72SMatthew Auld * require unbinding the object from the GTT first, if its current caching value 1522289f5a72SMatthew Auld * doesn't match. 1523e7737b67SMatthew Auld * 1524e7737b67SMatthew Auld * Note that this all changes on discrete platforms, starting from DG1, the 1525e7737b67SMatthew Auld * set/get caching is no longer supported, and is now rejected. Instead the CPU 1526e7737b67SMatthew Auld * caching attributes(WB vs WC) will become an immutable creation time property 1527e7737b67SMatthew Auld * for the object, along with the GTT caching level. For now we don't expose any 1528e7737b67SMatthew Auld * new uAPI for this, instead on DG1 this is all implicit, although this largely 1529e7737b67SMatthew Auld * shouldn't matter since DG1 is coherent by default(without any way of 1530e7737b67SMatthew Auld * controlling it). 1531e7737b67SMatthew Auld * 1532e7737b67SMatthew Auld * Implicit caching rules, starting from DG1: 1533e7737b67SMatthew Auld * 1534e7737b67SMatthew Auld * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions) 1535e7737b67SMatthew Auld * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and 1536e7737b67SMatthew Auld * mapped as write-combined only. 1537e7737b67SMatthew Auld * 1538e7737b67SMatthew Auld * - Everything else is always allocated and mapped as write-back, with the 1539e7737b67SMatthew Auld * guarantee that everything is also coherent with the GPU. 1540e7737b67SMatthew Auld * 1541e7737b67SMatthew Auld * Note that this is likely to change in the future again, where we might need 1542e7737b67SMatthew Auld * more flexibility on future devices, so making this all explicit as part of a 1543e7737b67SMatthew Auld * new &drm_i915_gem_create_ext extension is probable. 1544e7737b67SMatthew Auld * 1545e7737b67SMatthew Auld * Side note: Part of the reason for this is that changing the at-allocation-time CPU 1546e7737b67SMatthew Auld * caching attributes for the pages might be required(and is expensive) if we 1547e7737b67SMatthew Auld * need to then CPU map the pages later with different caching attributes. This 1548e7737b67SMatthew Auld * inconsistent caching behaviour, while supported on x86, is not universally 1549e7737b67SMatthew Auld * supported on other architectures. So for simplicity we opt for setting 1550e7737b67SMatthew Auld * everything at creation time, whilst also making it immutable, on discrete 1551e7737b67SMatthew Auld * platforms. 155235c7ab42SDaniel Vetter */ 1553718dceddSDavid Howells struct drm_i915_gem_caching { 1554718dceddSDavid Howells /** 1555289f5a72SMatthew Auld * @handle: Handle of the buffer to set/get the caching level. 1556289f5a72SMatthew Auld */ 1557718dceddSDavid Howells __u32 handle; 1558718dceddSDavid Howells 1559718dceddSDavid Howells /** 1560289f5a72SMatthew Auld * @caching: The GTT caching level to apply or possible return value. 1561718dceddSDavid Howells * 1562289f5a72SMatthew Auld * The supported @caching values: 1563289f5a72SMatthew Auld * 1564289f5a72SMatthew Auld * I915_CACHING_NONE: 1565289f5a72SMatthew Auld * 1566289f5a72SMatthew Auld * GPU access is not coherent with CPU caches. Default for machines 1567289f5a72SMatthew Auld * without an LLC. This means manual flushing might be needed, if we 1568289f5a72SMatthew Auld * want GPU access to be coherent. 1569289f5a72SMatthew Auld * 1570289f5a72SMatthew Auld * I915_CACHING_CACHED: 1571289f5a72SMatthew Auld * 1572289f5a72SMatthew Auld * GPU access is coherent with CPU caches and furthermore the data is 1573289f5a72SMatthew Auld * cached in last-level caches shared between CPU cores and the GPU GT. 1574289f5a72SMatthew Auld * 1575289f5a72SMatthew Auld * I915_CACHING_DISPLAY: 1576289f5a72SMatthew Auld * 1577289f5a72SMatthew Auld * Special GPU caching mode which is coherent with the scanout engines. 1578289f5a72SMatthew Auld * Transparently falls back to I915_CACHING_NONE on platforms where no 1579289f5a72SMatthew Auld * special cache mode (like write-through or gfdt flushing) is 1580289f5a72SMatthew Auld * available. The kernel automatically sets this mode when using a 1581289f5a72SMatthew Auld * buffer as a scanout target. Userspace can manually set this mode to 1582289f5a72SMatthew Auld * avoid a costly stall and clflush in the hotpath of drawing the first 1583289f5a72SMatthew Auld * frame. 1584289f5a72SMatthew Auld */ 1585289f5a72SMatthew Auld #define I915_CACHING_NONE 0 1586289f5a72SMatthew Auld #define I915_CACHING_CACHED 1 1587289f5a72SMatthew Auld #define I915_CACHING_DISPLAY 2 1588718dceddSDavid Howells __u32 caching; 1589718dceddSDavid Howells }; 1590718dceddSDavid Howells 1591718dceddSDavid Howells #define I915_TILING_NONE 0 1592718dceddSDavid Howells #define I915_TILING_X 1 1593718dceddSDavid Howells #define I915_TILING_Y 2 1594ea673f17SMatt Roper /* 1595ea673f17SMatt Roper * Do not add new tiling types here. The I915_TILING_* values are for 1596ea673f17SMatt Roper * de-tiling fence registers that no longer exist on modern platforms. Although 1597ea673f17SMatt Roper * the hardware may support new types of tiling in general (e.g., Tile4), we 1598ea673f17SMatt Roper * do not need to add them to the uapi that is specific to now-defunct ioctls. 1599ea673f17SMatt Roper */ 1600deeb1519SChris Wilson #define I915_TILING_LAST I915_TILING_Y 1601718dceddSDavid Howells 1602718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_NONE 0 1603718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9 1 1604718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10 2 1605718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_11 3 1606718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_11 4 1607718dceddSDavid Howells /* Not seen by userland */ 1608718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_UNKNOWN 5 1609718dceddSDavid Howells /* Seen by userland. */ 1610718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_17 6 1611718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_17 7 1612718dceddSDavid Howells 1613718dceddSDavid Howells struct drm_i915_gem_set_tiling { 1614718dceddSDavid Howells /** Handle of the buffer to have its tiling state updated */ 1615718dceddSDavid Howells __u32 handle; 1616718dceddSDavid Howells 1617718dceddSDavid Howells /** 1618718dceddSDavid Howells * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1619718dceddSDavid Howells * I915_TILING_Y). 1620718dceddSDavid Howells * 1621718dceddSDavid Howells * This value is to be set on request, and will be updated by the 1622718dceddSDavid Howells * kernel on successful return with the actual chosen tiling layout. 1623718dceddSDavid Howells * 1624718dceddSDavid Howells * The tiling mode may be demoted to I915_TILING_NONE when the system 1625718dceddSDavid Howells * has bit 6 swizzling that can't be managed correctly by GEM. 1626718dceddSDavid Howells * 1627718dceddSDavid Howells * Buffer contents become undefined when changing tiling_mode. 1628718dceddSDavid Howells */ 1629718dceddSDavid Howells __u32 tiling_mode; 1630718dceddSDavid Howells 1631718dceddSDavid Howells /** 1632718dceddSDavid Howells * Stride in bytes for the object when in I915_TILING_X or 1633718dceddSDavid Howells * I915_TILING_Y. 1634718dceddSDavid Howells */ 1635718dceddSDavid Howells __u32 stride; 1636718dceddSDavid Howells 1637718dceddSDavid Howells /** 1638718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1639718dceddSDavid Howells * mmap mapping. 1640718dceddSDavid Howells */ 1641718dceddSDavid Howells __u32 swizzle_mode; 1642718dceddSDavid Howells }; 1643718dceddSDavid Howells 1644718dceddSDavid Howells struct drm_i915_gem_get_tiling { 1645718dceddSDavid Howells /** Handle of the buffer to get tiling state for. */ 1646718dceddSDavid Howells __u32 handle; 1647718dceddSDavid Howells 1648718dceddSDavid Howells /** 1649718dceddSDavid Howells * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1650718dceddSDavid Howells * I915_TILING_Y). 1651718dceddSDavid Howells */ 1652718dceddSDavid Howells __u32 tiling_mode; 1653718dceddSDavid Howells 1654718dceddSDavid Howells /** 1655718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1656718dceddSDavid Howells * mmap mapping. 1657718dceddSDavid Howells */ 1658718dceddSDavid Howells __u32 swizzle_mode; 165970f2f5c7SChris Wilson 166070f2f5c7SChris Wilson /** 166170f2f5c7SChris Wilson * Returned address bit 6 swizzling required for CPU access through 166270f2f5c7SChris Wilson * mmap mapping whilst bound. 166370f2f5c7SChris Wilson */ 166470f2f5c7SChris Wilson __u32 phys_swizzle_mode; 1665718dceddSDavid Howells }; 1666718dceddSDavid Howells 1667718dceddSDavid Howells struct drm_i915_gem_get_aperture { 1668718dceddSDavid Howells /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 1669718dceddSDavid Howells __u64 aper_size; 1670718dceddSDavid Howells 1671718dceddSDavid Howells /** 1672718dceddSDavid Howells * Available space in the aperture used by i915_gem_execbuffer, in 1673718dceddSDavid Howells * bytes 1674718dceddSDavid Howells */ 1675718dceddSDavid Howells __u64 aper_available_size; 1676718dceddSDavid Howells }; 1677718dceddSDavid Howells 1678718dceddSDavid Howells struct drm_i915_get_pipe_from_crtc_id { 1679718dceddSDavid Howells /** ID of CRTC being requested **/ 1680718dceddSDavid Howells __u32 crtc_id; 1681718dceddSDavid Howells 1682718dceddSDavid Howells /** pipe of requested CRTC **/ 1683718dceddSDavid Howells __u32 pipe; 1684718dceddSDavid Howells }; 1685718dceddSDavid Howells 1686718dceddSDavid Howells #define I915_MADV_WILLNEED 0 1687718dceddSDavid Howells #define I915_MADV_DONTNEED 1 1688718dceddSDavid Howells #define __I915_MADV_PURGED 2 /* internal state */ 1689718dceddSDavid Howells 1690718dceddSDavid Howells struct drm_i915_gem_madvise { 1691718dceddSDavid Howells /** Handle of the buffer to change the backing store advice */ 1692718dceddSDavid Howells __u32 handle; 1693718dceddSDavid Howells 1694718dceddSDavid Howells /* Advice: either the buffer will be needed again in the near future, 1695718dceddSDavid Howells * or wont be and could be discarded under memory pressure. 1696718dceddSDavid Howells */ 1697718dceddSDavid Howells __u32 madv; 1698718dceddSDavid Howells 1699718dceddSDavid Howells /** Whether the backing store still exists. */ 1700718dceddSDavid Howells __u32 retained; 1701718dceddSDavid Howells }; 1702718dceddSDavid Howells 1703718dceddSDavid Howells /* flags */ 1704718dceddSDavid Howells #define I915_OVERLAY_TYPE_MASK 0xff 1705718dceddSDavid Howells #define I915_OVERLAY_YUV_PLANAR 0x01 1706718dceddSDavid Howells #define I915_OVERLAY_YUV_PACKED 0x02 1707718dceddSDavid Howells #define I915_OVERLAY_RGB 0x03 1708718dceddSDavid Howells 1709718dceddSDavid Howells #define I915_OVERLAY_DEPTH_MASK 0xff00 1710718dceddSDavid Howells #define I915_OVERLAY_RGB24 0x1000 1711718dceddSDavid Howells #define I915_OVERLAY_RGB16 0x2000 1712718dceddSDavid Howells #define I915_OVERLAY_RGB15 0x3000 1713718dceddSDavid Howells #define I915_OVERLAY_YUV422 0x0100 1714718dceddSDavid Howells #define I915_OVERLAY_YUV411 0x0200 1715718dceddSDavid Howells #define I915_OVERLAY_YUV420 0x0300 1716718dceddSDavid Howells #define I915_OVERLAY_YUV410 0x0400 1717718dceddSDavid Howells 1718718dceddSDavid Howells #define I915_OVERLAY_SWAP_MASK 0xff0000 1719718dceddSDavid Howells #define I915_OVERLAY_NO_SWAP 0x000000 1720718dceddSDavid Howells #define I915_OVERLAY_UV_SWAP 0x010000 1721718dceddSDavid Howells #define I915_OVERLAY_Y_SWAP 0x020000 1722718dceddSDavid Howells #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 1723718dceddSDavid Howells 1724718dceddSDavid Howells #define I915_OVERLAY_FLAGS_MASK 0xff000000 1725718dceddSDavid Howells #define I915_OVERLAY_ENABLE 0x01000000 1726718dceddSDavid Howells 1727718dceddSDavid Howells struct drm_intel_overlay_put_image { 1728718dceddSDavid Howells /* various flags and src format description */ 1729718dceddSDavid Howells __u32 flags; 1730718dceddSDavid Howells /* source picture description */ 1731718dceddSDavid Howells __u32 bo_handle; 1732718dceddSDavid Howells /* stride values and offsets are in bytes, buffer relative */ 1733718dceddSDavid Howells __u16 stride_Y; /* stride for packed formats */ 1734718dceddSDavid Howells __u16 stride_UV; 1735718dceddSDavid Howells __u32 offset_Y; /* offset for packet formats */ 1736718dceddSDavid Howells __u32 offset_U; 1737718dceddSDavid Howells __u32 offset_V; 1738718dceddSDavid Howells /* in pixels */ 1739718dceddSDavid Howells __u16 src_width; 1740718dceddSDavid Howells __u16 src_height; 1741718dceddSDavid Howells /* to compensate the scaling factors for partially covered surfaces */ 1742718dceddSDavid Howells __u16 src_scan_width; 1743718dceddSDavid Howells __u16 src_scan_height; 1744718dceddSDavid Howells /* output crtc description */ 1745718dceddSDavid Howells __u32 crtc_id; 1746718dceddSDavid Howells __u16 dst_x; 1747718dceddSDavid Howells __u16 dst_y; 1748718dceddSDavid Howells __u16 dst_width; 1749718dceddSDavid Howells __u16 dst_height; 1750718dceddSDavid Howells }; 1751718dceddSDavid Howells 1752718dceddSDavid Howells /* flags */ 1753718dceddSDavid Howells #define I915_OVERLAY_UPDATE_ATTRS (1<<0) 1754718dceddSDavid Howells #define I915_OVERLAY_UPDATE_GAMMA (1<<1) 1755ea9da4e4SChris Wilson #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2) 1756718dceddSDavid Howells struct drm_intel_overlay_attrs { 1757718dceddSDavid Howells __u32 flags; 1758718dceddSDavid Howells __u32 color_key; 1759718dceddSDavid Howells __s32 brightness; 1760718dceddSDavid Howells __u32 contrast; 1761718dceddSDavid Howells __u32 saturation; 1762718dceddSDavid Howells __u32 gamma0; 1763718dceddSDavid Howells __u32 gamma1; 1764718dceddSDavid Howells __u32 gamma2; 1765718dceddSDavid Howells __u32 gamma3; 1766718dceddSDavid Howells __u32 gamma4; 1767718dceddSDavid Howells __u32 gamma5; 1768718dceddSDavid Howells }; 1769718dceddSDavid Howells 1770718dceddSDavid Howells /* 1771718dceddSDavid Howells * Intel sprite handling 1772718dceddSDavid Howells * 1773718dceddSDavid Howells * Color keying works with a min/mask/max tuple. Both source and destination 1774718dceddSDavid Howells * color keying is allowed. 1775718dceddSDavid Howells * 1776718dceddSDavid Howells * Source keying: 1777718dceddSDavid Howells * Sprite pixels within the min & max values, masked against the color channels 1778718dceddSDavid Howells * specified in the mask field, will be transparent. All other pixels will 1779718dceddSDavid Howells * be displayed on top of the primary plane. For RGB surfaces, only the min 1780718dceddSDavid Howells * and mask fields will be used; ranged compares are not allowed. 1781718dceddSDavid Howells * 1782718dceddSDavid Howells * Destination keying: 1783718dceddSDavid Howells * Primary plane pixels that match the min value, masked against the color 1784718dceddSDavid Howells * channels specified in the mask field, will be replaced by corresponding 1785718dceddSDavid Howells * pixels from the sprite plane. 1786718dceddSDavid Howells * 1787718dceddSDavid Howells * Note that source & destination keying are exclusive; only one can be 1788718dceddSDavid Howells * active on a given plane. 1789718dceddSDavid Howells */ 1790718dceddSDavid Howells 17916ec5bd34SVille Syrjälä #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set 17926ec5bd34SVille Syrjälä * flags==0 to disable colorkeying. 17936ec5bd34SVille Syrjälä */ 1794718dceddSDavid Howells #define I915_SET_COLORKEY_DESTINATION (1<<1) 1795718dceddSDavid Howells #define I915_SET_COLORKEY_SOURCE (1<<2) 1796718dceddSDavid Howells struct drm_intel_sprite_colorkey { 1797718dceddSDavid Howells __u32 plane_id; 1798718dceddSDavid Howells __u32 min_value; 1799718dceddSDavid Howells __u32 channel_mask; 1800718dceddSDavid Howells __u32 max_value; 1801718dceddSDavid Howells __u32 flags; 1802718dceddSDavid Howells }; 1803718dceddSDavid Howells 1804718dceddSDavid Howells struct drm_i915_gem_wait { 1805718dceddSDavid Howells /** Handle of BO we shall wait on */ 1806718dceddSDavid Howells __u32 bo_handle; 1807718dceddSDavid Howells __u32 flags; 1808718dceddSDavid Howells /** Number of nanoseconds to wait, Returns time remaining. */ 1809718dceddSDavid Howells __s64 timeout_ns; 1810718dceddSDavid Howells }; 1811718dceddSDavid Howells 1812718dceddSDavid Howells struct drm_i915_gem_context_create { 1813b9171541SChris Wilson __u32 ctx_id; /* output: id of new context*/ 1814718dceddSDavid Howells __u32 pad; 1815718dceddSDavid Howells }; 1816718dceddSDavid Howells 1817b9171541SChris Wilson struct drm_i915_gem_context_create_ext { 1818b9171541SChris Wilson __u32 ctx_id; /* output: id of new context*/ 1819b9171541SChris Wilson __u32 flags; 1820b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) 18218319f44cSChris Wilson #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) 1822b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ 18238319f44cSChris Wilson (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) 1824e0695db7SChris Wilson __u64 extensions; 18255cc9ed4bSChris Wilson }; 18265cc9ed4bSChris Wilson 1827c9dc0f35SChris Wilson struct drm_i915_gem_context_param { 1828c9dc0f35SChris Wilson __u32 ctx_id; 1829c9dc0f35SChris Wilson __u32 size; 1830c9dc0f35SChris Wilson __u64 param; 1831c9dc0f35SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 18326ff6d61dSJason Ekstrand /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance 18336ff6d61dSJason Ekstrand * someone somewhere has attempted to use it, never re-use this context 18346ff6d61dSJason Ekstrand * param number. 18356ff6d61dSJason Ekstrand */ 1836b1b38278SDavid Weinehall #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1837fa8848f2SChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1838bc3d6744SChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 183984102171SMika Kuoppala #define I915_CONTEXT_PARAM_BANNABLE 0x5 1840ac14fbd4SChris Wilson #define I915_CONTEXT_PARAM_PRIORITY 0x6 1841ac14fbd4SChris Wilson #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1842ac14fbd4SChris Wilson #define I915_CONTEXT_DEFAULT_PRIORITY 0 1843ac14fbd4SChris Wilson #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1844e46c2e99STvrtko Ursulin /* 1845e46c2e99STvrtko Ursulin * When using the following param, value should be a pointer to 1846e46c2e99STvrtko Ursulin * drm_i915_gem_context_param_sseu. 1847e46c2e99STvrtko Ursulin */ 1848e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_SSEU 0x7 1849ba4fda62SChris Wilson 1850ba4fda62SChris Wilson /* 1851ba4fda62SChris Wilson * Not all clients may want to attempt automatic recover of a context after 1852ba4fda62SChris Wilson * a hang (for example, some clients may only submit very small incremental 1853ba4fda62SChris Wilson * batches relying on known logical state of previous batches which will never 1854ba4fda62SChris Wilson * recover correctly and each attempt will hang), and so would prefer that 1855ba4fda62SChris Wilson * the context is forever banned instead. 1856ba4fda62SChris Wilson * 1857ba4fda62SChris Wilson * If set to false (0), after a reset, subsequent (and in flight) rendering 1858ba4fda62SChris Wilson * from this context is discarded, and the client will need to create a new 1859ba4fda62SChris Wilson * context to use instead. 1860ba4fda62SChris Wilson * 1861ba4fda62SChris Wilson * If set to true (1), the kernel will automatically attempt to recover the 1862ba4fda62SChris Wilson * context by skipping the hanging batch and executing the next batch starting 1863ba4fda62SChris Wilson * from the default context state (discarding the incomplete logical context 1864ba4fda62SChris Wilson * state lost due to the reset). 1865ba4fda62SChris Wilson * 1866ba4fda62SChris Wilson * On creation, all new contexts are marked as recoverable. 1867ba4fda62SChris Wilson */ 1868ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE 0x8 18697f3f317aSChris Wilson 18707f3f317aSChris Wilson /* 18717f3f317aSChris Wilson * The id of the associated virtual memory address space (ppGTT) of 18727f3f317aSChris Wilson * this context. Can be retrieved and passed to another context 18737f3f317aSChris Wilson * (on the same fd) for both to use the same ppGTT and so share 18747f3f317aSChris Wilson * address layouts, and avoid reloading the page tables on context 18757f3f317aSChris Wilson * switches between themselves. 18767f3f317aSChris Wilson * 18777f3f317aSChris Wilson * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY. 18787f3f317aSChris Wilson */ 18797f3f317aSChris Wilson #define I915_CONTEXT_PARAM_VM 0x9 1880976b55f0SChris Wilson 1881976b55f0SChris Wilson /* 1882976b55f0SChris Wilson * I915_CONTEXT_PARAM_ENGINES: 1883976b55f0SChris Wilson * 1884976b55f0SChris Wilson * Bind this context to operate on this subset of available engines. Henceforth, 1885976b55f0SChris Wilson * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as 1886976b55f0SChris Wilson * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0] 1887976b55f0SChris Wilson * and upwards. Slots 0...N are filled in using the specified (class, instance). 1888976b55f0SChris Wilson * Use 1889976b55f0SChris Wilson * engine_class: I915_ENGINE_CLASS_INVALID, 1890976b55f0SChris Wilson * engine_instance: I915_ENGINE_CLASS_INVALID_NONE 1891976b55f0SChris Wilson * to specify a gap in the array that can be filled in later, e.g. by a 1892976b55f0SChris Wilson * virtual engine used for load balancing. 1893976b55f0SChris Wilson * 1894976b55f0SChris Wilson * Setting the number of engines bound to the context to 0, by passing a zero 1895976b55f0SChris Wilson * sized argument, will revert back to default settings. 1896976b55f0SChris Wilson * 1897976b55f0SChris Wilson * See struct i915_context_param_engines. 1898ee113690SChris Wilson * 1899ee113690SChris Wilson * Extensions: 1900ee113690SChris Wilson * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE) 1901ee113690SChris Wilson * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) 1902e5e32171SMatthew Brost * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT) 1903976b55f0SChris Wilson */ 1904976b55f0SChris Wilson #define I915_CONTEXT_PARAM_ENGINES 0xa 1905a0e04715SChris Wilson 1906a0e04715SChris Wilson /* 1907a0e04715SChris Wilson * I915_CONTEXT_PARAM_PERSISTENCE: 1908a0e04715SChris Wilson * 1909a0e04715SChris Wilson * Allow the context and active rendering to survive the process until 1910a0e04715SChris Wilson * completion. Persistence allows fire-and-forget clients to queue up a 1911a0e04715SChris Wilson * bunch of work, hand the output over to a display server and then quit. 1912a0e04715SChris Wilson * If the context is marked as not persistent, upon closing (either via 1913a0e04715SChris Wilson * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure 1914a0e04715SChris Wilson * or process termination), the context and any outstanding requests will be 1915a0e04715SChris Wilson * cancelled (and exported fences for cancelled requests marked as -EIO). 1916a0e04715SChris Wilson * 1917a0e04715SChris Wilson * By default, new contexts allow persistence. 1918a0e04715SChris Wilson */ 1919a0e04715SChris Wilson #define I915_CONTEXT_PARAM_PERSISTENCE 0xb 192088be76cdSChris Wilson 1921fe4751c3SJason Ekstrand /* This API has been removed. On the off chance someone somewhere has 1922fe4751c3SJason Ekstrand * attempted to use it, never re-use this context param number. 192388be76cdSChris Wilson */ 192488be76cdSChris Wilson #define I915_CONTEXT_PARAM_RINGSIZE 0xc 1925d3ac8d42SDaniele Ceraolo Spurio 1926d3ac8d42SDaniele Ceraolo Spurio /* 1927d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_PROTECTED_CONTENT: 1928d3ac8d42SDaniele Ceraolo Spurio * 1929d3ac8d42SDaniele Ceraolo Spurio * Mark that the context makes use of protected content, which will result 1930d3ac8d42SDaniele Ceraolo Spurio * in the context being invalidated when the protected content session is. 1931d3ac8d42SDaniele Ceraolo Spurio * Given that the protected content session is killed on suspend, the device 1932d3ac8d42SDaniele Ceraolo Spurio * is kept awake for the lifetime of a protected context, so the user should 1933d3ac8d42SDaniele Ceraolo Spurio * make sure to dispose of them once done. 1934d3ac8d42SDaniele Ceraolo Spurio * This flag can only be set at context creation time and, when set to true, 1935d3ac8d42SDaniele Ceraolo Spurio * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE 1936d3ac8d42SDaniele Ceraolo Spurio * to false. This flag can't be set to true in conjunction with setting the 1937d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example: 1938d3ac8d42SDaniele Ceraolo Spurio * 1939d3ac8d42SDaniele Ceraolo Spurio * .. code-block:: C 1940d3ac8d42SDaniele Ceraolo Spurio * 1941d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_context_create_ext_setparam p_protected = { 1942d3ac8d42SDaniele Ceraolo Spurio * .base = { 1943d3ac8d42SDaniele Ceraolo Spurio * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 1944d3ac8d42SDaniele Ceraolo Spurio * }, 1945d3ac8d42SDaniele Ceraolo Spurio * .param = { 1946d3ac8d42SDaniele Ceraolo Spurio * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT, 1947d3ac8d42SDaniele Ceraolo Spurio * .value = 1, 1948d3ac8d42SDaniele Ceraolo Spurio * } 1949d3ac8d42SDaniele Ceraolo Spurio * }; 1950d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_context_create_ext_setparam p_norecover = { 1951d3ac8d42SDaniele Ceraolo Spurio * .base = { 1952d3ac8d42SDaniele Ceraolo Spurio * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 1953d3ac8d42SDaniele Ceraolo Spurio * .next_extension = to_user_pointer(&p_protected), 1954d3ac8d42SDaniele Ceraolo Spurio * }, 1955d3ac8d42SDaniele Ceraolo Spurio * .param = { 1956d3ac8d42SDaniele Ceraolo Spurio * .param = I915_CONTEXT_PARAM_RECOVERABLE, 1957d3ac8d42SDaniele Ceraolo Spurio * .value = 0, 1958d3ac8d42SDaniele Ceraolo Spurio * } 1959d3ac8d42SDaniele Ceraolo Spurio * }; 1960d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_context_create_ext create = { 1961d3ac8d42SDaniele Ceraolo Spurio * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, 1962d3ac8d42SDaniele Ceraolo Spurio * .extensions = to_user_pointer(&p_norecover); 1963d3ac8d42SDaniele Ceraolo Spurio * }; 1964d3ac8d42SDaniele Ceraolo Spurio * 1965d3ac8d42SDaniele Ceraolo Spurio * ctx_id = gem_context_create_ext(drm_fd, &create); 1966d3ac8d42SDaniele Ceraolo Spurio * 1967d3ac8d42SDaniele Ceraolo Spurio * In addition to the normal failure cases, setting this flag during context 1968d3ac8d42SDaniele Ceraolo Spurio * creation can result in the following errors: 1969d3ac8d42SDaniele Ceraolo Spurio * 1970d3ac8d42SDaniele Ceraolo Spurio * -ENODEV: feature not available 1971d3ac8d42SDaniele Ceraolo Spurio * -EPERM: trying to mark a recoverable or not bannable context as protected 1972d3ac8d42SDaniele Ceraolo Spurio */ 1973d3ac8d42SDaniele Ceraolo Spurio #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd 1974be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */ 1975e0695db7SChris Wilson 1976c9dc0f35SChris Wilson __u64 value; 1977c9dc0f35SChris Wilson }; 1978c9dc0f35SChris Wilson 19792ef6a01fSMatthew Auld /* 1980e46c2e99STvrtko Ursulin * Context SSEU programming 1981e46c2e99STvrtko Ursulin * 1982e46c2e99STvrtko Ursulin * It may be necessary for either functional or performance reason to configure 1983e46c2e99STvrtko Ursulin * a context to run with a reduced number of SSEU (where SSEU stands for Slice/ 1984e46c2e99STvrtko Ursulin * Sub-slice/EU). 1985e46c2e99STvrtko Ursulin * 1986e46c2e99STvrtko Ursulin * This is done by configuring SSEU configuration using the below 1987e46c2e99STvrtko Ursulin * @struct drm_i915_gem_context_param_sseu for every supported engine which 1988e46c2e99STvrtko Ursulin * userspace intends to use. 1989e46c2e99STvrtko Ursulin * 1990e46c2e99STvrtko Ursulin * Not all GPUs or engines support this functionality in which case an error 1991e46c2e99STvrtko Ursulin * code -ENODEV will be returned. 1992e46c2e99STvrtko Ursulin * 1993e46c2e99STvrtko Ursulin * Also, flexibility of possible SSEU configuration permutations varies between 1994e46c2e99STvrtko Ursulin * GPU generations and software imposed limitations. Requesting such a 1995e46c2e99STvrtko Ursulin * combination will return an error code of -EINVAL. 1996e46c2e99STvrtko Ursulin * 1997e46c2e99STvrtko Ursulin * NOTE: When perf/OA is active the context's SSEU configuration is ignored in 1998e46c2e99STvrtko Ursulin * favour of a single global setting. 1999e46c2e99STvrtko Ursulin */ 2000e46c2e99STvrtko Ursulin struct drm_i915_gem_context_param_sseu { 2001e46c2e99STvrtko Ursulin /* 2002e46c2e99STvrtko Ursulin * Engine class & instance to be configured or queried. 2003e46c2e99STvrtko Ursulin */ 2004d1172ab3SChris Wilson struct i915_engine_class_instance engine; 2005e46c2e99STvrtko Ursulin 2006e46c2e99STvrtko Ursulin /* 2007e620f7b3SChris Wilson * Unknown flags must be cleared to zero. 2008e46c2e99STvrtko Ursulin */ 2009e46c2e99STvrtko Ursulin __u32 flags; 2010e620f7b3SChris Wilson #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0) 2011e46c2e99STvrtko Ursulin 2012e46c2e99STvrtko Ursulin /* 2013e46c2e99STvrtko Ursulin * Mask of slices to enable for the context. Valid values are a subset 2014e46c2e99STvrtko Ursulin * of the bitmask value returned for I915_PARAM_SLICE_MASK. 2015e46c2e99STvrtko Ursulin */ 2016e46c2e99STvrtko Ursulin __u64 slice_mask; 2017e46c2e99STvrtko Ursulin 2018e46c2e99STvrtko Ursulin /* 2019e46c2e99STvrtko Ursulin * Mask of subslices to enable for the context. Valid values are a 2020e46c2e99STvrtko Ursulin * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK. 2021e46c2e99STvrtko Ursulin */ 2022e46c2e99STvrtko Ursulin __u64 subslice_mask; 2023e46c2e99STvrtko Ursulin 2024e46c2e99STvrtko Ursulin /* 2025e46c2e99STvrtko Ursulin * Minimum/Maximum number of EUs to enable per subslice for the 2026e46c2e99STvrtko Ursulin * context. min_eus_per_subslice must be inferior or equal to 2027e46c2e99STvrtko Ursulin * max_eus_per_subslice. 2028e46c2e99STvrtko Ursulin */ 2029e46c2e99STvrtko Ursulin __u16 min_eus_per_subslice; 2030e46c2e99STvrtko Ursulin __u16 max_eus_per_subslice; 2031e46c2e99STvrtko Ursulin 2032e46c2e99STvrtko Ursulin /* 2033e46c2e99STvrtko Ursulin * Unused for now. Must be cleared to zero. 2034e46c2e99STvrtko Ursulin */ 2035e46c2e99STvrtko Ursulin __u32 rsvd; 2036e46c2e99STvrtko Ursulin }; 2037e46c2e99STvrtko Ursulin 203857772953STvrtko Ursulin /** 203957772953STvrtko Ursulin * DOC: Virtual Engine uAPI 204057772953STvrtko Ursulin * 204157772953STvrtko Ursulin * Virtual engine is a concept where userspace is able to configure a set of 204257772953STvrtko Ursulin * physical engines, submit a batch buffer, and let the driver execute it on any 204357772953STvrtko Ursulin * engine from the set as it sees fit. 204457772953STvrtko Ursulin * 204557772953STvrtko Ursulin * This is primarily useful on parts which have multiple instances of a same 204657772953STvrtko Ursulin * class engine, like for example GT3+ Skylake parts with their two VCS engines. 204757772953STvrtko Ursulin * 204857772953STvrtko Ursulin * For instance userspace can enumerate all engines of a certain class using the 204957772953STvrtko Ursulin * previously described `Engine Discovery uAPI`_. After that userspace can 205057772953STvrtko Ursulin * create a GEM context with a placeholder slot for the virtual engine (using 205157772953STvrtko Ursulin * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class 205257772953STvrtko Ursulin * and instance respectively) and finally using the 205357772953STvrtko Ursulin * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in 205457772953STvrtko Ursulin * the same reserved slot. 205557772953STvrtko Ursulin * 205657772953STvrtko Ursulin * Example of creating a virtual engine and submitting a batch buffer to it: 205757772953STvrtko Ursulin * 205857772953STvrtko Ursulin * .. code-block:: C 205957772953STvrtko Ursulin * 206057772953STvrtko Ursulin * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = { 206157772953STvrtko Ursulin * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE, 206257772953STvrtko Ursulin * .engine_index = 0, // Place this virtual engine into engine map slot 0 206357772953STvrtko Ursulin * .num_siblings = 2, 206457772953STvrtko Ursulin * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 }, 206557772953STvrtko Ursulin * { I915_ENGINE_CLASS_VIDEO, 1 }, }, 206657772953STvrtko Ursulin * }; 206757772953STvrtko Ursulin * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = { 206857772953STvrtko Ursulin * .engines = { { I915_ENGINE_CLASS_INVALID, 206957772953STvrtko Ursulin * I915_ENGINE_CLASS_INVALID_NONE } }, 207057772953STvrtko Ursulin * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension 207157772953STvrtko Ursulin * }; 207257772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext_setparam p_engines = { 207357772953STvrtko Ursulin * .base = { 207457772953STvrtko Ursulin * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 207557772953STvrtko Ursulin * }, 207657772953STvrtko Ursulin * .param = { 207757772953STvrtko Ursulin * .param = I915_CONTEXT_PARAM_ENGINES, 207857772953STvrtko Ursulin * .value = to_user_pointer(&engines), 207957772953STvrtko Ursulin * .size = sizeof(engines), 208057772953STvrtko Ursulin * }, 208157772953STvrtko Ursulin * }; 208257772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext create = { 208357772953STvrtko Ursulin * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, 208457772953STvrtko Ursulin * .extensions = to_user_pointer(&p_engines); 208557772953STvrtko Ursulin * }; 208657772953STvrtko Ursulin * 208757772953STvrtko Ursulin * ctx_id = gem_context_create_ext(drm_fd, &create); 208857772953STvrtko Ursulin * 208957772953STvrtko Ursulin * // Now we have created a GEM context with its engine map containing a 209057772953STvrtko Ursulin * // single virtual engine. Submissions to this slot can go either to 209157772953STvrtko Ursulin * // vcs0 or vcs1, depending on the load balancing algorithm used inside 209257772953STvrtko Ursulin * // the driver. The load balancing is dynamic from one batch buffer to 209357772953STvrtko Ursulin * // another and transparent to userspace. 209457772953STvrtko Ursulin * 209557772953STvrtko Ursulin * ... 209657772953STvrtko Ursulin * execbuf.rsvd1 = ctx_id; 209757772953STvrtko Ursulin * execbuf.flags = 0; // Submits to index 0 which is the virtual engine 209857772953STvrtko Ursulin * gem_execbuf(drm_fd, &execbuf); 209957772953STvrtko Ursulin */ 210057772953STvrtko Ursulin 21016d06779eSChris Wilson /* 21026d06779eSChris Wilson * i915_context_engines_load_balance: 21036d06779eSChris Wilson * 21046d06779eSChris Wilson * Enable load balancing across this set of engines. 21056d06779eSChris Wilson * 21066d06779eSChris Wilson * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when 21076d06779eSChris Wilson * used will proxy the execbuffer request onto one of the set of engines 21086d06779eSChris Wilson * in such a way as to distribute the load evenly across the set. 21096d06779eSChris Wilson * 21106d06779eSChris Wilson * The set of engines must be compatible (e.g. the same HW class) as they 21116d06779eSChris Wilson * will share the same logical GPU context and ring. 21126d06779eSChris Wilson * 21136d06779eSChris Wilson * To intermix rendering with the virtual engine and direct rendering onto 21146d06779eSChris Wilson * the backing engines (bypassing the load balancing proxy), the context must 21156d06779eSChris Wilson * be defined to use a single timeline for all engines. 21166d06779eSChris Wilson */ 21176d06779eSChris Wilson struct i915_context_engines_load_balance { 21186d06779eSChris Wilson struct i915_user_extension base; 21196d06779eSChris Wilson 21206d06779eSChris Wilson __u16 engine_index; 21216d06779eSChris Wilson __u16 num_siblings; 21226d06779eSChris Wilson __u32 flags; /* all undefined flags must be zero */ 21236d06779eSChris Wilson 21246d06779eSChris Wilson __u64 mbz64; /* reserved for future use; must be zero */ 21256d06779eSChris Wilson 21266d06779eSChris Wilson struct i915_engine_class_instance engines[0]; 21276d06779eSChris Wilson } __attribute__((packed)); 21286d06779eSChris Wilson 21296d06779eSChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \ 21306d06779eSChris Wilson struct i915_user_extension base; \ 21316d06779eSChris Wilson __u16 engine_index; \ 21326d06779eSChris Wilson __u16 num_siblings; \ 21336d06779eSChris Wilson __u32 flags; \ 21346d06779eSChris Wilson __u64 mbz64; \ 21356d06779eSChris Wilson struct i915_engine_class_instance engines[N__]; \ 21366d06779eSChris Wilson } __attribute__((packed)) name__ 21376d06779eSChris Wilson 2138ee113690SChris Wilson /* 2139ee113690SChris Wilson * i915_context_engines_bond: 2140ee113690SChris Wilson * 2141ee113690SChris Wilson * Constructed bonded pairs for execution within a virtual engine. 2142ee113690SChris Wilson * 2143ee113690SChris Wilson * All engines are equal, but some are more equal than others. Given 2144ee113690SChris Wilson * the distribution of resources in the HW, it may be preferable to run 2145ee113690SChris Wilson * a request on a given subset of engines in parallel to a request on a 2146ee113690SChris Wilson * specific engine. We enable this selection of engines within a virtual 2147ee113690SChris Wilson * engine by specifying bonding pairs, for any given master engine we will 2148ee113690SChris Wilson * only execute on one of the corresponding siblings within the virtual engine. 2149ee113690SChris Wilson * 2150ee113690SChris Wilson * To execute a request in parallel on the master engine and a sibling requires 2151ee113690SChris Wilson * coordination with a I915_EXEC_FENCE_SUBMIT. 2152ee113690SChris Wilson */ 2153ee113690SChris Wilson struct i915_context_engines_bond { 2154ee113690SChris Wilson struct i915_user_extension base; 2155ee113690SChris Wilson 2156ee113690SChris Wilson struct i915_engine_class_instance master; 2157ee113690SChris Wilson 2158ee113690SChris Wilson __u16 virtual_index; /* index of virtual engine in ctx->engines[] */ 2159ee113690SChris Wilson __u16 num_bonds; 2160ee113690SChris Wilson 2161ee113690SChris Wilson __u64 flags; /* all undefined flags must be zero */ 2162ee113690SChris Wilson __u64 mbz64[4]; /* reserved for future use; must be zero */ 2163ee113690SChris Wilson 2164ee113690SChris Wilson struct i915_engine_class_instance engines[0]; 2165ee113690SChris Wilson } __attribute__((packed)); 2166ee113690SChris Wilson 2167ee113690SChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \ 2168ee113690SChris Wilson struct i915_user_extension base; \ 2169ee113690SChris Wilson struct i915_engine_class_instance master; \ 2170ee113690SChris Wilson __u16 virtual_index; \ 2171ee113690SChris Wilson __u16 num_bonds; \ 2172ee113690SChris Wilson __u64 flags; \ 2173ee113690SChris Wilson __u64 mbz64[4]; \ 2174ee113690SChris Wilson struct i915_engine_class_instance engines[N__]; \ 2175ee113690SChris Wilson } __attribute__((packed)) name__ 2176ee113690SChris Wilson 217757772953STvrtko Ursulin /** 2178e5e32171SMatthew Brost * struct i915_context_engines_parallel_submit - Configure engine for 2179e5e32171SMatthew Brost * parallel submission. 2180e5e32171SMatthew Brost * 2181e5e32171SMatthew Brost * Setup a slot in the context engine map to allow multiple BBs to be submitted 2182e5e32171SMatthew Brost * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU 2183e5e32171SMatthew Brost * in parallel. Multiple hardware contexts are created internally in the i915 to 2184e5e32171SMatthew Brost * run these BBs. Once a slot is configured for N BBs only N BBs can be 2185e5e32171SMatthew Brost * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user 2186e5e32171SMatthew Brost * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how 2187e5e32171SMatthew Brost * many BBs there are based on the slot's configuration. The N BBs are the last 2188e5e32171SMatthew Brost * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set. 2189e5e32171SMatthew Brost * 2190e5e32171SMatthew Brost * The default placement behavior is to create implicit bonds between each 2191e5e32171SMatthew Brost * context if each context maps to more than 1 physical engine (e.g. context is 2192e5e32171SMatthew Brost * a virtual engine). Also we only allow contexts of same engine class and these 2193e5e32171SMatthew Brost * contexts must be in logically contiguous order. Examples of the placement 2194e5e32171SMatthew Brost * behavior are described below. Lastly, the default is to not allow BBs to be 2195e5e32171SMatthew Brost * preempted mid-batch. Rather insert coordinated preemption points on all 2196e5e32171SMatthew Brost * hardware contexts between each set of BBs. Flags could be added in the future 2197e5e32171SMatthew Brost * to change both of these default behaviors. 2198e5e32171SMatthew Brost * 2199e5e32171SMatthew Brost * Returns -EINVAL if hardware context placement configuration is invalid or if 2200e5e32171SMatthew Brost * the placement configuration isn't supported on the platform / submission 2201e5e32171SMatthew Brost * interface. 2202e5e32171SMatthew Brost * Returns -ENODEV if extension isn't supported on the platform / submission 2203e5e32171SMatthew Brost * interface. 2204e5e32171SMatthew Brost * 2205e5e32171SMatthew Brost * .. code-block:: none 2206e5e32171SMatthew Brost * 2207e5e32171SMatthew Brost * Examples syntax: 2208e5e32171SMatthew Brost * CS[X] = generic engine of same class, logical instance X 2209e5e32171SMatthew Brost * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE 2210e5e32171SMatthew Brost * 2211e5e32171SMatthew Brost * Example 1 pseudo code: 2212e5e32171SMatthew Brost * set_engines(INVALID) 2213e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=1, 2214e5e32171SMatthew Brost * engines=CS[0],CS[1]) 2215e5e32171SMatthew Brost * 2216e5e32171SMatthew Brost * Results in the following valid placement: 2217e5e32171SMatthew Brost * CS[0], CS[1] 2218e5e32171SMatthew Brost * 2219e5e32171SMatthew Brost * Example 2 pseudo code: 2220e5e32171SMatthew Brost * set_engines(INVALID) 2221e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=2, 2222e5e32171SMatthew Brost * engines=CS[0],CS[2],CS[1],CS[3]) 2223e5e32171SMatthew Brost * 2224e5e32171SMatthew Brost * Results in the following valid placements: 2225e5e32171SMatthew Brost * CS[0], CS[1] 2226e5e32171SMatthew Brost * CS[2], CS[3] 2227e5e32171SMatthew Brost * 2228e5e32171SMatthew Brost * This can be thought of as two virtual engines, each containing two 2229e5e32171SMatthew Brost * engines thereby making a 2D array. However, there are bonds tying the 2230e5e32171SMatthew Brost * entries together and placing restrictions on how they can be scheduled. 2231e5e32171SMatthew Brost * Specifically, the scheduler can choose only vertical columns from the 2D 2232e5e32171SMatthew Brost * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the 2233e5e32171SMatthew Brost * scheduler wants to submit to CS[0], it must also choose CS[1] and vice 2234e5e32171SMatthew Brost * versa. Same for CS[2] requires also using CS[3]. 2235e5e32171SMatthew Brost * VE[0] = CS[0], CS[2] 2236e5e32171SMatthew Brost * VE[1] = CS[1], CS[3] 2237e5e32171SMatthew Brost * 2238e5e32171SMatthew Brost * Example 3 pseudo code: 2239e5e32171SMatthew Brost * set_engines(INVALID) 2240e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=2, 2241e5e32171SMatthew Brost * engines=CS[0],CS[1],CS[1],CS[3]) 2242e5e32171SMatthew Brost * 2243e5e32171SMatthew Brost * Results in the following valid and invalid placements: 2244e5e32171SMatthew Brost * CS[0], CS[1] 2245e5e32171SMatthew Brost * CS[1], CS[3] - Not logically contiguous, return -EINVAL 2246e5e32171SMatthew Brost */ 2247e5e32171SMatthew Brost struct i915_context_engines_parallel_submit { 2248e5e32171SMatthew Brost /** 2249e5e32171SMatthew Brost * @base: base user extension. 2250e5e32171SMatthew Brost */ 2251e5e32171SMatthew Brost struct i915_user_extension base; 2252e5e32171SMatthew Brost 2253e5e32171SMatthew Brost /** 2254e5e32171SMatthew Brost * @engine_index: slot for parallel engine 2255e5e32171SMatthew Brost */ 2256e5e32171SMatthew Brost __u16 engine_index; 2257e5e32171SMatthew Brost 2258e5e32171SMatthew Brost /** 2259e5e32171SMatthew Brost * @width: number of contexts per parallel engine or in other words the 2260e5e32171SMatthew Brost * number of batches in each submission 2261e5e32171SMatthew Brost */ 2262e5e32171SMatthew Brost __u16 width; 2263e5e32171SMatthew Brost 2264e5e32171SMatthew Brost /** 2265e5e32171SMatthew Brost * @num_siblings: number of siblings per context or in other words the 2266e5e32171SMatthew Brost * number of possible placements for each submission 2267e5e32171SMatthew Brost */ 2268e5e32171SMatthew Brost __u16 num_siblings; 2269e5e32171SMatthew Brost 2270e5e32171SMatthew Brost /** 2271e5e32171SMatthew Brost * @mbz16: reserved for future use; must be zero 2272e5e32171SMatthew Brost */ 2273e5e32171SMatthew Brost __u16 mbz16; 2274e5e32171SMatthew Brost 2275e5e32171SMatthew Brost /** 2276e5e32171SMatthew Brost * @flags: all undefined flags must be zero, currently not defined flags 2277e5e32171SMatthew Brost */ 2278e5e32171SMatthew Brost __u64 flags; 2279e5e32171SMatthew Brost 2280e5e32171SMatthew Brost /** 2281e5e32171SMatthew Brost * @mbz64: reserved for future use; must be zero 2282e5e32171SMatthew Brost */ 2283e5e32171SMatthew Brost __u64 mbz64[3]; 2284e5e32171SMatthew Brost 2285e5e32171SMatthew Brost /** 2286e5e32171SMatthew Brost * @engines: 2-d array of engine instances to configure parallel engine 2287e5e32171SMatthew Brost * 2288e5e32171SMatthew Brost * length = width (i) * num_siblings (j) 2289e5e32171SMatthew Brost * index = j + i * num_siblings 2290e5e32171SMatthew Brost */ 2291e5e32171SMatthew Brost struct i915_engine_class_instance engines[0]; 2292e5e32171SMatthew Brost 2293e5e32171SMatthew Brost } __packed; 2294e5e32171SMatthew Brost 2295e5e32171SMatthew Brost #define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \ 2296e5e32171SMatthew Brost struct i915_user_extension base; \ 2297e5e32171SMatthew Brost __u16 engine_index; \ 2298e5e32171SMatthew Brost __u16 width; \ 2299e5e32171SMatthew Brost __u16 num_siblings; \ 2300e5e32171SMatthew Brost __u16 mbz16; \ 2301e5e32171SMatthew Brost __u64 flags; \ 2302e5e32171SMatthew Brost __u64 mbz64[3]; \ 2303e5e32171SMatthew Brost struct i915_engine_class_instance engines[N__]; \ 2304e5e32171SMatthew Brost } __attribute__((packed)) name__ 2305e5e32171SMatthew Brost 2306e5e32171SMatthew Brost /** 230757772953STvrtko Ursulin * DOC: Context Engine Map uAPI 230857772953STvrtko Ursulin * 230957772953STvrtko Ursulin * Context engine map is a new way of addressing engines when submitting batch- 231057772953STvrtko Ursulin * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT` 231157772953STvrtko Ursulin * inside the flags field of `struct drm_i915_gem_execbuffer2`. 231257772953STvrtko Ursulin * 231357772953STvrtko Ursulin * To use it created GEM contexts need to be configured with a list of engines 231457772953STvrtko Ursulin * the user is intending to submit to. This is accomplished using the 231557772953STvrtko Ursulin * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct 231657772953STvrtko Ursulin * i915_context_param_engines`. 231757772953STvrtko Ursulin * 231857772953STvrtko Ursulin * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the 231957772953STvrtko Ursulin * configured map. 232057772953STvrtko Ursulin * 232157772953STvrtko Ursulin * Example of creating such context and submitting against it: 232257772953STvrtko Ursulin * 232357772953STvrtko Ursulin * .. code-block:: C 232457772953STvrtko Ursulin * 232557772953STvrtko Ursulin * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = { 232657772953STvrtko Ursulin * .engines = { { I915_ENGINE_CLASS_RENDER, 0 }, 232757772953STvrtko Ursulin * { I915_ENGINE_CLASS_COPY, 0 } } 232857772953STvrtko Ursulin * }; 232957772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext_setparam p_engines = { 233057772953STvrtko Ursulin * .base = { 233157772953STvrtko Ursulin * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 233257772953STvrtko Ursulin * }, 233357772953STvrtko Ursulin * .param = { 233457772953STvrtko Ursulin * .param = I915_CONTEXT_PARAM_ENGINES, 233557772953STvrtko Ursulin * .value = to_user_pointer(&engines), 233657772953STvrtko Ursulin * .size = sizeof(engines), 233757772953STvrtko Ursulin * }, 233857772953STvrtko Ursulin * }; 233957772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext create = { 234057772953STvrtko Ursulin * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, 234157772953STvrtko Ursulin * .extensions = to_user_pointer(&p_engines); 234257772953STvrtko Ursulin * }; 234357772953STvrtko Ursulin * 234457772953STvrtko Ursulin * ctx_id = gem_context_create_ext(drm_fd, &create); 234557772953STvrtko Ursulin * 234657772953STvrtko Ursulin * // We have now created a GEM context with two engines in the map: 234757772953STvrtko Ursulin * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines 234857772953STvrtko Ursulin * // will not be accessible from this context. 234957772953STvrtko Ursulin * 235057772953STvrtko Ursulin * ... 235157772953STvrtko Ursulin * execbuf.rsvd1 = ctx_id; 235257772953STvrtko Ursulin * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context 235357772953STvrtko Ursulin * gem_execbuf(drm_fd, &execbuf); 235457772953STvrtko Ursulin * 235557772953STvrtko Ursulin * ... 235657772953STvrtko Ursulin * execbuf.rsvd1 = ctx_id; 235757772953STvrtko Ursulin * execbuf.flags = 1; // Submits to index 0, which is bcs0 for this context 235857772953STvrtko Ursulin * gem_execbuf(drm_fd, &execbuf); 235957772953STvrtko Ursulin */ 236057772953STvrtko Ursulin 2361976b55f0SChris Wilson struct i915_context_param_engines { 2362976b55f0SChris Wilson __u64 extensions; /* linked chain of extension blocks, 0 terminates */ 23636d06779eSChris Wilson #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ 2364ee113690SChris Wilson #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */ 2365e5e32171SMatthew Brost #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */ 2366976b55f0SChris Wilson struct i915_engine_class_instance engines[0]; 2367976b55f0SChris Wilson } __attribute__((packed)); 2368976b55f0SChris Wilson 2369976b55f0SChris Wilson #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \ 2370976b55f0SChris Wilson __u64 extensions; \ 2371976b55f0SChris Wilson struct i915_engine_class_instance engines[N__]; \ 2372976b55f0SChris Wilson } __attribute__((packed)) name__ 2373976b55f0SChris Wilson 2374b9171541SChris Wilson struct drm_i915_gem_context_create_ext_setparam { 2375b9171541SChris Wilson #define I915_CONTEXT_CREATE_EXT_SETPARAM 0 2376b9171541SChris Wilson struct i915_user_extension base; 2377b9171541SChris Wilson struct drm_i915_gem_context_param param; 2378b9171541SChris Wilson }; 2379b9171541SChris Wilson 23804a766ae4SJason Ekstrand /* This API has been removed. On the off chance someone somewhere has 23814a766ae4SJason Ekstrand * attempted to use it, never re-use this extension number. 23824a766ae4SJason Ekstrand */ 2383b81dde71SChris Wilson #define I915_CONTEXT_CREATE_EXT_CLONE 1 2384b81dde71SChris Wilson 2385b9171541SChris Wilson struct drm_i915_gem_context_destroy { 2386b9171541SChris Wilson __u32 ctx_id; 2387b9171541SChris Wilson __u32 pad; 2388b9171541SChris Wilson }; 2389b9171541SChris Wilson 2390b9171541SChris Wilson /* 2391b9171541SChris Wilson * DRM_I915_GEM_VM_CREATE - 2392b9171541SChris Wilson * 2393b9171541SChris Wilson * Create a new virtual memory address space (ppGTT) for use within a context 2394b9171541SChris Wilson * on the same file. Extensions can be provided to configure exactly how the 2395b9171541SChris Wilson * address space is setup upon creation. 2396b9171541SChris Wilson * 2397b9171541SChris Wilson * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is 2398b9171541SChris Wilson * returned in the outparam @id. 2399b9171541SChris Wilson * 2400b9171541SChris Wilson * No flags are defined, with all bits reserved and must be zero. 2401b9171541SChris Wilson * 2402b9171541SChris Wilson * An extension chain maybe provided, starting with @extensions, and terminated 2403b9171541SChris Wilson * by the @next_extension being 0. Currently, no extensions are defined. 2404b9171541SChris Wilson * 2405b9171541SChris Wilson * DRM_I915_GEM_VM_DESTROY - 2406b9171541SChris Wilson * 2407b9171541SChris Wilson * Destroys a previously created VM id, specified in @id. 2408b9171541SChris Wilson * 2409b9171541SChris Wilson * No extensions or flags are allowed currently, and so must be zero. 2410b9171541SChris Wilson */ 2411b9171541SChris Wilson struct drm_i915_gem_vm_control { 2412b9171541SChris Wilson __u64 extensions; 2413b9171541SChris Wilson __u32 flags; 2414b9171541SChris Wilson __u32 vm_id; 2415b9171541SChris Wilson }; 2416b9171541SChris Wilson 2417b9171541SChris Wilson struct drm_i915_reg_read { 2418b9171541SChris Wilson /* 2419b9171541SChris Wilson * Register offset. 2420b9171541SChris Wilson * For 64bit wide registers where the upper 32bits don't immediately 2421b9171541SChris Wilson * follow the lower 32bits, the offset of the lower 32bits must 2422b9171541SChris Wilson * be specified 2423b9171541SChris Wilson */ 2424b9171541SChris Wilson __u64 offset; 2425b9171541SChris Wilson #define I915_REG_READ_8B_WA (1ul << 0) 2426b9171541SChris Wilson 2427b9171541SChris Wilson __u64 val; /* Return value */ 2428b9171541SChris Wilson }; 2429b9171541SChris Wilson 2430b9171541SChris Wilson /* Known registers: 2431b9171541SChris Wilson * 2432b9171541SChris Wilson * Render engine timestamp - 0x2358 + 64bit - gen7+ 2433b9171541SChris Wilson * - Note this register returns an invalid value if using the default 2434b9171541SChris Wilson * single instruction 8byte read, in order to workaround that pass 2435b9171541SChris Wilson * flag I915_REG_READ_8B_WA in offset field. 2436b9171541SChris Wilson * 2437b9171541SChris Wilson */ 2438b9171541SChris Wilson 2439b9171541SChris Wilson struct drm_i915_reset_stats { 2440b9171541SChris Wilson __u32 ctx_id; 2441b9171541SChris Wilson __u32 flags; 2442b9171541SChris Wilson 2443b9171541SChris Wilson /* All resets since boot/module reload, for all contexts */ 2444b9171541SChris Wilson __u32 reset_count; 2445b9171541SChris Wilson 2446b9171541SChris Wilson /* Number of batches lost when active in GPU, for this context */ 2447b9171541SChris Wilson __u32 batch_active; 2448b9171541SChris Wilson 2449b9171541SChris Wilson /* Number of batches lost pending for execution, for this context */ 2450b9171541SChris Wilson __u32 batch_pending; 2451b9171541SChris Wilson 2452b9171541SChris Wilson __u32 pad; 2453b9171541SChris Wilson }; 2454b9171541SChris Wilson 2455aef7b67aSMatthew Auld /** 2456aef7b67aSMatthew Auld * struct drm_i915_gem_userptr - Create GEM object from user allocated memory. 2457aef7b67aSMatthew Auld * 2458aef7b67aSMatthew Auld * Userptr objects have several restrictions on what ioctls can be used with the 2459aef7b67aSMatthew Auld * object handle. 2460aef7b67aSMatthew Auld */ 2461b9171541SChris Wilson struct drm_i915_gem_userptr { 2462aef7b67aSMatthew Auld /** 2463aef7b67aSMatthew Auld * @user_ptr: The pointer to the allocated memory. 2464aef7b67aSMatthew Auld * 2465aef7b67aSMatthew Auld * Needs to be aligned to PAGE_SIZE. 2466aef7b67aSMatthew Auld */ 2467b9171541SChris Wilson __u64 user_ptr; 2468aef7b67aSMatthew Auld 2469aef7b67aSMatthew Auld /** 2470aef7b67aSMatthew Auld * @user_size: 2471aef7b67aSMatthew Auld * 2472aef7b67aSMatthew Auld * The size in bytes for the allocated memory. This will also become the 2473aef7b67aSMatthew Auld * object size. 2474aef7b67aSMatthew Auld * 2475aef7b67aSMatthew Auld * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE, 2476aef7b67aSMatthew Auld * or larger. 2477aef7b67aSMatthew Auld */ 2478b9171541SChris Wilson __u64 user_size; 2479aef7b67aSMatthew Auld 2480aef7b67aSMatthew Auld /** 2481aef7b67aSMatthew Auld * @flags: 2482aef7b67aSMatthew Auld * 2483aef7b67aSMatthew Auld * Supported flags: 2484aef7b67aSMatthew Auld * 2485aef7b67aSMatthew Auld * I915_USERPTR_READ_ONLY: 2486aef7b67aSMatthew Auld * 2487aef7b67aSMatthew Auld * Mark the object as readonly, this also means GPU access can only be 2488aef7b67aSMatthew Auld * readonly. This is only supported on HW which supports readonly access 2489aef7b67aSMatthew Auld * through the GTT. If the HW can't support readonly access, an error is 2490aef7b67aSMatthew Auld * returned. 2491aef7b67aSMatthew Auld * 2492b65a9489SChris Wilson * I915_USERPTR_PROBE: 2493b65a9489SChris Wilson * 2494b65a9489SChris Wilson * Probe the provided @user_ptr range and validate that the @user_ptr is 2495b65a9489SChris Wilson * indeed pointing to normal memory and that the range is also valid. 2496b65a9489SChris Wilson * For example if some garbage address is given to the kernel, then this 2497b65a9489SChris Wilson * should complain. 2498b65a9489SChris Wilson * 2499b65a9489SChris Wilson * Returns -EFAULT if the probe failed. 2500b65a9489SChris Wilson * 2501b65a9489SChris Wilson * Note that this doesn't populate the backing pages, and also doesn't 2502b65a9489SChris Wilson * guarantee that the object will remain valid when the object is 2503b65a9489SChris Wilson * eventually used. 2504b65a9489SChris Wilson * 2505b65a9489SChris Wilson * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE 2506b65a9489SChris Wilson * returns a non-zero value. 2507b65a9489SChris Wilson * 2508aef7b67aSMatthew Auld * I915_USERPTR_UNSYNCHRONIZED: 2509aef7b67aSMatthew Auld * 2510aef7b67aSMatthew Auld * NOT USED. Setting this flag will result in an error. 2511aef7b67aSMatthew Auld */ 2512b9171541SChris Wilson __u32 flags; 2513b9171541SChris Wilson #define I915_USERPTR_READ_ONLY 0x1 2514b65a9489SChris Wilson #define I915_USERPTR_PROBE 0x2 2515b9171541SChris Wilson #define I915_USERPTR_UNSYNCHRONIZED 0x80000000 2516b9171541SChris Wilson /** 2517aef7b67aSMatthew Auld * @handle: Returned handle for the object. 2518b9171541SChris Wilson * 2519b9171541SChris Wilson * Object handles are nonzero. 2520b9171541SChris Wilson */ 2521b9171541SChris Wilson __u32 handle; 2522b9171541SChris Wilson }; 2523b9171541SChris Wilson 2524d7965152SRobert Bragg enum drm_i915_oa_format { 252519f81df2SRobert Bragg I915_OA_FORMAT_A13 = 1, /* HSW only */ 252619f81df2SRobert Bragg I915_OA_FORMAT_A29, /* HSW only */ 252719f81df2SRobert Bragg I915_OA_FORMAT_A13_B8_C8, /* HSW only */ 252819f81df2SRobert Bragg I915_OA_FORMAT_B4_C8, /* HSW only */ 252919f81df2SRobert Bragg I915_OA_FORMAT_A45_B8_C8, /* HSW only */ 253019f81df2SRobert Bragg I915_OA_FORMAT_B4_C8_A16, /* HSW only */ 253119f81df2SRobert Bragg I915_OA_FORMAT_C4_B8, /* HSW+ */ 253219f81df2SRobert Bragg 253319f81df2SRobert Bragg /* Gen8+ */ 253419f81df2SRobert Bragg I915_OA_FORMAT_A12, 253519f81df2SRobert Bragg I915_OA_FORMAT_A12_B8_C8, 253619f81df2SRobert Bragg I915_OA_FORMAT_A32u40_A4u32_B8_C8, 2537d7965152SRobert Bragg 2538d7965152SRobert Bragg I915_OA_FORMAT_MAX /* non-ABI */ 2539d7965152SRobert Bragg }; 2540d7965152SRobert Bragg 2541eec688e1SRobert Bragg enum drm_i915_perf_property_id { 2542eec688e1SRobert Bragg /** 2543eec688e1SRobert Bragg * Open the stream for a specific context handle (as used with 2544eec688e1SRobert Bragg * execbuffer2). A stream opened for a specific context this way 2545eec688e1SRobert Bragg * won't typically require root privileges. 2546b8d49f28SLionel Landwerlin * 2547b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2548eec688e1SRobert Bragg */ 2549eec688e1SRobert Bragg DRM_I915_PERF_PROP_CTX_HANDLE = 1, 2550eec688e1SRobert Bragg 2551d7965152SRobert Bragg /** 2552d7965152SRobert Bragg * A value of 1 requests the inclusion of raw OA unit reports as 2553d7965152SRobert Bragg * part of stream samples. 2554b8d49f28SLionel Landwerlin * 2555b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2556d7965152SRobert Bragg */ 2557d7965152SRobert Bragg DRM_I915_PERF_PROP_SAMPLE_OA, 2558d7965152SRobert Bragg 2559d7965152SRobert Bragg /** 2560d7965152SRobert Bragg * The value specifies which set of OA unit metrics should be 256166137f54SRandy Dunlap * configured, defining the contents of any OA unit reports. 2562b8d49f28SLionel Landwerlin * 2563b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2564d7965152SRobert Bragg */ 2565d7965152SRobert Bragg DRM_I915_PERF_PROP_OA_METRICS_SET, 2566d7965152SRobert Bragg 2567d7965152SRobert Bragg /** 2568d7965152SRobert Bragg * The value specifies the size and layout of OA unit reports. 2569b8d49f28SLionel Landwerlin * 2570b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2571d7965152SRobert Bragg */ 2572d7965152SRobert Bragg DRM_I915_PERF_PROP_OA_FORMAT, 2573d7965152SRobert Bragg 2574d7965152SRobert Bragg /** 2575d7965152SRobert Bragg * Specifying this property implicitly requests periodic OA unit 2576d7965152SRobert Bragg * sampling and (at least on Haswell) the sampling frequency is derived 2577d7965152SRobert Bragg * from this exponent as follows: 2578d7965152SRobert Bragg * 2579d7965152SRobert Bragg * 80ns * 2^(period_exponent + 1) 2580b8d49f28SLionel Landwerlin * 2581b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2582d7965152SRobert Bragg */ 2583d7965152SRobert Bragg DRM_I915_PERF_PROP_OA_EXPONENT, 2584d7965152SRobert Bragg 25859cd20ef7SLionel Landwerlin /** 25869cd20ef7SLionel Landwerlin * Specifying this property is only valid when specify a context to 25879cd20ef7SLionel Landwerlin * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property 25889cd20ef7SLionel Landwerlin * will hold preemption of the particular context we want to gather 25899cd20ef7SLionel Landwerlin * performance data about. The execbuf2 submissions must include a 25909cd20ef7SLionel Landwerlin * drm_i915_gem_execbuffer_ext_perf parameter for this to apply. 25919cd20ef7SLionel Landwerlin * 25929cd20ef7SLionel Landwerlin * This property is available in perf revision 3. 25939cd20ef7SLionel Landwerlin */ 25949cd20ef7SLionel Landwerlin DRM_I915_PERF_PROP_HOLD_PREEMPTION, 25959cd20ef7SLionel Landwerlin 259611ecbdddSLionel Landwerlin /** 259711ecbdddSLionel Landwerlin * Specifying this pins all contexts to the specified SSEU power 259811ecbdddSLionel Landwerlin * configuration for the duration of the recording. 259911ecbdddSLionel Landwerlin * 260011ecbdddSLionel Landwerlin * This parameter's value is a pointer to a struct 260111ecbdddSLionel Landwerlin * drm_i915_gem_context_param_sseu. 260211ecbdddSLionel Landwerlin * 260311ecbdddSLionel Landwerlin * This property is available in perf revision 4. 260411ecbdddSLionel Landwerlin */ 260511ecbdddSLionel Landwerlin DRM_I915_PERF_PROP_GLOBAL_SSEU, 260611ecbdddSLionel Landwerlin 26074ef10fe0SLionel Landwerlin /** 26084ef10fe0SLionel Landwerlin * This optional parameter specifies the timer interval in nanoseconds 26094ef10fe0SLionel Landwerlin * at which the i915 driver will check the OA buffer for available data. 26104ef10fe0SLionel Landwerlin * Minimum allowed value is 100 microseconds. A default value is used by 26114ef10fe0SLionel Landwerlin * the driver if this parameter is not specified. Note that larger timer 26124ef10fe0SLionel Landwerlin * values will reduce cpu consumption during OA perf captures. However, 26134ef10fe0SLionel Landwerlin * excessively large values would potentially result in OA buffer 26144ef10fe0SLionel Landwerlin * overwrites as captures reach end of the OA buffer. 26154ef10fe0SLionel Landwerlin * 26164ef10fe0SLionel Landwerlin * This property is available in perf revision 5. 26174ef10fe0SLionel Landwerlin */ 26184ef10fe0SLionel Landwerlin DRM_I915_PERF_PROP_POLL_OA_PERIOD, 26194ef10fe0SLionel Landwerlin 2620eec688e1SRobert Bragg DRM_I915_PERF_PROP_MAX /* non-ABI */ 2621eec688e1SRobert Bragg }; 2622eec688e1SRobert Bragg 2623eec688e1SRobert Bragg struct drm_i915_perf_open_param { 2624eec688e1SRobert Bragg __u32 flags; 2625eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_CLOEXEC (1<<0) 2626eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_NONBLOCK (1<<1) 2627eec688e1SRobert Bragg #define I915_PERF_FLAG_DISABLED (1<<2) 2628eec688e1SRobert Bragg 2629eec688e1SRobert Bragg /** The number of u64 (id, value) pairs */ 2630eec688e1SRobert Bragg __u32 num_properties; 2631eec688e1SRobert Bragg 2632eec688e1SRobert Bragg /** 2633eec688e1SRobert Bragg * Pointer to array of u64 (id, value) pairs configuring the stream 2634eec688e1SRobert Bragg * to open. 2635eec688e1SRobert Bragg */ 2636cd8bddc4SChris Wilson __u64 properties_ptr; 2637eec688e1SRobert Bragg }; 2638eec688e1SRobert Bragg 26392ef6a01fSMatthew Auld /* 2640d7965152SRobert Bragg * Enable data capture for a stream that was either opened in a disabled state 2641d7965152SRobert Bragg * via I915_PERF_FLAG_DISABLED or was later disabled via 2642d7965152SRobert Bragg * I915_PERF_IOCTL_DISABLE. 2643d7965152SRobert Bragg * 2644d7965152SRobert Bragg * It is intended to be cheaper to disable and enable a stream than it may be 2645d7965152SRobert Bragg * to close and re-open a stream with the same configuration. 2646d7965152SRobert Bragg * 2647d7965152SRobert Bragg * It's undefined whether any pending data for the stream will be lost. 2648b8d49f28SLionel Landwerlin * 2649b8d49f28SLionel Landwerlin * This ioctl is available in perf revision 1. 2650d7965152SRobert Bragg */ 2651eec688e1SRobert Bragg #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) 2652d7965152SRobert Bragg 26532ef6a01fSMatthew Auld /* 2654d7965152SRobert Bragg * Disable data capture for a stream. 2655d7965152SRobert Bragg * 2656d7965152SRobert Bragg * It is an error to try and read a stream that is disabled. 2657b8d49f28SLionel Landwerlin * 2658b8d49f28SLionel Landwerlin * This ioctl is available in perf revision 1. 2659d7965152SRobert Bragg */ 2660eec688e1SRobert Bragg #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) 2661eec688e1SRobert Bragg 26622ef6a01fSMatthew Auld /* 26637831e9a9SChris Wilson * Change metrics_set captured by a stream. 26647831e9a9SChris Wilson * 26657831e9a9SChris Wilson * If the stream is bound to a specific context, the configuration change 26667831e9a9SChris Wilson * will performed inline with that context such that it takes effect before 26677831e9a9SChris Wilson * the next execbuf submission. 26687831e9a9SChris Wilson * 26697831e9a9SChris Wilson * Returns the previously bound metrics set id, or a negative error code. 26707831e9a9SChris Wilson * 26717831e9a9SChris Wilson * This ioctl is available in perf revision 2. 26727831e9a9SChris Wilson */ 26737831e9a9SChris Wilson #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) 26747831e9a9SChris Wilson 26752ef6a01fSMatthew Auld /* 2676eec688e1SRobert Bragg * Common to all i915 perf records 2677eec688e1SRobert Bragg */ 2678eec688e1SRobert Bragg struct drm_i915_perf_record_header { 2679eec688e1SRobert Bragg __u32 type; 2680eec688e1SRobert Bragg __u16 pad; 2681eec688e1SRobert Bragg __u16 size; 2682eec688e1SRobert Bragg }; 2683eec688e1SRobert Bragg 2684eec688e1SRobert Bragg enum drm_i915_perf_record_type { 2685eec688e1SRobert Bragg 2686eec688e1SRobert Bragg /** 2687eec688e1SRobert Bragg * Samples are the work horse record type whose contents are extensible 2688eec688e1SRobert Bragg * and defined when opening an i915 perf stream based on the given 2689eec688e1SRobert Bragg * properties. 2690eec688e1SRobert Bragg * 2691eec688e1SRobert Bragg * Boolean properties following the naming convention 2692eec688e1SRobert Bragg * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in 2693eec688e1SRobert Bragg * every sample. 2694eec688e1SRobert Bragg * 2695eec688e1SRobert Bragg * The order of these sample properties given by userspace has no 2696d7965152SRobert Bragg * affect on the ordering of data within a sample. The order is 2697eec688e1SRobert Bragg * documented here. 2698eec688e1SRobert Bragg * 2699eec688e1SRobert Bragg * struct { 2700eec688e1SRobert Bragg * struct drm_i915_perf_record_header header; 2701eec688e1SRobert Bragg * 2702d7965152SRobert Bragg * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA 2703eec688e1SRobert Bragg * }; 2704eec688e1SRobert Bragg */ 2705eec688e1SRobert Bragg DRM_I915_PERF_RECORD_SAMPLE = 1, 2706eec688e1SRobert Bragg 2707d7965152SRobert Bragg /* 2708d7965152SRobert Bragg * Indicates that one or more OA reports were not written by the 2709d7965152SRobert Bragg * hardware. This can happen for example if an MI_REPORT_PERF_COUNT 2710d7965152SRobert Bragg * command collides with periodic sampling - which would be more likely 2711d7965152SRobert Bragg * at higher sampling frequencies. 2712d7965152SRobert Bragg */ 2713d7965152SRobert Bragg DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, 2714d7965152SRobert Bragg 2715d7965152SRobert Bragg /** 2716d7965152SRobert Bragg * An error occurred that resulted in all pending OA reports being lost. 2717d7965152SRobert Bragg */ 2718d7965152SRobert Bragg DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, 2719d7965152SRobert Bragg 2720eec688e1SRobert Bragg DRM_I915_PERF_RECORD_MAX /* non-ABI */ 2721eec688e1SRobert Bragg }; 2722eec688e1SRobert Bragg 2723a2e54026SMatt Roper /** 2724a2e54026SMatt Roper * struct drm_i915_perf_oa_config 2725a2e54026SMatt Roper * 2726f89823c2SLionel Landwerlin * Structure to upload perf dynamic configuration into the kernel. 2727f89823c2SLionel Landwerlin */ 2728f89823c2SLionel Landwerlin struct drm_i915_perf_oa_config { 2729a2e54026SMatt Roper /** 2730a2e54026SMatt Roper * @uuid: 2731a2e54026SMatt Roper * 2732a2e54026SMatt Roper * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" 2733a2e54026SMatt Roper */ 2734f89823c2SLionel Landwerlin char uuid[36]; 2735f89823c2SLionel Landwerlin 2736a2e54026SMatt Roper /** 2737a2e54026SMatt Roper * @n_mux_regs: 2738a2e54026SMatt Roper * 2739a2e54026SMatt Roper * Number of mux regs in &mux_regs_ptr. 2740a2e54026SMatt Roper */ 2741f89823c2SLionel Landwerlin __u32 n_mux_regs; 2742a2e54026SMatt Roper 2743a2e54026SMatt Roper /** 2744a2e54026SMatt Roper * @n_boolean_regs: 2745a2e54026SMatt Roper * 2746a2e54026SMatt Roper * Number of boolean regs in &boolean_regs_ptr. 2747a2e54026SMatt Roper */ 2748f89823c2SLionel Landwerlin __u32 n_boolean_regs; 2749a2e54026SMatt Roper 2750a2e54026SMatt Roper /** 2751a2e54026SMatt Roper * @n_flex_regs: 2752a2e54026SMatt Roper * 2753a2e54026SMatt Roper * Number of flex regs in &flex_regs_ptr. 2754a2e54026SMatt Roper */ 2755f89823c2SLionel Landwerlin __u32 n_flex_regs; 2756f89823c2SLionel Landwerlin 2757a2e54026SMatt Roper /** 2758a2e54026SMatt Roper * @mux_regs_ptr: 2759a2e54026SMatt Roper * 2760a2e54026SMatt Roper * Pointer to tuples of u32 values (register address, value) for mux 2761a2e54026SMatt Roper * registers. Expected length of buffer is (2 * sizeof(u32) * 2762a2e54026SMatt Roper * &n_mux_regs). 2763ee427e25SLionel Landwerlin */ 276417ad4fddSChris Wilson __u64 mux_regs_ptr; 2765a2e54026SMatt Roper 2766a2e54026SMatt Roper /** 2767a2e54026SMatt Roper * @boolean_regs_ptr: 2768a2e54026SMatt Roper * 2769a2e54026SMatt Roper * Pointer to tuples of u32 values (register address, value) for mux 2770a2e54026SMatt Roper * registers. Expected length of buffer is (2 * sizeof(u32) * 2771a2e54026SMatt Roper * &n_boolean_regs). 2772a2e54026SMatt Roper */ 277317ad4fddSChris Wilson __u64 boolean_regs_ptr; 2774a2e54026SMatt Roper 2775a2e54026SMatt Roper /** 2776a2e54026SMatt Roper * @flex_regs_ptr: 2777a2e54026SMatt Roper * 2778a2e54026SMatt Roper * Pointer to tuples of u32 values (register address, value) for mux 2779a2e54026SMatt Roper * registers. Expected length of buffer is (2 * sizeof(u32) * 2780a2e54026SMatt Roper * &n_flex_regs). 2781a2e54026SMatt Roper */ 278217ad4fddSChris Wilson __u64 flex_regs_ptr; 2783f89823c2SLionel Landwerlin }; 2784f89823c2SLionel Landwerlin 2785e3bdccafSMatthew Auld /** 2786e3bdccafSMatthew Auld * struct drm_i915_query_item - An individual query for the kernel to process. 2787e3bdccafSMatthew Auld * 2788e3bdccafSMatthew Auld * The behaviour is determined by the @query_id. Note that exactly what 2789e3bdccafSMatthew Auld * @data_ptr is also depends on the specific @query_id. 2790e3bdccafSMatthew Auld */ 2791a446ae2cSLionel Landwerlin struct drm_i915_query_item { 27921c671ad7SMatt Roper /** 27931c671ad7SMatt Roper * @query_id: 27941c671ad7SMatt Roper * 27951c671ad7SMatt Roper * The id for this query. Currently accepted query IDs are: 27961c671ad7SMatt Roper * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info) 27971c671ad7SMatt Roper * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info) 27981c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config) 27991c671ad7SMatt Roper * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions) 28001c671ad7SMatt Roper * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`) 2801c94fde8fSMatt Atwood * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info) 28021c671ad7SMatt Roper */ 2803a446ae2cSLionel Landwerlin __u64 query_id; 2804c822e059SLionel Landwerlin #define DRM_I915_QUERY_TOPOLOGY_INFO 1 2805c5d3e39cSTvrtko Ursulin #define DRM_I915_QUERY_ENGINE_INFO 2 28064f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG 3 280771021729SAbdiel Janulgue #define DRM_I915_QUERY_MEMORY_REGIONS 4 280878e1fb31SRodrigo Vivi #define DRM_I915_QUERY_HWCONFIG_BLOB 5 2809c94fde8fSMatt Atwood #define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6 2810be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */ 2811a446ae2cSLionel Landwerlin 2812e3bdccafSMatthew Auld /** 2813e3bdccafSMatthew Auld * @length: 2814e3bdccafSMatthew Auld * 2815a446ae2cSLionel Landwerlin * When set to zero by userspace, this is filled with the size of the 2816e3bdccafSMatthew Auld * data to be written at the @data_ptr pointer. The kernel sets this 2817a446ae2cSLionel Landwerlin * value to a negative value to signal an error on a particular query 2818a446ae2cSLionel Landwerlin * item. 2819a446ae2cSLionel Landwerlin */ 2820a446ae2cSLionel Landwerlin __s32 length; 2821a446ae2cSLionel Landwerlin 2822e3bdccafSMatthew Auld /** 2823e3bdccafSMatthew Auld * @flags: 2824e3bdccafSMatthew Auld * 28251c671ad7SMatt Roper * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. 28264f6ccc74SLionel Landwerlin * 28271c671ad7SMatt Roper * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the 28284f6ccc74SLionel Landwerlin * following: 2829e3bdccafSMatthew Auld * 28301c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG_LIST 28311c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 28321c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID 2833c94fde8fSMatt Atwood * 2834c94fde8fSMatt Atwood * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain 2835c94fde8fSMatt Atwood * a struct i915_engine_class_instance that references a render engine. 2836a446ae2cSLionel Landwerlin */ 2837a446ae2cSLionel Landwerlin __u32 flags; 28384f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 28394f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 28404f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 2841a446ae2cSLionel Landwerlin 2842e3bdccafSMatthew Auld /** 2843e3bdccafSMatthew Auld * @data_ptr: 2844e3bdccafSMatthew Auld * 2845e3bdccafSMatthew Auld * Data will be written at the location pointed by @data_ptr when the 2846e3bdccafSMatthew Auld * value of @length matches the length of the data to be written by the 2847a446ae2cSLionel Landwerlin * kernel. 2848a446ae2cSLionel Landwerlin */ 2849a446ae2cSLionel Landwerlin __u64 data_ptr; 2850a446ae2cSLionel Landwerlin }; 2851a446ae2cSLionel Landwerlin 2852e3bdccafSMatthew Auld /** 2853e3bdccafSMatthew Auld * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the 2854e3bdccafSMatthew Auld * kernel to fill out. 2855e3bdccafSMatthew Auld * 2856e3bdccafSMatthew Auld * Note that this is generally a two step process for each struct 2857e3bdccafSMatthew Auld * drm_i915_query_item in the array: 2858e3bdccafSMatthew Auld * 2859e3bdccafSMatthew Auld * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct 2860e3bdccafSMatthew Auld * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The 2861e3bdccafSMatthew Auld * kernel will then fill in the size, in bytes, which tells userspace how 2862e3bdccafSMatthew Auld * memory it needs to allocate for the blob(say for an array of properties). 2863e3bdccafSMatthew Auld * 2864e3bdccafSMatthew Auld * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the 2865e3bdccafSMatthew Auld * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that 2866e3bdccafSMatthew Auld * the &drm_i915_query_item.length should still be the same as what the 2867e3bdccafSMatthew Auld * kernel previously set. At this point the kernel can fill in the blob. 2868e3bdccafSMatthew Auld * 2869e3bdccafSMatthew Auld * Note that for some query items it can make sense for userspace to just pass 2870e3bdccafSMatthew Auld * in a buffer/blob equal to or larger than the required size. In this case only 2871e3bdccafSMatthew Auld * a single ioctl call is needed. For some smaller query items this can work 2872e3bdccafSMatthew Auld * quite well. 2873e3bdccafSMatthew Auld * 2874e3bdccafSMatthew Auld */ 2875a446ae2cSLionel Landwerlin struct drm_i915_query { 2876e3bdccafSMatthew Auld /** @num_items: The number of elements in the @items_ptr array */ 2877a446ae2cSLionel Landwerlin __u32 num_items; 2878a446ae2cSLionel Landwerlin 2879e3bdccafSMatthew Auld /** 2880e3bdccafSMatthew Auld * @flags: Unused for now. Must be cleared to zero. 2881a446ae2cSLionel Landwerlin */ 2882a446ae2cSLionel Landwerlin __u32 flags; 2883a446ae2cSLionel Landwerlin 2884e3bdccafSMatthew Auld /** 2885e3bdccafSMatthew Auld * @items_ptr: 2886e3bdccafSMatthew Auld * 2887e3bdccafSMatthew Auld * Pointer to an array of struct drm_i915_query_item. The number of 2888e3bdccafSMatthew Auld * array elements is @num_items. 2889a446ae2cSLionel Landwerlin */ 2890a446ae2cSLionel Landwerlin __u64 items_ptr; 2891a446ae2cSLionel Landwerlin }; 2892a446ae2cSLionel Landwerlin 2893462ac1cdSMatt Roper /** 2894462ac1cdSMatt Roper * struct drm_i915_query_topology_info 2895c822e059SLionel Landwerlin * 2896462ac1cdSMatt Roper * Describes slice/subslice/EU information queried by 2897462ac1cdSMatt Roper * %DRM_I915_QUERY_TOPOLOGY_INFO 2898c822e059SLionel Landwerlin */ 2899c822e059SLionel Landwerlin struct drm_i915_query_topology_info { 2900462ac1cdSMatt Roper /** 2901462ac1cdSMatt Roper * @flags: 2902462ac1cdSMatt Roper * 2903c822e059SLionel Landwerlin * Unused for now. Must be cleared to zero. 2904c822e059SLionel Landwerlin */ 2905c822e059SLionel Landwerlin __u16 flags; 2906c822e059SLionel Landwerlin 2907462ac1cdSMatt Roper /** 2908462ac1cdSMatt Roper * @max_slices: 2909462ac1cdSMatt Roper * 2910462ac1cdSMatt Roper * The number of bits used to express the slice mask. 2911462ac1cdSMatt Roper */ 2912c822e059SLionel Landwerlin __u16 max_slices; 2913462ac1cdSMatt Roper 2914462ac1cdSMatt Roper /** 2915462ac1cdSMatt Roper * @max_subslices: 2916462ac1cdSMatt Roper * 2917462ac1cdSMatt Roper * The number of bits used to express the subslice mask. 2918462ac1cdSMatt Roper */ 2919c822e059SLionel Landwerlin __u16 max_subslices; 2920462ac1cdSMatt Roper 2921462ac1cdSMatt Roper /** 2922462ac1cdSMatt Roper * @max_eus_per_subslice: 2923462ac1cdSMatt Roper * 2924462ac1cdSMatt Roper * The number of bits in the EU mask that correspond to a single 2925462ac1cdSMatt Roper * subslice's EUs. 2926462ac1cdSMatt Roper */ 2927c822e059SLionel Landwerlin __u16 max_eus_per_subslice; 2928c822e059SLionel Landwerlin 2929462ac1cdSMatt Roper /** 2930462ac1cdSMatt Roper * @subslice_offset: 2931462ac1cdSMatt Roper * 2932c822e059SLionel Landwerlin * Offset in data[] at which the subslice masks are stored. 2933c822e059SLionel Landwerlin */ 2934c822e059SLionel Landwerlin __u16 subslice_offset; 2935c822e059SLionel Landwerlin 2936462ac1cdSMatt Roper /** 2937462ac1cdSMatt Roper * @subslice_stride: 2938462ac1cdSMatt Roper * 2939c822e059SLionel Landwerlin * Stride at which each of the subslice masks for each slice are 2940c822e059SLionel Landwerlin * stored. 2941c822e059SLionel Landwerlin */ 2942c822e059SLionel Landwerlin __u16 subslice_stride; 2943c822e059SLionel Landwerlin 2944462ac1cdSMatt Roper /** 2945462ac1cdSMatt Roper * @eu_offset: 2946462ac1cdSMatt Roper * 2947c822e059SLionel Landwerlin * Offset in data[] at which the EU masks are stored. 2948c822e059SLionel Landwerlin */ 2949c822e059SLionel Landwerlin __u16 eu_offset; 2950c822e059SLionel Landwerlin 2951462ac1cdSMatt Roper /** 2952462ac1cdSMatt Roper * @eu_stride: 2953462ac1cdSMatt Roper * 2954c822e059SLionel Landwerlin * Stride at which each of the EU masks for each subslice are stored. 2955c822e059SLionel Landwerlin */ 2956c822e059SLionel Landwerlin __u16 eu_stride; 2957c822e059SLionel Landwerlin 2958462ac1cdSMatt Roper /** 2959462ac1cdSMatt Roper * @data: 2960462ac1cdSMatt Roper * 2961462ac1cdSMatt Roper * Contains 3 pieces of information : 2962462ac1cdSMatt Roper * 2963462ac1cdSMatt Roper * - The slice mask with one bit per slice telling whether a slice is 2964462ac1cdSMatt Roper * available. The availability of slice X can be queried with the 2965462ac1cdSMatt Roper * following formula : 2966462ac1cdSMatt Roper * 2967462ac1cdSMatt Roper * .. code:: c 2968462ac1cdSMatt Roper * 2969462ac1cdSMatt Roper * (data[X / 8] >> (X % 8)) & 1 2970462ac1cdSMatt Roper * 2971462ac1cdSMatt Roper * Starting with Xe_HP platforms, Intel hardware no longer has 2972462ac1cdSMatt Roper * traditional slices so i915 will always report a single slice 2973462ac1cdSMatt Roper * (hardcoded slicemask = 0x1) which contains all of the platform's 2974462ac1cdSMatt Roper * subslices. I.e., the mask here does not reflect any of the newer 2975462ac1cdSMatt Roper * hardware concepts such as "gslices" or "cslices" since userspace 2976462ac1cdSMatt Roper * is capable of inferring those from the subslice mask. 2977462ac1cdSMatt Roper * 2978462ac1cdSMatt Roper * - The subslice mask for each slice with one bit per subslice telling 2979462ac1cdSMatt Roper * whether a subslice is available. Starting with Gen12 we use the 2980462ac1cdSMatt Roper * term "subslice" to refer to what the hardware documentation 2981462ac1cdSMatt Roper * describes as a "dual-subslices." The availability of subslice Y 2982462ac1cdSMatt Roper * in slice X can be queried with the following formula : 2983462ac1cdSMatt Roper * 2984462ac1cdSMatt Roper * .. code:: c 2985462ac1cdSMatt Roper * 2986462ac1cdSMatt Roper * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1 2987462ac1cdSMatt Roper * 2988462ac1cdSMatt Roper * - The EU mask for each subslice in each slice, with one bit per EU 2989462ac1cdSMatt Roper * telling whether an EU is available. The availability of EU Z in 2990462ac1cdSMatt Roper * subslice Y in slice X can be queried with the following formula : 2991462ac1cdSMatt Roper * 2992462ac1cdSMatt Roper * .. code:: c 2993462ac1cdSMatt Roper * 2994462ac1cdSMatt Roper * (data[eu_offset + 2995462ac1cdSMatt Roper * (X * max_subslices + Y) * eu_stride + 2996462ac1cdSMatt Roper * Z / 8 2997462ac1cdSMatt Roper * ] >> (Z % 8)) & 1 2998462ac1cdSMatt Roper */ 2999c822e059SLionel Landwerlin __u8 data[]; 3000c822e059SLionel Landwerlin }; 3001c822e059SLionel Landwerlin 3002c5d3e39cSTvrtko Ursulin /** 300357772953STvrtko Ursulin * DOC: Engine Discovery uAPI 300457772953STvrtko Ursulin * 300557772953STvrtko Ursulin * Engine discovery uAPI is a way of enumerating physical engines present in a 300657772953STvrtko Ursulin * GPU associated with an open i915 DRM file descriptor. This supersedes the old 300757772953STvrtko Ursulin * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like 300857772953STvrtko Ursulin * `I915_PARAM_HAS_BLT`. 300957772953STvrtko Ursulin * 301057772953STvrtko Ursulin * The need for this interface came starting with Icelake and newer GPUs, which 301157772953STvrtko Ursulin * started to establish a pattern of having multiple engines of a same class, 301257772953STvrtko Ursulin * where not all instances were always completely functionally equivalent. 301357772953STvrtko Ursulin * 301457772953STvrtko Ursulin * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the 301557772953STvrtko Ursulin * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id. 301657772953STvrtko Ursulin * 301757772953STvrtko Ursulin * Example for getting the list of engines: 301857772953STvrtko Ursulin * 301957772953STvrtko Ursulin * .. code-block:: C 302057772953STvrtko Ursulin * 302157772953STvrtko Ursulin * struct drm_i915_query_engine_info *info; 302257772953STvrtko Ursulin * struct drm_i915_query_item item = { 302357772953STvrtko Ursulin * .query_id = DRM_I915_QUERY_ENGINE_INFO; 302457772953STvrtko Ursulin * }; 302557772953STvrtko Ursulin * struct drm_i915_query query = { 302657772953STvrtko Ursulin * .num_items = 1, 302757772953STvrtko Ursulin * .items_ptr = (uintptr_t)&item, 302857772953STvrtko Ursulin * }; 302957772953STvrtko Ursulin * int err, i; 303057772953STvrtko Ursulin * 303157772953STvrtko Ursulin * // First query the size of the blob we need, this needs to be large 303257772953STvrtko Ursulin * // enough to hold our array of engines. The kernel will fill out the 303357772953STvrtko Ursulin * // item.length for us, which is the number of bytes we need. 303457772953STvrtko Ursulin * // 303557772953STvrtko Ursulin * // Alternatively a large buffer can be allocated straight away enabling 303657772953STvrtko Ursulin * // querying in one pass, in which case item.length should contain the 303757772953STvrtko Ursulin * // length of the provided buffer. 303857772953STvrtko Ursulin * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 303957772953STvrtko Ursulin * if (err) ... 304057772953STvrtko Ursulin * 304157772953STvrtko Ursulin * info = calloc(1, item.length); 304257772953STvrtko Ursulin * // Now that we allocated the required number of bytes, we call the ioctl 304357772953STvrtko Ursulin * // again, this time with the data_ptr pointing to our newly allocated 304457772953STvrtko Ursulin * // blob, which the kernel can then populate with info on all engines. 304557772953STvrtko Ursulin * item.data_ptr = (uintptr_t)&info, 304657772953STvrtko Ursulin * 304757772953STvrtko Ursulin * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 304857772953STvrtko Ursulin * if (err) ... 304957772953STvrtko Ursulin * 305057772953STvrtko Ursulin * // We can now access each engine in the array 305157772953STvrtko Ursulin * for (i = 0; i < info->num_engines; i++) { 305257772953STvrtko Ursulin * struct drm_i915_engine_info einfo = info->engines[i]; 305357772953STvrtko Ursulin * u16 class = einfo.engine.class; 305457772953STvrtko Ursulin * u16 instance = einfo.engine.instance; 305557772953STvrtko Ursulin * .... 305657772953STvrtko Ursulin * } 305757772953STvrtko Ursulin * 305857772953STvrtko Ursulin * free(info); 305957772953STvrtko Ursulin * 306057772953STvrtko Ursulin * Each of the enumerated engines, apart from being defined by its class and 306157772953STvrtko Ursulin * instance (see `struct i915_engine_class_instance`), also can have flags and 306257772953STvrtko Ursulin * capabilities defined as documented in i915_drm.h. 306357772953STvrtko Ursulin * 306457772953STvrtko Ursulin * For instance video engines which support HEVC encoding will have the 306557772953STvrtko Ursulin * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set. 306657772953STvrtko Ursulin * 306757772953STvrtko Ursulin * Engine discovery only fully comes to its own when combined with the new way 306857772953STvrtko Ursulin * of addressing engines when submitting batch buffers using contexts with 306957772953STvrtko Ursulin * engine maps configured. 307057772953STvrtko Ursulin */ 307157772953STvrtko Ursulin 307257772953STvrtko Ursulin /** 3073c5d3e39cSTvrtko Ursulin * struct drm_i915_engine_info 3074c5d3e39cSTvrtko Ursulin * 3075c5d3e39cSTvrtko Ursulin * Describes one engine and it's capabilities as known to the driver. 3076c5d3e39cSTvrtko Ursulin */ 3077c5d3e39cSTvrtko Ursulin struct drm_i915_engine_info { 30782ef6a01fSMatthew Auld /** @engine: Engine class and instance. */ 3079c5d3e39cSTvrtko Ursulin struct i915_engine_class_instance engine; 3080c5d3e39cSTvrtko Ursulin 30812ef6a01fSMatthew Auld /** @rsvd0: Reserved field. */ 3082c5d3e39cSTvrtko Ursulin __u32 rsvd0; 3083c5d3e39cSTvrtko Ursulin 30842ef6a01fSMatthew Auld /** @flags: Engine flags. */ 3085c5d3e39cSTvrtko Ursulin __u64 flags; 30869409eb35SMatthew Brost #define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0) 3087c5d3e39cSTvrtko Ursulin 30882ef6a01fSMatthew Auld /** @capabilities: Capabilities of this engine. */ 3089c5d3e39cSTvrtko Ursulin __u64 capabilities; 3090c5d3e39cSTvrtko Ursulin #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) 3091c5d3e39cSTvrtko Ursulin #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) 3092c5d3e39cSTvrtko Ursulin 30939409eb35SMatthew Brost /** @logical_instance: Logical instance of engine */ 30949409eb35SMatthew Brost __u16 logical_instance; 30959409eb35SMatthew Brost 30962ef6a01fSMatthew Auld /** @rsvd1: Reserved fields. */ 30979409eb35SMatthew Brost __u16 rsvd1[3]; 30989409eb35SMatthew Brost /** @rsvd2: Reserved fields. */ 30999409eb35SMatthew Brost __u64 rsvd2[3]; 3100c5d3e39cSTvrtko Ursulin }; 3101c5d3e39cSTvrtko Ursulin 3102c5d3e39cSTvrtko Ursulin /** 3103c5d3e39cSTvrtko Ursulin * struct drm_i915_query_engine_info 3104c5d3e39cSTvrtko Ursulin * 3105c5d3e39cSTvrtko Ursulin * Engine info query enumerates all engines known to the driver by filling in 3106c5d3e39cSTvrtko Ursulin * an array of struct drm_i915_engine_info structures. 3107c5d3e39cSTvrtko Ursulin */ 3108c5d3e39cSTvrtko Ursulin struct drm_i915_query_engine_info { 31092ef6a01fSMatthew Auld /** @num_engines: Number of struct drm_i915_engine_info structs following. */ 3110c5d3e39cSTvrtko Ursulin __u32 num_engines; 3111c5d3e39cSTvrtko Ursulin 31122ef6a01fSMatthew Auld /** @rsvd: MBZ */ 3113c5d3e39cSTvrtko Ursulin __u32 rsvd[3]; 3114c5d3e39cSTvrtko Ursulin 31152ef6a01fSMatthew Auld /** @engines: Marker for drm_i915_engine_info structures. */ 3116c5d3e39cSTvrtko Ursulin struct drm_i915_engine_info engines[]; 3117c5d3e39cSTvrtko Ursulin }; 3118c5d3e39cSTvrtko Ursulin 3119a2e54026SMatt Roper /** 3120a2e54026SMatt Roper * struct drm_i915_query_perf_config 3121a2e54026SMatt Roper * 3122c94fde8fSMatt Atwood * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and 3123c94fde8fSMatt Atwood * %DRM_I915_QUERY_GEOMETRY_SUBSLICES. 31244f6ccc74SLionel Landwerlin */ 31254f6ccc74SLionel Landwerlin struct drm_i915_query_perf_config { 31264f6ccc74SLionel Landwerlin union { 3127a2e54026SMatt Roper /** 3128a2e54026SMatt Roper * @n_configs: 3129a2e54026SMatt Roper * 3130a2e54026SMatt Roper * When &drm_i915_query_item.flags == 3131a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this fields to 3132a2e54026SMatt Roper * the number of configurations available. 31334f6ccc74SLionel Landwerlin */ 31344f6ccc74SLionel Landwerlin __u64 n_configs; 31354f6ccc74SLionel Landwerlin 3136a2e54026SMatt Roper /** 3137a2e54026SMatt Roper * @config: 3138a2e54026SMatt Roper * 3139a2e54026SMatt Roper * When &drm_i915_query_item.flags == 3140a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the 3141a2e54026SMatt Roper * value in this field as configuration identifier to decide 3142a2e54026SMatt Roper * what data to write into config_ptr. 31434f6ccc74SLionel Landwerlin */ 31444f6ccc74SLionel Landwerlin __u64 config; 31454f6ccc74SLionel Landwerlin 3146a2e54026SMatt Roper /** 3147a2e54026SMatt Roper * @uuid: 3148a2e54026SMatt Roper * 3149a2e54026SMatt Roper * When &drm_i915_query_item.flags == 3150a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the 3151a2e54026SMatt Roper * value in this field as configuration identifier to decide 3152a2e54026SMatt Roper * what data to write into config_ptr. 31534f6ccc74SLionel Landwerlin * 31544f6ccc74SLionel Landwerlin * String formatted like "%08x-%04x-%04x-%04x-%012x" 31554f6ccc74SLionel Landwerlin */ 31564f6ccc74SLionel Landwerlin char uuid[36]; 31574f6ccc74SLionel Landwerlin }; 31584f6ccc74SLionel Landwerlin 3159a2e54026SMatt Roper /** 3160a2e54026SMatt Roper * @flags: 3161a2e54026SMatt Roper * 31624f6ccc74SLionel Landwerlin * Unused for now. Must be cleared to zero. 31634f6ccc74SLionel Landwerlin */ 31644f6ccc74SLionel Landwerlin __u32 flags; 31654f6ccc74SLionel Landwerlin 3166a2e54026SMatt Roper /** 3167a2e54026SMatt Roper * @data: 31684f6ccc74SLionel Landwerlin * 3169a2e54026SMatt Roper * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST, 3170a2e54026SMatt Roper * i915 will write an array of __u64 of configuration identifiers. 3171a2e54026SMatt Roper * 3172a2e54026SMatt Roper * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA, 3173a2e54026SMatt Roper * i915 will write a struct drm_i915_perf_oa_config. If the following 3174a2e54026SMatt Roper * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will 3175a2e54026SMatt Roper * write into the associated pointers the values of submitted when the 31764f6ccc74SLionel Landwerlin * configuration was created : 31774f6ccc74SLionel Landwerlin * 3178a2e54026SMatt Roper * - &drm_i915_perf_oa_config.n_mux_regs 3179a2e54026SMatt Roper * - &drm_i915_perf_oa_config.n_boolean_regs 3180a2e54026SMatt Roper * - &drm_i915_perf_oa_config.n_flex_regs 31814f6ccc74SLionel Landwerlin */ 31824f6ccc74SLionel Landwerlin __u8 data[]; 31834f6ccc74SLionel Landwerlin }; 31844f6ccc74SLionel Landwerlin 318571021729SAbdiel Janulgue /** 318671021729SAbdiel Janulgue * enum drm_i915_gem_memory_class - Supported memory classes 318771021729SAbdiel Janulgue */ 318871021729SAbdiel Janulgue enum drm_i915_gem_memory_class { 318971021729SAbdiel Janulgue /** @I915_MEMORY_CLASS_SYSTEM: System memory */ 319071021729SAbdiel Janulgue I915_MEMORY_CLASS_SYSTEM = 0, 319171021729SAbdiel Janulgue /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */ 319271021729SAbdiel Janulgue I915_MEMORY_CLASS_DEVICE, 319371021729SAbdiel Janulgue }; 319471021729SAbdiel Janulgue 319571021729SAbdiel Janulgue /** 319671021729SAbdiel Janulgue * struct drm_i915_gem_memory_class_instance - Identify particular memory region 319771021729SAbdiel Janulgue */ 319871021729SAbdiel Janulgue struct drm_i915_gem_memory_class_instance { 319971021729SAbdiel Janulgue /** @memory_class: See enum drm_i915_gem_memory_class */ 320071021729SAbdiel Janulgue __u16 memory_class; 320171021729SAbdiel Janulgue 320271021729SAbdiel Janulgue /** @memory_instance: Which instance */ 320371021729SAbdiel Janulgue __u16 memory_instance; 320471021729SAbdiel Janulgue }; 320571021729SAbdiel Janulgue 320671021729SAbdiel Janulgue /** 320771021729SAbdiel Janulgue * struct drm_i915_memory_region_info - Describes one region as known to the 320871021729SAbdiel Janulgue * driver. 320971021729SAbdiel Janulgue * 321071021729SAbdiel Janulgue * Note that we reserve some stuff here for potential future work. As an example 321171021729SAbdiel Janulgue * we might want expose the capabilities for a given region, which could include 321271021729SAbdiel Janulgue * things like if the region is CPU mappable/accessible, what are the supported 321371021729SAbdiel Janulgue * mapping types etc. 321471021729SAbdiel Janulgue * 321571021729SAbdiel Janulgue * Note that to extend struct drm_i915_memory_region_info and struct 321671021729SAbdiel Janulgue * drm_i915_query_memory_regions in the future the plan is to do the following: 321771021729SAbdiel Janulgue * 321871021729SAbdiel Janulgue * .. code-block:: C 321971021729SAbdiel Janulgue * 322071021729SAbdiel Janulgue * struct drm_i915_memory_region_info { 322171021729SAbdiel Janulgue * struct drm_i915_gem_memory_class_instance region; 322271021729SAbdiel Janulgue * union { 322371021729SAbdiel Janulgue * __u32 rsvd0; 322471021729SAbdiel Janulgue * __u32 new_thing1; 322571021729SAbdiel Janulgue * }; 322671021729SAbdiel Janulgue * ... 322771021729SAbdiel Janulgue * union { 322871021729SAbdiel Janulgue * __u64 rsvd1[8]; 322971021729SAbdiel Janulgue * struct { 323071021729SAbdiel Janulgue * __u64 new_thing2; 323171021729SAbdiel Janulgue * __u64 new_thing3; 323271021729SAbdiel Janulgue * ... 323371021729SAbdiel Janulgue * }; 323471021729SAbdiel Janulgue * }; 323571021729SAbdiel Janulgue * }; 323671021729SAbdiel Janulgue * 323771021729SAbdiel Janulgue * With this things should remain source compatible between versions for 323871021729SAbdiel Janulgue * userspace, even as we add new fields. 323971021729SAbdiel Janulgue * 324071021729SAbdiel Janulgue * Note this is using both struct drm_i915_query_item and struct drm_i915_query. 324171021729SAbdiel Janulgue * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS 324271021729SAbdiel Janulgue * at &drm_i915_query_item.query_id. 324371021729SAbdiel Janulgue */ 324471021729SAbdiel Janulgue struct drm_i915_memory_region_info { 324571021729SAbdiel Janulgue /** @region: The class:instance pair encoding */ 324671021729SAbdiel Janulgue struct drm_i915_gem_memory_class_instance region; 324771021729SAbdiel Janulgue 324871021729SAbdiel Janulgue /** @rsvd0: MBZ */ 324971021729SAbdiel Janulgue __u32 rsvd0; 325071021729SAbdiel Janulgue 325171021729SAbdiel Janulgue /** @probed_size: Memory probed by the driver (-1 = unknown) */ 325271021729SAbdiel Janulgue __u64 probed_size; 325371021729SAbdiel Janulgue 325471021729SAbdiel Janulgue /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */ 325571021729SAbdiel Janulgue __u64 unallocated_size; 325671021729SAbdiel Janulgue 325771021729SAbdiel Janulgue /** @rsvd1: MBZ */ 325871021729SAbdiel Janulgue __u64 rsvd1[8]; 325971021729SAbdiel Janulgue }; 326071021729SAbdiel Janulgue 326171021729SAbdiel Janulgue /** 326271021729SAbdiel Janulgue * struct drm_i915_query_memory_regions 326371021729SAbdiel Janulgue * 326471021729SAbdiel Janulgue * The region info query enumerates all regions known to the driver by filling 326571021729SAbdiel Janulgue * in an array of struct drm_i915_memory_region_info structures. 326671021729SAbdiel Janulgue * 326771021729SAbdiel Janulgue * Example for getting the list of supported regions: 326871021729SAbdiel Janulgue * 326971021729SAbdiel Janulgue * .. code-block:: C 327071021729SAbdiel Janulgue * 327171021729SAbdiel Janulgue * struct drm_i915_query_memory_regions *info; 327271021729SAbdiel Janulgue * struct drm_i915_query_item item = { 327371021729SAbdiel Janulgue * .query_id = DRM_I915_QUERY_MEMORY_REGIONS; 327471021729SAbdiel Janulgue * }; 327571021729SAbdiel Janulgue * struct drm_i915_query query = { 327671021729SAbdiel Janulgue * .num_items = 1, 327771021729SAbdiel Janulgue * .items_ptr = (uintptr_t)&item, 327871021729SAbdiel Janulgue * }; 327971021729SAbdiel Janulgue * int err, i; 328071021729SAbdiel Janulgue * 328171021729SAbdiel Janulgue * // First query the size of the blob we need, this needs to be large 328271021729SAbdiel Janulgue * // enough to hold our array of regions. The kernel will fill out the 328371021729SAbdiel Janulgue * // item.length for us, which is the number of bytes we need. 328471021729SAbdiel Janulgue * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 328571021729SAbdiel Janulgue * if (err) ... 328671021729SAbdiel Janulgue * 328771021729SAbdiel Janulgue * info = calloc(1, item.length); 328871021729SAbdiel Janulgue * // Now that we allocated the required number of bytes, we call the ioctl 328971021729SAbdiel Janulgue * // again, this time with the data_ptr pointing to our newly allocated 329071021729SAbdiel Janulgue * // blob, which the kernel can then populate with the all the region info. 329171021729SAbdiel Janulgue * item.data_ptr = (uintptr_t)&info, 329271021729SAbdiel Janulgue * 329371021729SAbdiel Janulgue * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 329471021729SAbdiel Janulgue * if (err) ... 329571021729SAbdiel Janulgue * 329671021729SAbdiel Janulgue * // We can now access each region in the array 329771021729SAbdiel Janulgue * for (i = 0; i < info->num_regions; i++) { 329871021729SAbdiel Janulgue * struct drm_i915_memory_region_info mr = info->regions[i]; 329971021729SAbdiel Janulgue * u16 class = mr.region.class; 330071021729SAbdiel Janulgue * u16 instance = mr.region.instance; 330171021729SAbdiel Janulgue * 330271021729SAbdiel Janulgue * .... 330371021729SAbdiel Janulgue * } 330471021729SAbdiel Janulgue * 330571021729SAbdiel Janulgue * free(info); 330671021729SAbdiel Janulgue */ 330771021729SAbdiel Janulgue struct drm_i915_query_memory_regions { 330871021729SAbdiel Janulgue /** @num_regions: Number of supported regions */ 330971021729SAbdiel Janulgue __u32 num_regions; 331071021729SAbdiel Janulgue 331171021729SAbdiel Janulgue /** @rsvd: MBZ */ 331271021729SAbdiel Janulgue __u32 rsvd[3]; 331371021729SAbdiel Janulgue 331471021729SAbdiel Janulgue /** @regions: Info about each supported region */ 331571021729SAbdiel Janulgue struct drm_i915_memory_region_info regions[]; 331671021729SAbdiel Janulgue }; 331771021729SAbdiel Janulgue 3318ebcb4029SMatthew Auld /** 3319034d47b2STvrtko Ursulin * DOC: GuC HWCONFIG blob uAPI 3320034d47b2STvrtko Ursulin * 3321034d47b2STvrtko Ursulin * The GuC produces a blob with information about the current device. 3322034d47b2STvrtko Ursulin * i915 reads this blob from GuC and makes it available via this uAPI. 3323034d47b2STvrtko Ursulin * 3324034d47b2STvrtko Ursulin * The format and meaning of the blob content are documented in the 3325034d47b2STvrtko Ursulin * Programmer's Reference Manual. 3326034d47b2STvrtko Ursulin */ 3327034d47b2STvrtko Ursulin 3328034d47b2STvrtko Ursulin /** 3329ebcb4029SMatthew Auld * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added 3330ebcb4029SMatthew Auld * extension support using struct i915_user_extension. 3331ebcb4029SMatthew Auld * 3332ebcb4029SMatthew Auld * Note that in the future we want to have our buffer flags here, at least for 3333ebcb4029SMatthew Auld * the stuff that is immutable. Previously we would have two ioctls, one to 3334ebcb4029SMatthew Auld * create the object with gem_create, and another to apply various parameters, 3335ebcb4029SMatthew Auld * however this creates some ambiguity for the params which are considered 3336ebcb4029SMatthew Auld * immutable. Also in general we're phasing out the various SET/GET ioctls. 3337ebcb4029SMatthew Auld */ 3338ebcb4029SMatthew Auld struct drm_i915_gem_create_ext { 3339ebcb4029SMatthew Auld /** 3340ebcb4029SMatthew Auld * @size: Requested size for the object. 3341ebcb4029SMatthew Auld * 3342ebcb4029SMatthew Auld * The (page-aligned) allocated size for the object will be returned. 3343ebcb4029SMatthew Auld * 3344caa574ffSMatthew Auld * 3345caa574ffSMatthew Auld * DG2 64K min page size implications: 3346caa574ffSMatthew Auld * 3347caa574ffSMatthew Auld * On discrete platforms, starting from DG2, we have to contend with GTT 3348caa574ffSMatthew Auld * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE 3349caa574ffSMatthew Auld * objects. Specifically the hardware only supports 64K or larger GTT 3350caa574ffSMatthew Auld * page sizes for such memory. The kernel will already ensure that all 3351caa574ffSMatthew Auld * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page 3352caa574ffSMatthew Auld * sizes underneath. 3353caa574ffSMatthew Auld * 3354caa574ffSMatthew Auld * Note that the returned size here will always reflect any required 3355caa574ffSMatthew Auld * rounding up done by the kernel, i.e 4K will now become 64K on devices 3356caa574ffSMatthew Auld * such as DG2. 3357caa574ffSMatthew Auld * 3358caa574ffSMatthew Auld * Special DG2 GTT address alignment requirement: 3359caa574ffSMatthew Auld * 3360caa574ffSMatthew Auld * The GTT alignment will also need to be at least 2M for such objects. 3361caa574ffSMatthew Auld * 3362caa574ffSMatthew Auld * Note that due to how the hardware implements 64K GTT page support, we 3363caa574ffSMatthew Auld * have some further complications: 3364caa574ffSMatthew Auld * 3365caa574ffSMatthew Auld * 1) The entire PDE (which covers a 2MB virtual address range), must 3366caa574ffSMatthew Auld * contain only 64K PTEs, i.e mixing 4K and 64K PTEs in the same 3367caa574ffSMatthew Auld * PDE is forbidden by the hardware. 3368caa574ffSMatthew Auld * 3369caa574ffSMatthew Auld * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM 3370caa574ffSMatthew Auld * objects. 3371caa574ffSMatthew Auld * 3372caa574ffSMatthew Auld * To keep things simple for userland, we mandate that any GTT mappings 3373caa574ffSMatthew Auld * must be aligned to and rounded up to 2MB. The kernel will internally 3374caa574ffSMatthew Auld * pad them out to the next 2MB boundary. As this only wastes virtual 3375caa574ffSMatthew Auld * address space and avoids userland having to copy any needlessly 3376caa574ffSMatthew Auld * complicated PDE sharing scheme (coloring) and only affects DG2, this 3377caa574ffSMatthew Auld * is deemed to be a good compromise. 3378ebcb4029SMatthew Auld */ 3379ebcb4029SMatthew Auld __u64 size; 3380ebcb4029SMatthew Auld /** 3381ebcb4029SMatthew Auld * @handle: Returned handle for the object. 3382ebcb4029SMatthew Auld * 3383ebcb4029SMatthew Auld * Object handles are nonzero. 3384ebcb4029SMatthew Auld */ 3385ebcb4029SMatthew Auld __u32 handle; 3386ebcb4029SMatthew Auld /** @flags: MBZ */ 3387ebcb4029SMatthew Auld __u32 flags; 3388ebcb4029SMatthew Auld /** 3389ebcb4029SMatthew Auld * @extensions: The chain of extensions to apply to this object. 3390ebcb4029SMatthew Auld * 3391ebcb4029SMatthew Auld * This will be useful in the future when we need to support several 3392ebcb4029SMatthew Auld * different extensions, and we need to apply more than one when 3393ebcb4029SMatthew Auld * creating the object. See struct i915_user_extension. 3394ebcb4029SMatthew Auld * 3395ebcb4029SMatthew Auld * If we don't supply any extensions then we get the same old gem_create 3396ebcb4029SMatthew Auld * behaviour. 3397ebcb4029SMatthew Auld * 33982459e56fSMatthew Auld * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see 33992459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions. 3400d3ac8d42SDaniele Ceraolo Spurio * 3401d3ac8d42SDaniele Ceraolo Spurio * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see 3402d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_protected_content. 3403ebcb4029SMatthew Auld */ 34042459e56fSMatthew Auld #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 3405d3ac8d42SDaniele Ceraolo Spurio #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1 3406ebcb4029SMatthew Auld __u64 extensions; 3407ebcb4029SMatthew Auld }; 3408ebcb4029SMatthew Auld 34092459e56fSMatthew Auld /** 34102459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions - The 34112459e56fSMatthew Auld * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. 34122459e56fSMatthew Auld * 34132459e56fSMatthew Auld * Set the object with the desired set of placements/regions in priority 34142459e56fSMatthew Auld * order. Each entry must be unique and supported by the device. 34152459e56fSMatthew Auld * 34162459e56fSMatthew Auld * This is provided as an array of struct drm_i915_gem_memory_class_instance, or 34172459e56fSMatthew Auld * an equivalent layout of class:instance pair encodings. See struct 34182459e56fSMatthew Auld * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to 34192459e56fSMatthew Auld * query the supported regions for a device. 34202459e56fSMatthew Auld * 34212459e56fSMatthew Auld * As an example, on discrete devices, if we wish to set the placement as 34222459e56fSMatthew Auld * device local-memory we can do something like: 34232459e56fSMatthew Auld * 34242459e56fSMatthew Auld * .. code-block:: C 34252459e56fSMatthew Auld * 34262459e56fSMatthew Auld * struct drm_i915_gem_memory_class_instance region_lmem = { 34272459e56fSMatthew Auld * .memory_class = I915_MEMORY_CLASS_DEVICE, 34282459e56fSMatthew Auld * .memory_instance = 0, 34292459e56fSMatthew Auld * }; 34302459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions regions = { 34312459e56fSMatthew Auld * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS }, 34322459e56fSMatthew Auld * .regions = (uintptr_t)®ion_lmem, 34332459e56fSMatthew Auld * .num_regions = 1, 34342459e56fSMatthew Auld * }; 34352459e56fSMatthew Auld * struct drm_i915_gem_create_ext create_ext = { 34362459e56fSMatthew Auld * .size = 16 * PAGE_SIZE, 34372459e56fSMatthew Auld * .extensions = (uintptr_t)®ions, 34382459e56fSMatthew Auld * }; 34392459e56fSMatthew Auld * 34402459e56fSMatthew Auld * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); 34412459e56fSMatthew Auld * if (err) ... 34422459e56fSMatthew Auld * 34432459e56fSMatthew Auld * At which point we get the object handle in &drm_i915_gem_create_ext.handle, 34442459e56fSMatthew Auld * along with the final object size in &drm_i915_gem_create_ext.size, which 34452459e56fSMatthew Auld * should account for any rounding up, if required. 34462459e56fSMatthew Auld */ 34472459e56fSMatthew Auld struct drm_i915_gem_create_ext_memory_regions { 34482459e56fSMatthew Auld /** @base: Extension link. See struct i915_user_extension. */ 34492459e56fSMatthew Auld struct i915_user_extension base; 34502459e56fSMatthew Auld 34512459e56fSMatthew Auld /** @pad: MBZ */ 34522459e56fSMatthew Auld __u32 pad; 34532459e56fSMatthew Auld /** @num_regions: Number of elements in the @regions array. */ 34542459e56fSMatthew Auld __u32 num_regions; 34552459e56fSMatthew Auld /** 34562459e56fSMatthew Auld * @regions: The regions/placements array. 34572459e56fSMatthew Auld * 34582459e56fSMatthew Auld * An array of struct drm_i915_gem_memory_class_instance. 34592459e56fSMatthew Auld */ 34602459e56fSMatthew Auld __u64 regions; 34612459e56fSMatthew Auld }; 34622459e56fSMatthew Auld 3463d3ac8d42SDaniele Ceraolo Spurio /** 3464d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_protected_content - The 3465d3ac8d42SDaniele Ceraolo Spurio * I915_OBJECT_PARAM_PROTECTED_CONTENT extension. 3466d3ac8d42SDaniele Ceraolo Spurio * 3467d3ac8d42SDaniele Ceraolo Spurio * If this extension is provided, buffer contents are expected to be protected 3468d3ac8d42SDaniele Ceraolo Spurio * by PXP encryption and require decryption for scan out and processing. This 3469d3ac8d42SDaniele Ceraolo Spurio * is only possible on platforms that have PXP enabled, on all other scenarios 3470d3ac8d42SDaniele Ceraolo Spurio * using this extension will cause the ioctl to fail and return -ENODEV. The 3471d3ac8d42SDaniele Ceraolo Spurio * flags parameter is reserved for future expansion and must currently be set 3472d3ac8d42SDaniele Ceraolo Spurio * to zero. 3473d3ac8d42SDaniele Ceraolo Spurio * 3474d3ac8d42SDaniele Ceraolo Spurio * The buffer contents are considered invalid after a PXP session teardown. 3475d3ac8d42SDaniele Ceraolo Spurio * 3476d3ac8d42SDaniele Ceraolo Spurio * The encryption is guaranteed to be processed correctly only if the object 3477d3ac8d42SDaniele Ceraolo Spurio * is submitted with a context created using the 3478d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks 3479d3ac8d42SDaniele Ceraolo Spurio * at submission time on the validity of the objects involved. 3480d3ac8d42SDaniele Ceraolo Spurio * 3481d3ac8d42SDaniele Ceraolo Spurio * Below is an example on how to create a protected object: 3482d3ac8d42SDaniele Ceraolo Spurio * 3483d3ac8d42SDaniele Ceraolo Spurio * .. code-block:: C 3484d3ac8d42SDaniele Ceraolo Spurio * 3485d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_protected_content protected_ext = { 3486d3ac8d42SDaniele Ceraolo Spurio * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT }, 3487d3ac8d42SDaniele Ceraolo Spurio * .flags = 0, 3488d3ac8d42SDaniele Ceraolo Spurio * }; 3489d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext create_ext = { 3490d3ac8d42SDaniele Ceraolo Spurio * .size = PAGE_SIZE, 3491d3ac8d42SDaniele Ceraolo Spurio * .extensions = (uintptr_t)&protected_ext, 3492d3ac8d42SDaniele Ceraolo Spurio * }; 3493d3ac8d42SDaniele Ceraolo Spurio * 3494d3ac8d42SDaniele Ceraolo Spurio * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); 3495d3ac8d42SDaniele Ceraolo Spurio * if (err) ... 3496d3ac8d42SDaniele Ceraolo Spurio */ 3497d3ac8d42SDaniele Ceraolo Spurio struct drm_i915_gem_create_ext_protected_content { 3498d3ac8d42SDaniele Ceraolo Spurio /** @base: Extension link. See struct i915_user_extension. */ 3499d3ac8d42SDaniele Ceraolo Spurio struct i915_user_extension base; 3500d3ac8d42SDaniele Ceraolo Spurio /** @flags: reserved for future usage, currently MBZ */ 3501d3ac8d42SDaniele Ceraolo Spurio __u32 flags; 3502d3ac8d42SDaniele Ceraolo Spurio }; 3503d3ac8d42SDaniele Ceraolo Spurio 3504cbbd3764SHuang, Sean Z /* ID of the protected content session managed by i915 when PXP is active */ 3505cbbd3764SHuang, Sean Z #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf 3506cbbd3764SHuang, Sean Z 3507b1c1f5c4SEmil Velikov #if defined(__cplusplus) 3508b1c1f5c4SEmil Velikov } 3509b1c1f5c4SEmil Velikov #endif 3510b1c1f5c4SEmil Velikov 3511718dceddSDavid Howells #endif /* _UAPI_I915_DRM_H_ */ 3512