/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists.
 *	NOTE: Disabling hangcheck or reset via module parameter will cause
 *	the related events not to be seen.
 *
 * I915_RESET_UEVENT - Generated just before an attempt to reset the GPU. The
 *	value supplied with the event is always 1. NOTE: Disabling reset via
 *	module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"
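
/*
 * Example: listening for the uevents above from userspace. This is only an
 * illustrative sketch (not part of the uAPI) built on the libudev monitor
 * API; error handling and cleanup are omitted, and a real program would
 * poll() on udev_monitor_get_fd() rather than spin on receive.
 *
 * .. code-block:: C
 *
 *	#include <libudev.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct udev *udev = udev_new();
 *		struct udev_monitor *mon =
 *			udev_monitor_new_from_netlink(udev, "udev");
 *
 *		udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *		udev_monitor_enable_receiving(mon);
 *
 *		for (;;) {
 *			struct udev_device *dev = udev_monitor_receive_device(mon);
 *			const char *val;
 *
 *			if (!dev)
 *				continue;
 *			val = udev_device_get_property_value(dev, I915_ERROR_UEVENT);
 *			if (val)
 *				printf("i915 error uevent, value %s\n", val);
 *			udev_device_unref(dev);
 *		}
 *	}
 */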
/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the namespace for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/**
 * enum drm_i915_gem_engine_class - uapi engine type enumeration
 *
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. This enum provides a classification of the role
 * of the engine, which may be used when requesting operations to be performed
 * on a certain subset of engines, or for providing information about that
 * group.
 */
enum drm_i915_gem_engine_class {
	/**
	 * @I915_ENGINE_CLASS_RENDER:
	 *
	 * Render engines support instructions used for 3D, Compute (GPGPU),
	 * and programmable media workloads. These instructions fetch data and
	 * dispatch individual work items to threads that operate in parallel.
	 * The threads run small programs (called "kernels" or "shaders") on
	 * the GPU's execution units (EUs).
	 */
	I915_ENGINE_CLASS_RENDER = 0,

	/**
	 * @I915_ENGINE_CLASS_COPY:
	 *
	 * Copy engines (also referred to as "blitters") support instructions
	 * that move blocks of data from one location in memory to another,
	 * or that fill a specified location of memory with fixed data.
	 * Copy engines can perform pre-defined logical or bitwise operations
	 * on the source, destination, or pattern data.
	 */
	I915_ENGINE_CLASS_COPY = 1,

	/**
	 * @I915_ENGINE_CLASS_VIDEO:
	 *
	 * Video engines (also referred to as "bit stream decode" (BSD) or
	 * "vdbox") support instructions that perform fixed-function media
	 * decode and encode.
	 */
	I915_ENGINE_CLASS_VIDEO = 2,

	/**
	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
	 *
	 * Video enhancement engines (also referred to as "vebox") support
	 * instructions related to image enhancement.
	 */
	I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,

	/**
	 * @I915_ENGINE_CLASS_COMPUTE:
	 *
	 * Compute engines support a subset of the instructions available
	 * on render engines: compute engines support Compute (GPGPU) and
	 * programmable media workloads, but do not support the 3D pipeline.
	 */
	I915_ENGINE_CLASS_COMPUTE = 4,

	/* Values in this enum should be kept compact. */

	/**
	 * @I915_ENGINE_CLASS_INVALID:
	 *
	 * Placeholder value to represent an invalid engine class assignment.
	 */
	I915_ENGINE_CLASS_INVALID = -1
};

/**
 * struct i915_engine_class_instance - Engine class/instance identifier
 *
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	/**
	 * @engine_class:
	 *
	 * Engine class from enum drm_i915_gem_engine_class
	 */
	__u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2

	/**
	 * @engine_instance:
	 *
	 * Engine instance.
	 */
	__u16 engine_instance;
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
 *
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
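
/*
 * Example: sampling render engine busyness via perf_event_open(2). A minimal
 * sketch only (not part of the uAPI); it assumes the i915 PMU type is exposed
 * at /sys/bus/event_source/devices/i915/type and omits all error handling.
 * Uncore PMUs like this one require pid == -1 and an explicit cpu.
 *
 * .. code-block:: C
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		unsigned long long busy;
 *		int type, fd;
 *		FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
 *
 *		fscanf(f, "%d", &type);
 *		fclose(f);
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type = type;
 *		attr.size = sizeof(attr);
 *		attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
 *
 *		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *		sleep(1);
 *		read(fd, &busy, sizeof(busy));	// busy time, in ns
 *		printf("render busy: %llu ns\n", busy);
 *		return 0;
 *	}
 */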
/*
 * Top 4 bits of every non-engine counter are GT id.
 */
#define __I915_PMU_GT_SHIFT (60)

#define ___I915_PMU_OTHER(gt, x) \
	(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
	((__u64)(gt) << __I915_PMU_GT_SHIFT))

#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY

#define __I915_PMU_ACTUAL_FREQUENCY(gt)		___I915_PMU_OTHER(gt, 0)
#define __I915_PMU_REQUESTED_FREQUENCY(gt)	___I915_PMU_OTHER(gt, 1)
#define __I915_PMU_INTERRUPTS(gt)		___I915_PMU_OTHER(gt, 2)
#define __I915_PMU_RC6_RESIDENCY(gt)		___I915_PMU_OTHER(gt, 3)
#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt)	___I915_PMU_OTHER(gt, 4)

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1
#define I915_BOX_FLIP		0x2
#define I915_BOX_WAIT		0x4
#define I915_BOX_TEXTURE_LOAD	0x8
#define I915_BOX_LOST_CONTEXT	0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
#define DRM_I915_GEM_CREATE_EXT		0x3c
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC		 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT		 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask, nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1	Low priority
 * 0		Normal priority
 * 1 to 1k	Highest priority
 */
#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)

/*
 * Query the status of HuC load.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *  -ENODEV if HuC is not present on this platform,
 *  -EOPNOTSUPP if HuC firmware usage is disabled,
 *  -ENOPKG if HuC firmware fetch failed,
 *  -ENOEXEC if HuC firmware is invalid or mismatched,
 *  -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
 *  -EIO if the FW transfer or the FW authentication failed.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of the
 * following values:
 *  * 0 if HuC firmware load is not complete,
 *  * 1 if HuC firmware is loaded and fully authenticated,
 *  * 2 if HuC firmware is loaded and authenticated for clear media only
 */
#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
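
/*
 * Example: waiting on and signaling syncobjs around a batch with
 * I915_EXEC_FENCE_ARRAY. This is only an illustrative sketch: execbuf setup
 * and error handling are elided, the variables (fd, objs, nobjs, ring,
 * wait_syncobj, signal_syncobj) are hypothetical, and struct
 * drm_i915_gem_exec_fence and struct drm_i915_gem_execbuffer2 are defined
 * further down this file. Note how cliprects_ptr/num_cliprects are reused
 * to carry the fence array.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj, .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = nobjs,
 *		.flags = ring | I915_EXEC_FENCE_ARRAY,
 *		.cliprects_ptr = (uintptr_t)fences, // reused for fences
 *		.num_cliprects = 2,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */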
/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56

/*
 * Frequency of the timestamps in OA reports. This used to be the same as the CS
 * timestamp frequency, but differs on some platforms.
 */
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57

/*
 * Query the status of PXP support in i915.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *     -ENODEV = PXP support is not available on the GPU device or in the
 *     kernel due to missing component drivers or kernel configs.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of
 * the following values:
 *     1 = PXP feature is supported and is ready for use.
 *     2 = PXP feature is supported but should be ready soon (pending
 *     initialization of non-i915 system dependencies).
 *
 * NOTE: When param is supported (positive return values), user space should
 *     still refer to the GEM PXP context-creation UAPI header specs to be
 *     aware of possible failure due to system state machine at the time.
 */
#define I915_PARAM_PXP_STATUS		 58

/* Must be kept compact -- no holes and well documented */

/**
 * struct drm_i915_getparam - Driver parameter query structure.
 */
struct drm_i915_getparam {
	/** @param: Driver parameter to query. */
	__s32 param;

	/**
	 * @value: Address of memory where queried value should be put.
	 *
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
};

/**
 * typedef drm_i915_getparam_t - Driver parameter query structure.
 * See struct drm_i915_getparam.
 */
typedef struct drm_i915_getparam drm_i915_getparam_t;
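
/*
 * Example: querying a parameter. A sketch only; "fd" is assumed to be an
 * open DRM device file descriptor and error handling is minimal. Unknown
 * parameters fail on older kernels, which is the expected way to probe
 * for features.
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *
 *	static int i915_getparam(int fd, __s32 param)
 *	{
 *		int value = 0;
 *		struct drm_i915_getparam gp = {
 *			.param = param,
 *			.value = &value,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *			return 0; // unsupported/unknown parameter
 *		return value;
 *	}
 *
 *	// e.g. has_async = i915_getparam(fd, I915_PARAM_HAS_EXEC_ASYNC);
 */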
/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};
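
/*
 * Example: creating a buffer object. Sketch only; error handling is elided
 * and "fd" is assumed to be an open DRM device file descriptor. The kernel
 * rounds the object size up to a multiple of the page size.
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *
 *	static __u32 gem_create(int fd, __u64 size)
 *	{
 *		struct drm_i915_gem_create arg = { .size = size };
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &arg))
 *			return 0; // handles are nonzero
 *		return arg.handle;
 *	}
 */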
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
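
/*
 * Example: uploading data with pwrite. Sketch only; error handling is
 * elided, "fd" is assumed to be an open DRM device file descriptor, and
 * <stdint.h> provides uintptr_t for the user-pointer conversion.
 *
 * .. code-block:: C
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	static int gem_pwrite(int fd, __u32 handle, __u64 offset,
 *			      const void *data, __u64 size)
 *	{
 *		struct drm_i915_gem_pwrite arg = {
 *			.handle = handle,
 *			.offset = offset,
 *			.size = size,
 *			.data_ptr = (uintptr_t)data,
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
 *	}
 */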
9717961c5b6SMaarten Lankhorst * 972cc662126SAbdiel Janulgue * This is a fixed-size type for 32/64 compatibility. 9737961c5b6SMaarten Lankhorst */ 974cc662126SAbdiel Janulgue __u64 addr_ptr; 9757961c5b6SMaarten Lankhorst 976cc662126SAbdiel Janulgue /** 977cc662126SAbdiel Janulgue * Flags for extended behaviour. 9787961c5b6SMaarten Lankhorst * 979cc662126SAbdiel Janulgue * Added in version 2. 980cc662126SAbdiel Janulgue */ 981cc662126SAbdiel Janulgue __u64 flags; 982cc662126SAbdiel Janulgue #define I915_MMAP_WC 0x1 983cc662126SAbdiel Janulgue }; 984cc662126SAbdiel Janulgue 9857961c5b6SMaarten Lankhorst struct drm_i915_gem_mmap_gtt { 986cc662126SAbdiel Janulgue /** Handle for the object being mapped. */ 9877961c5b6SMaarten Lankhorst __u32 handle; 9887961c5b6SMaarten Lankhorst __u32 pad; 9897961c5b6SMaarten Lankhorst /** 9907961c5b6SMaarten Lankhorst * Fake offset to use for subsequent mmap call 9917961c5b6SMaarten Lankhorst * 9927961c5b6SMaarten Lankhorst * This is a fixed-size type for 32/64 compatibility. 9937961c5b6SMaarten Lankhorst */ 9947961c5b6SMaarten Lankhorst __u64 offset; 9957961c5b6SMaarten Lankhorst }; 9967961c5b6SMaarten Lankhorst 9977961c5b6SMaarten Lankhorst /** 9987961c5b6SMaarten Lankhorst * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object. 9997961c5b6SMaarten Lankhorst * 10007961c5b6SMaarten Lankhorst * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl, 1001cc662126SAbdiel Janulgue * and is used to retrieve the fake offset to mmap an object specified by &handle. 1002cc662126SAbdiel Janulgue * 10037961c5b6SMaarten Lankhorst * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+. 1004cc662126SAbdiel Janulgue * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave 1005cc662126SAbdiel Janulgue * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`. 1006cc662126SAbdiel Janulgue */ 1007cc662126SAbdiel Janulgue struct drm_i915_gem_mmap_offset { 10087961c5b6SMaarten Lankhorst /** @handle: Handle for the object being mapped. */ 1009cc662126SAbdiel Janulgue __u32 handle; 10107961c5b6SMaarten Lankhorst /** @pad: Must be zero */ 10117961c5b6SMaarten Lankhorst __u32 pad; 1012cc662126SAbdiel Janulgue /** 1013cc662126SAbdiel Janulgue * @offset: The fake offset to use for subsequent mmap call 1014cc662126SAbdiel Janulgue * 1015cc662126SAbdiel Janulgue * This is a fixed-size type for 32/64 compatibility. 1016cc662126SAbdiel Janulgue */ 1017cc662126SAbdiel Janulgue __u64 offset; 10183aa8c57fSMatthew Auld 10193aa8c57fSMatthew Auld /** 10203aa8c57fSMatthew Auld * @flags: Flags for extended behaviour. 10213aa8c57fSMatthew Auld * 10223aa8c57fSMatthew Auld * It is mandatory that one of the `MMAP_OFFSET` types 10233aa8c57fSMatthew Auld * should be included: 10243aa8c57fSMatthew Auld * 10253aa8c57fSMatthew Auld * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined) 10263aa8c57fSMatthew Auld * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching. 10273aa8c57fSMatthew Auld * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching. 10283aa8c57fSMatthew Auld * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching. 10293aa8c57fSMatthew Auld * 10303aa8c57fSMatthew Auld * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid 10313aa8c57fSMatthew Auld * type. On devices without local memory, this caching mode is invalid. 
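 *
 * A minimal sketch of the intended two-step flow, first retrieving the fake
 * offset and then passing it to mmap (illustrative only; it assumes an open
 * DRM fd, an existing object handle and libdrm's drmIoctl()):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_mmap_offset mmo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, mmo.offset);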
10323aa8c57fSMatthew Auld *
10333aa8c57fSMatthew Auld * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
10343aa8c57fSMatthew Auld * be used, depending on the object placement on creation. WB will be used
10353aa8c57fSMatthew Auld * when the object can only exist in system memory, WC otherwise.
103681340cf3SMatthew Auld */
103781340cf3SMatthew Auld __u64 flags;
103881340cf3SMatthew Auld
103981340cf3SMatthew Auld #define I915_MMAP_OFFSET_GTT 0
104081340cf3SMatthew Auld #define I915_MMAP_OFFSET_WC 1
104181340cf3SMatthew Auld #define I915_MMAP_OFFSET_WB 2
104281340cf3SMatthew Auld #define I915_MMAP_OFFSET_UC 3
104381340cf3SMatthew Auld #define I915_MMAP_OFFSET_FIXED 4
104481340cf3SMatthew Auld
104581340cf3SMatthew Auld /**
104681340cf3SMatthew Auld * @extensions: Zero-terminated chain of extensions.
104781340cf3SMatthew Auld *
104881340cf3SMatthew Auld * No current extensions defined; mbz.
104981340cf3SMatthew Auld */
105081340cf3SMatthew Auld __u64 extensions;
105181340cf3SMatthew Auld };
105281340cf3SMatthew Auld
105381340cf3SMatthew Auld /**
105481340cf3SMatthew Auld * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
10553aa8c57fSMatthew Auld * preparation for accessing the pages via some CPU domain.
1056718dceddSDavid Howells *
10573aa8c57fSMatthew Auld * Specifying a new write or read domain will flush the object out of the
1058718dceddSDavid Howells * previous domain (if required), before then updating the object's domain
1059718dceddSDavid Howells * tracking with the new domain.
10603aa8c57fSMatthew Auld *
1061718dceddSDavid Howells * Note this might involve waiting for the object first if it is still active on
1062718dceddSDavid Howells * the GPU.
10633aa8c57fSMatthew Auld *
10643aa8c57fSMatthew Auld * Supported values for @read_domains and @write_domain:
10653aa8c57fSMatthew Auld *
10663aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
10673aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_CPU: CPU cache domain
10683aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
1069718dceddSDavid Howells *
1070718dceddSDavid Howells * All other domains are rejected.
1071718dceddSDavid Howells *
1072718dceddSDavid Howells * Note that for discrete, starting from DG1, this is no longer supported, and
1073718dceddSDavid Howells * is instead rejected. On such platforms the CPU domain is effectively static,
1074718dceddSDavid Howells * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
1075718dceddSDavid Howells * which can't be set explicitly and instead depends on the object placements,
1076718dceddSDavid Howells * as per the below.
1077718dceddSDavid Howells *
1078718dceddSDavid Howells * Implicit caching rules, starting from DG1:
1079718dceddSDavid Howells *
1080718dceddSDavid Howells * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1081718dceddSDavid Howells * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1082718dceddSDavid Howells * mapped as write-combined only.
1083718dceddSDavid Howells *
1084718dceddSDavid Howells * - Everything else is always allocated and mapped as write-back, with the
1085718dceddSDavid Howells * guarantee that everything is also coherent with the GPU.
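 *
 * A short usage sketch, preparing an object for CPU reads on platforms where
 * domains are still supported (illustrative only; it assumes an open DRM fd
 * and libdrm's drmIoctl()):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = 0,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);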
1086718dceddSDavid Howells * 1087718dceddSDavid Howells * Note that this is likely to change in the future again, where we might need 1088718dceddSDavid Howells * more flexibility on future devices, so making this all explicit as part of a 1089718dceddSDavid Howells * new &drm_i915_gem_create_ext extension is probable. 1090718dceddSDavid Howells */ 1091718dceddSDavid Howells struct drm_i915_gem_set_domain { 1092718dceddSDavid Howells /** @handle: Handle for the object. */ 1093718dceddSDavid Howells __u32 handle; 1094718dceddSDavid Howells 1095718dceddSDavid Howells /** @read_domains: New read domains. */ 1096718dceddSDavid Howells __u32 read_domains; 1097718dceddSDavid Howells 1098718dceddSDavid Howells /** 1099718dceddSDavid Howells * @write_domain: New write domain. 1100718dceddSDavid Howells * 1101718dceddSDavid Howells * Note that having something in the write domain implies it's in the 1102718dceddSDavid Howells * read domain, and only that read domain. 1103718dceddSDavid Howells */ 1104718dceddSDavid Howells __u32 write_domain; 1105718dceddSDavid Howells }; 1106718dceddSDavid Howells 1107718dceddSDavid Howells struct drm_i915_gem_sw_finish { 1108718dceddSDavid Howells /** Handle for the object */ 1109718dceddSDavid Howells __u32 handle; 1110718dceddSDavid Howells }; 1111718dceddSDavid Howells 1112718dceddSDavid Howells struct drm_i915_gem_relocation_entry { 1113718dceddSDavid Howells /** 1114718dceddSDavid Howells * Handle of the buffer being pointed to by this relocation entry. 1115718dceddSDavid Howells * 1116718dceddSDavid Howells * It's appealing to make this be an index into the mm_validate_entry 1117718dceddSDavid Howells * list to refer to the buffer, but this allows the driver to create 1118718dceddSDavid Howells * a relocation list for state buffers and not re-write it per 1119718dceddSDavid Howells * exec using the buffer. 1120718dceddSDavid Howells */ 1121718dceddSDavid Howells __u32 target_handle; 1122718dceddSDavid Howells 1123718dceddSDavid Howells /** 1124718dceddSDavid Howells * Value to be added to the offset of the target buffer to make up 1125718dceddSDavid Howells * the relocation entry. 1126718dceddSDavid Howells */ 1127718dceddSDavid Howells __u32 delta; 1128718dceddSDavid Howells 1129718dceddSDavid Howells /** Offset in the buffer the relocation entry will be written into */ 1130718dceddSDavid Howells __u64 offset; 1131718dceddSDavid Howells 1132718dceddSDavid Howells /** 1133718dceddSDavid Howells * Offset value of the target buffer that the relocation entry was last 1134718dceddSDavid Howells * written as. 1135718dceddSDavid Howells * 1136718dceddSDavid Howells * If the buffer has the same offset as last time, we can skip syncing 1137718dceddSDavid Howells * and writing the relocation. This value is written back out by 1138718dceddSDavid Howells * the execbuffer ioctl when the relocation is written. 1139718dceddSDavid Howells */ 1140718dceddSDavid Howells __u64 presumed_offset; 1141718dceddSDavid Howells 1142718dceddSDavid Howells /** 1143e22d8e3cSChris Wilson * Target memory domains read by this operation. 1144e22d8e3cSChris Wilson */ 1145718dceddSDavid Howells __u32 read_domains; 1146718dceddSDavid Howells 1147718dceddSDavid Howells /** 1148718dceddSDavid Howells * Target memory domains written by this operation. 
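 *
 * A filled-in relocation might look like this (an illustrative sketch; the
 * handle and offset variables are assumed):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_bo_handle,
 *		.delta = 0,
 *		.offset = offset_of_pointer_in_batch,
 *		.presumed_offset = last_known_gtt_offset,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = I915_GEM_DOMAIN_RENDER,
 *	};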
1149718dceddSDavid Howells *
1150718dceddSDavid Howells * Note that only one domain may be written by the whole
1151718dceddSDavid Howells * execbuffer operation, so that where there are conflicts,
1152718dceddSDavid Howells * the application will get -EINVAL back.
1153718dceddSDavid Howells */
1154718dceddSDavid Howells __u32 write_domain;
1155718dceddSDavid Howells };
1156718dceddSDavid Howells
1157718dceddSDavid Howells /** @{
1158718dceddSDavid Howells * Intel memory domains
1159718dceddSDavid Howells *
1160718dceddSDavid Howells * Most of these just align with the various caches in
1161718dceddSDavid Howells * the system and are used to flush and invalidate as
1162718dceddSDavid Howells * objects end up cached in different domains.
1163718dceddSDavid Howells */
1164718dceddSDavid Howells /** CPU cache */
1165718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU 0x00000001
1166718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */
1167718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER 0x00000002
1168718dceddSDavid Howells /** Sampler cache, used by texture engine */
1169718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER 0x00000004
1170718dceddSDavid Howells /** Command queue, used to load batch buffers */
1171718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND 0x00000008
1172b5b6f6a6SJason Ekstrand /** Instruction cache, used by shader programs */
1173718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
1174718dceddSDavid Howells /** Vertex address cache */
1175718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX 0x00000020
1176718dceddSDavid Howells /** GTT domain - aperture and scanout */
1177718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT 0x00000040
1178718dceddSDavid Howells /** WC domain - uncached access */
1179718dceddSDavid Howells #define I915_GEM_DOMAIN_WC 0x00000080
1180718dceddSDavid Howells /** @} */
1181718dceddSDavid Howells
1182718dceddSDavid Howells struct drm_i915_gem_exec_object {
1183718dceddSDavid Howells /**
1184718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this
1185718dceddSDavid Howells * operation.
1186718dceddSDavid Howells */
1187718dceddSDavid Howells __u32 handle;
1188718dceddSDavid Howells
1189718dceddSDavid Howells /** Number of relocations to be performed on this buffer */
1190718dceddSDavid Howells __u32 relocation_count;
1191718dceddSDavid Howells /**
1192718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing
1193718dceddSDavid Howells * the relocations to be performed in this buffer.
1194718dceddSDavid Howells */
1195718dceddSDavid Howells __u64 relocs_ptr;
1196718dceddSDavid Howells
1197718dceddSDavid Howells /** Required alignment in graphics aperture */
1198718dceddSDavid Howells __u64 alignment;
1199718dceddSDavid Howells
1200718dceddSDavid Howells /**
1201718dceddSDavid Howells * Returned value of the updated offset of the object, for future
1202718dceddSDavid Howells * presumed_offset writes.
1203718dceddSDavid Howells */
1204718dceddSDavid Howells __u64 offset;
1205718dceddSDavid Howells };
1206718dceddSDavid Howells
1207718dceddSDavid Howells /* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
1208718dceddSDavid Howells struct drm_i915_gem_execbuffer {
1209718dceddSDavid Howells /**
1210718dceddSDavid Howells * List of buffers to be validated with their relocations to be
1211718dceddSDavid Howells * performed on them.
1212718dceddSDavid Howells * 1213718dceddSDavid Howells * This is a pointer to an array of struct drm_i915_gem_validate_entry. 1214718dceddSDavid Howells * 1215718dceddSDavid Howells * These buffers must be listed in an order such that all relocations 1216718dceddSDavid Howells * a buffer is performing refer to buffers that have already appeared 1217506a8e87SChris Wilson * in the validate list. 1218506a8e87SChris Wilson */ 1219caa574ffSMatthew Auld __u64 buffers_ptr; 1220506a8e87SChris Wilson __u32 buffer_count; 1221506a8e87SChris Wilson 1222caa574ffSMatthew Auld /** Offset in the batchbuffer to start execution from. */ 1223506a8e87SChris Wilson __u32 batch_start_offset; 1224506a8e87SChris Wilson /** Bytes used in batchbuffer from batch_start_offset */ 1225caa574ffSMatthew Auld __u32 batch_len; 1226caa574ffSMatthew Auld __u32 DR1; 1227caa574ffSMatthew Auld __u32 DR4; 1228caa574ffSMatthew Auld __u32 num_cliprects; 1229718dceddSDavid Howells /** This is a struct drm_clip_rect *cliprects */ 1230718dceddSDavid Howells __u64 cliprects_ptr; 1231718dceddSDavid Howells }; 1232718dceddSDavid Howells 1233ed5982e6SDaniel Vetter struct drm_i915_gem_exec_object2 { 1234ed5982e6SDaniel Vetter /** 1235101b506aSMichel Thierry * User's handle for a buffer to be bound into the GTT for this 1236506a8e87SChris Wilson * operation. 123791b2db6fSChris Wilson */ 123877ae9957SChris Wilson __u32 handle; 123977ae9957SChris Wilson 124077ae9957SChris Wilson /** Number of relocations to be performed on this buffer */ 124177ae9957SChris Wilson __u32 relocation_count; 124277ae9957SChris Wilson /** 124377ae9957SChris Wilson * Pointer to array of struct drm_i915_gem_relocation_entry containing 124477ae9957SChris Wilson * the relocations to be performed in this buffer. 124577ae9957SChris Wilson */ 124677ae9957SChris Wilson __u64 relocs_ptr; 124777ae9957SChris Wilson 124877ae9957SChris Wilson /** Required alignment in graphics aperture */ 124977ae9957SChris Wilson __u64 alignment; 125077ae9957SChris Wilson 125177ae9957SChris Wilson /** 125277ae9957SChris Wilson * When the EXEC_OBJECT_PINNED flag is specified this is populated by 125377ae9957SChris Wilson * the user with the GTT offset at which this object will be pinned. 125477ae9957SChris Wilson * 125577ae9957SChris Wilson * When the I915_EXEC_NO_RELOC flag is specified this must contain the 125677ae9957SChris Wilson * presumed_offset of the object. 125777ae9957SChris Wilson * 125877ae9957SChris Wilson * During execbuffer2 the kernel populates it with the value of the 1259b0fd47adSChris Wilson * current GTT offset of the object, for future presumed_offset writes. 1260b0fd47adSChris Wilson * 1261b0fd47adSChris Wilson * See struct drm_i915_gem_create_ext for the rules when dealing with 1262b0fd47adSChris Wilson * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with 1263b0fd47adSChris Wilson * minimum page sizes, like DG2. 
1264b0fd47adSChris Wilson */
1265b0fd47adSChris Wilson __u64 offset;
12669e2793f6SDave Gordon
1267b0fd47adSChris Wilson #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
1268718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_GTT (1<<1)
1269ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE (1<<2)
127091b2db6fSChris Wilson #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1271718dceddSDavid Howells #define EXEC_OBJECT_PINNED (1<<4)
127291b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
127391b2db6fSChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and
1274718dceddSDavid Howells * synchronises operations with outstanding rendering. This includes
1275718dceddSDavid Howells * rendering on other devices if exported via dma-buf. However, sometimes
1276718dceddSDavid Howells * this tracking is too coarse and the user knows better. For example,
1277cf6e7bacSJason Ekstrand * if the object is split into non-overlapping ranges shared between different
1278a913bde8SNiranjana Vishwanathapura * clients or engines (i.e. suballocating objects), the implicit tracking
1279a913bde8SNiranjana Vishwanathapura * by the kernel assumes that each operation affects the whole object rather
1280a913bde8SNiranjana Vishwanathapura * than an individual range, causing needless synchronisation between clients.
1281a913bde8SNiranjana Vishwanathapura * The kernel will also forgo any CPU cache flushes prior to rendering from
1282a913bde8SNiranjana Vishwanathapura * the object as the client is expected to be also handling such domain
1283a913bde8SNiranjana Vishwanathapura * tracking.
1284a913bde8SNiranjana Vishwanathapura *
1285cf6e7bacSJason Ekstrand * The kernel maintains the implicit tracking in order to manage resources
1286a913bde8SNiranjana Vishwanathapura * used by the GPU - this flag only disables the synchronisation prior to
1287a913bde8SNiranjana Vishwanathapura * rendering with this object in this execbuf.
1288cf6e7bacSJason Ekstrand *
1289cf6e7bacSJason Ekstrand * Opting out of implicit synchronisation requires the user to do its own
1290a913bde8SNiranjana Vishwanathapura * explicit tracking to avoid rendering corruption. See, for example,
1291a913bde8SNiranjana Vishwanathapura * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
1292a913bde8SNiranjana Vishwanathapura */
1293a913bde8SNiranjana Vishwanathapura #define EXEC_OBJECT_ASYNC (1<<6)
1294a913bde8SNiranjana Vishwanathapura /* Request that the contents of this execobject be copied into the error
1295a913bde8SNiranjana Vishwanathapura * state upon a GPU hang involving this batch for post-mortem debugging.
1296a913bde8SNiranjana Vishwanathapura * These buffers are recorded in no particular order as "user" in
1297a913bde8SNiranjana Vishwanathapura * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1298a913bde8SNiranjana Vishwanathapura * if the kernel supports this flag.
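 *
 * For example, requesting capture of a state buffer might look like this
 * (an illustrative sketch; the handle is assumed):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = state_bo_handle,
 *		.flags = EXEC_OBJECT_CAPTURE,
 *	};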
1299a913bde8SNiranjana Vishwanathapura */ 1300cf6e7bacSJason Ekstrand #define EXEC_OBJECT_CAPTURE (1<<7) 1301cf6e7bacSJason Ekstrand /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 1302ebcaa1ffSTvrtko Ursulin #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1) 1303cf6e7bacSJason Ekstrand __u64 flags; 1304cf6e7bacSJason Ekstrand 1305a913bde8SNiranjana Vishwanathapura union { 1306a913bde8SNiranjana Vishwanathapura __u64 rsvd1; 1307a913bde8SNiranjana Vishwanathapura __u64 pad_to_size; 1308a913bde8SNiranjana Vishwanathapura }; 130913149e8bSLionel Landwerlin __u64 rsvd2; 131013149e8bSLionel Landwerlin }; 131113149e8bSLionel Landwerlin 131213149e8bSLionel Landwerlin /** 131313149e8bSLionel Landwerlin * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf 1314a913bde8SNiranjana Vishwanathapura * ioctl. 1315a913bde8SNiranjana Vishwanathapura * 131613149e8bSLionel Landwerlin * The request will wait for input fence to signal before submission. 131713149e8bSLionel Landwerlin * 131813149e8bSLionel Landwerlin * The returned output fence will be signaled after the completion of the 1319a913bde8SNiranjana Vishwanathapura * request. 1320a913bde8SNiranjana Vishwanathapura */ 132113149e8bSLionel Landwerlin struct drm_i915_gem_exec_fence { 132213149e8bSLionel Landwerlin /** @handle: User's handle for a drm_syncobj to wait on or signal. */ 132313149e8bSLionel Landwerlin __u32 handle; 132413149e8bSLionel Landwerlin 1325a913bde8SNiranjana Vishwanathapura /** 1326a913bde8SNiranjana Vishwanathapura * @flags: Supported flags are: 132713149e8bSLionel Landwerlin * 132813149e8bSLionel Landwerlin * I915_EXEC_FENCE_WAIT: 132913149e8bSLionel Landwerlin * Wait for the input fence before request submission. 133013149e8bSLionel Landwerlin * 1331a913bde8SNiranjana Vishwanathapura * I915_EXEC_FENCE_SIGNAL: 1332a913bde8SNiranjana Vishwanathapura * Return request completion fence as output 1333a913bde8SNiranjana Vishwanathapura */ 1334a913bde8SNiranjana Vishwanathapura __u32 flags; 1335a913bde8SNiranjana Vishwanathapura #define I915_EXEC_FENCE_WAIT (1<<0) 133613149e8bSLionel Landwerlin #define I915_EXEC_FENCE_SIGNAL (1<<1) 133713149e8bSLionel Landwerlin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) 1338cda9edd0SLionel Landwerlin }; 1339cda9edd0SLionel Landwerlin 1340718dceddSDavid Howells /** 1341a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences 1342a913bde8SNiranjana Vishwanathapura * for execbuf ioctl. 1343718dceddSDavid Howells * 1344a913bde8SNiranjana Vishwanathapura * This structure describes an array of drm_syncobj and associated points for 1345a913bde8SNiranjana Vishwanathapura * timeline variants of drm_syncobj. It is invalid to append this structure to 1346718dceddSDavid Howells * the execbuf if I915_EXEC_FENCE_ARRAY is set. 1347a913bde8SNiranjana Vishwanathapura */ 1348a913bde8SNiranjana Vishwanathapura struct drm_i915_gem_execbuffer_ext_timeline_fences { 1349718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 1350718dceddSDavid Howells /** @base: Extension link. See struct i915_user_extension. */ 1351cf6e7bacSJason Ekstrand struct i915_user_extension base; 1352a913bde8SNiranjana Vishwanathapura 1353a913bde8SNiranjana Vishwanathapura /** 1354a913bde8SNiranjana Vishwanathapura * @fence_count: Number of elements in the @handles_ptr & @value_ptr 1355a913bde8SNiranjana Vishwanathapura * arrays. 
 * arrays.
1356a913bde8SNiranjana Vishwanathapura */
1357a913bde8SNiranjana Vishwanathapura __u64 fence_count;
1358a913bde8SNiranjana Vishwanathapura
1359a913bde8SNiranjana Vishwanathapura /**
1360a913bde8SNiranjana Vishwanathapura * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
1361a913bde8SNiranjana Vishwanathapura * of length @fence_count.
1362a913bde8SNiranjana Vishwanathapura */
1363a913bde8SNiranjana Vishwanathapura __u64 handles_ptr;
1364a913bde8SNiranjana Vishwanathapura
1365a913bde8SNiranjana Vishwanathapura /**
1366a913bde8SNiranjana Vishwanathapura * @values_ptr: Pointer to an array of u64 values of length
1367a913bde8SNiranjana Vishwanathapura * @fence_count.
1368a913bde8SNiranjana Vishwanathapura * Values must be 0 for a binary drm_syncobj. A value of 0 for a
1369a913bde8SNiranjana Vishwanathapura * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
1370a913bde8SNiranjana Vishwanathapura * binary one.
1371a913bde8SNiranjana Vishwanathapura */
1372a913bde8SNiranjana Vishwanathapura __u64 values_ptr;
1373a913bde8SNiranjana Vishwanathapura };
1374a913bde8SNiranjana Vishwanathapura
1375a913bde8SNiranjana Vishwanathapura /**
1376a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
1377a913bde8SNiranjana Vishwanathapura * ioctl.
1378cda9edd0SLionel Landwerlin */
1379cda9edd0SLionel Landwerlin struct drm_i915_gem_execbuffer2 {
1380a913bde8SNiranjana Vishwanathapura /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
1381a913bde8SNiranjana Vishwanathapura __u64 buffers_ptr;
1382cda9edd0SLionel Landwerlin
1383cda9edd0SLionel Landwerlin /** @buffer_count: Number of elements in @buffers_ptr array */
1384a913bde8SNiranjana Vishwanathapura __u32 buffer_count;
1385cf6e7bacSJason Ekstrand
1386718dceddSDavid Howells /**
1387a913bde8SNiranjana Vishwanathapura * @batch_start_offset: Offset in the batchbuffer to start execution
1388a913bde8SNiranjana Vishwanathapura * from.
1389a913bde8SNiranjana Vishwanathapura */
1390d90c06d5SChris Wilson __u32 batch_start_offset;
1391718dceddSDavid Howells
1392718dceddSDavid Howells /**
1393718dceddSDavid Howells * @batch_len: Length in bytes of the batch buffer, starting from the
1394718dceddSDavid Howells * @batch_start_offset. If 0, length is assumed to be the batch buffer
139582f91b6eSXiang, Haihao * object size.
1396718dceddSDavid Howells */
1397718dceddSDavid Howells __u32 batch_len;
1398718dceddSDavid Howells
1399718dceddSDavid Howells /** @DR1: deprecated */
1400718dceddSDavid Howells __u32 DR1;
1401718dceddSDavid Howells
1402718dceddSDavid Howells /** @DR4: deprecated */
1403718dceddSDavid Howells __u32 DR4;
1404718dceddSDavid Howells
1405718dceddSDavid Howells /** @num_cliprects: See @cliprects_ptr */
1406718dceddSDavid Howells __u32 num_cliprects;
1407718dceddSDavid Howells
1408718dceddSDavid Howells /**
1409718dceddSDavid Howells * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
1410718dceddSDavid Howells *
1411c2fb7916SDaniel Vetter * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
1412c2fb7916SDaniel Vetter * I915_EXEC_USE_EXTENSIONS flags are not set.
1413c2fb7916SDaniel Vetter *
1414c2fb7916SDaniel Vetter * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
1415c2fb7916SDaniel Vetter * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
1416b45305fcSDaniel Vetter * array.
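 *
 * For example, waiting on one drm_syncobj and signaling another via the
 * fence array could look like this (an illustrative sketch; the syncobj
 * handles and the execbuf being filled in elsewhere are assumed):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj, .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *
 *	execbuf.cliprects_ptr = (uintptr_t)fences;
 *	execbuf.num_cliprects = 2;
 *	execbuf.flags |= I915_EXEC_FENCE_ARRAY;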
1417b45305fcSDaniel Vetter * 1418b45305fcSDaniel Vetter * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a 1419b45305fcSDaniel Vetter * single &i915_user_extension and num_cliprects is 0. 1420b45305fcSDaniel Vetter */ 1421b45305fcSDaniel Vetter __u64 cliprects_ptr; 1422b45305fcSDaniel Vetter 1423b45305fcSDaniel Vetter /** @flags: Execbuf flags */ 1424b45305fcSDaniel Vetter __u64 flags; 1425c3d19d3cSGeert Uytterhoeven #define I915_EXEC_RING_MASK (0x3f) 1426ed5982e6SDaniel Vetter #define I915_EXEC_DEFAULT (0<<0) 1427ed5982e6SDaniel Vetter #define I915_EXEC_RENDER (1<<0) 1428ed5982e6SDaniel Vetter #define I915_EXEC_BSD (2<<0) 1429ed5982e6SDaniel Vetter #define I915_EXEC_BLT (3<<0) 1430ed5982e6SDaniel Vetter #define I915_EXEC_VEBOX (4<<0) 1431ed5982e6SDaniel Vetter 1432eef90ccbSChris Wilson /* Used for switching the constants addressing mode on gen4+ RENDER ring. 1433eef90ccbSChris Wilson * Gen6+ only supports relative addressing to dynamic state (default) and 1434eef90ccbSChris Wilson * absolute addressing. 1435eef90ccbSChris Wilson * 1436eef90ccbSChris Wilson * These flags are ignored for the BSD and BLT rings. 14378d360dffSZhipeng Gong */ 1438d9da6aa0STvrtko Ursulin #define I915_EXEC_CONSTANTS_MASK (3<<6) 1439d9da6aa0STvrtko Ursulin #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 1440d9da6aa0STvrtko Ursulin #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 1441d9da6aa0STvrtko Ursulin #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 1442d9da6aa0STvrtko Ursulin 1443d9da6aa0STvrtko Ursulin /** Resets the SO write offset registers for transform feedback on gen7. */ 14448d360dffSZhipeng Gong #define I915_EXEC_GEN7_SOL_RESET (1<<8) 1445a9ed33caSAbdiel Janulgue 1446a9ed33caSAbdiel Janulgue /** Request a privileged ("secure") batch buffer. Note only available for 1447a9ed33caSAbdiel Janulgue * DRM_ROOT_ONLY | DRM_MASTER processes. 1448a9ed33caSAbdiel Janulgue */ 1449a9ed33caSAbdiel Janulgue #define I915_EXEC_SECURE (1<<9) 1450fec0445cSChris Wilson 1451fec0445cSChris Wilson /** Inform the kernel that the batch is and will always be pinned. This 1452fec0445cSChris Wilson * negates the requirement for a workaround to be performed to avoid 1453fec0445cSChris Wilson * an incoherent CS (such as can be found on 830/845). If this flag is 1454fec0445cSChris Wilson * not passed, the kernel will endeavour to make sure the batch is 1455fec0445cSChris Wilson * coherent with the CS before execution. If this flag is passed, 1456fec0445cSChris Wilson * userspace assumes the responsibility for ensuring the same. 1457fec0445cSChris Wilson */ 1458fec0445cSChris Wilson #define I915_EXEC_IS_PINNED (1<<10) 1459fec0445cSChris Wilson 1460fec0445cSChris Wilson /** Provide a hint to the kernel that the command stream and auxiliary 1461fec0445cSChris Wilson * state buffers already holds the correct presumed addresses and so the 1462fec0445cSChris Wilson * relocation process may be skipped if no buffers need to be moved in 1463fec0445cSChris Wilson * preparation for the execbuffer. 1464fec0445cSChris Wilson */ 1465fec0445cSChris Wilson #define I915_EXEC_NO_RELOC (1<<11) 1466fec0445cSChris Wilson 1467fec0445cSChris Wilson /** Use the reloc.handle as an index into the exec object array rather 1468fec0445cSChris Wilson * than as the per-file handle. 
1469fec0445cSChris Wilson */
1470fec0445cSChris Wilson #define I915_EXEC_HANDLE_LUT (1<<12)
1471fec0445cSChris Wilson
1472fec0445cSChris Wilson /** Used for switching BSD rings on the platforms with two BSD rings */
1473fec0445cSChris Wilson #define I915_EXEC_BSD_SHIFT (13)
1474fec0445cSChris Wilson #define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
14751a71cf2fSChris Wilson /* default ping-pong mode */
14761a71cf2fSChris Wilson #define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
14771a71cf2fSChris Wilson #define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
14781a71cf2fSChris Wilson #define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
14791a71cf2fSChris Wilson
14801a71cf2fSChris Wilson /** Tell the kernel that the batchbuffer is processed by
14811a71cf2fSChris Wilson * the resource streamer.
14821a71cf2fSChris Wilson */
14831a71cf2fSChris Wilson #define I915_EXEC_RESOURCE_STREAMER (1<<15)
14841a71cf2fSChris Wilson
1485cf6e7bacSJason Ekstrand /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1486cf6e7bacSJason Ekstrand * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1487cf6e7bacSJason Ekstrand * the batch.
1488cf6e7bacSJason Ekstrand *
1489cf6e7bacSJason Ekstrand * Returns -EINVAL if the sync_file fd cannot be found.
1490cf6e7bacSJason Ekstrand */
1491cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_IN (1<<16)
1492a88b6e4cSChris Wilson
1493a88b6e4cSChris Wilson /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1494a88b6e4cSChris Wilson * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1495a88b6e4cSChris Wilson * to the caller, and it should be closed after use. (The fd is a regular
1496a88b6e4cSChris Wilson * file descriptor and will be cleaned up on process termination. It holds
1497a88b6e4cSChris Wilson * a reference to the request, but nothing else.)
1498a88b6e4cSChris Wilson *
1499a88b6e4cSChris Wilson * The sync_file fd can be combined with other sync_file fds and passed either
1500a88b6e4cSChris Wilson * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1501cda9edd0SLionel Landwerlin * will only occur after this request completes), or to other devices.
1502cda9edd0SLionel Landwerlin *
1503cda9edd0SLionel Landwerlin * Using I915_EXEC_FENCE_OUT requires use of
1504cda9edd0SLionel Landwerlin * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1505cda9edd0SLionel Landwerlin * back to userspace. Failure to do so will cause the out-fence to always
1506cda9edd0SLionel Landwerlin * be reported as zero, and the real fence fd to be leaked.
1507cda9edd0SLionel Landwerlin */
1508cda9edd0SLionel Landwerlin #define I915_EXEC_FENCE_OUT (1<<17)
1509cda9edd0SLionel Landwerlin
1510ed5982e6SDaniel Vetter /*
1511a913bde8SNiranjana Vishwanathapura * Traditionally the execbuf ioctl has only considered the final element in
1512a913bde8SNiranjana Vishwanathapura * the execobject[] to be the executable batch. Often though, the client
1513a913bde8SNiranjana Vishwanathapura * will know the batch object prior to construction, and being able to place
1514a913bde8SNiranjana Vishwanathapura * it into the execobject[] array first can simplify the relocation tracking.
1515a913bde8SNiranjana Vishwanathapura * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1516a913bde8SNiranjana Vishwanathapura * execobject[] as the batch instead (the default is to use the last
1517a913bde8SNiranjana Vishwanathapura * element).
1518a913bde8SNiranjana Vishwanathapura */
1519a913bde8SNiranjana Vishwanathapura #define I915_EXEC_BATCH_FIRST (1<<18)
1520a913bde8SNiranjana Vishwanathapura
1521a913bde8SNiranjana Vishwanathapura /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1522a913bde8SNiranjana Vishwanathapura * define an array of drm_i915_gem_exec_fence structures which specify a set of
1523a913bde8SNiranjana Vishwanathapura * dma fences to wait upon or signal.
1524a913bde8SNiranjana Vishwanathapura */
1525a913bde8SNiranjana Vishwanathapura #define I915_EXEC_FENCE_ARRAY (1<<19)
1526718dceddSDavid Howells
1527718dceddSDavid Howells /*
1528718dceddSDavid Howells * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1529718dceddSDavid Howells * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1530718dceddSDavid Howells * the batch.
1531718dceddSDavid Howells *
1532718dceddSDavid Howells * Returns -EINVAL if the sync_file fd cannot be found.
1533718dceddSDavid Howells */
1534718dceddSDavid Howells #define I915_EXEC_FENCE_SUBMIT (1 << 20)
1535718dceddSDavid Howells
1536718dceddSDavid Howells /*
1537718dceddSDavid Howells * Setting I915_EXEC_USE_EXTENSIONS implies that
1538718dceddSDavid Howells * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
1539718dceddSDavid Howells * list of i915_user_extension. Each i915_user_extension node is the base of a
1540718dceddSDavid Howells * larger structure. The supported structures are listed in the
1541718dceddSDavid Howells * drm_i915_gem_execbuffer_ext enum.
1542718dceddSDavid Howells */
1543718dceddSDavid Howells #define I915_EXEC_USE_EXTENSIONS (1 << 21)
1544718dceddSDavid Howells #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1545718dceddSDavid Howells
1546718dceddSDavid Howells /** @rsvd1: Context id */
1547718dceddSDavid Howells __u64 rsvd1;
1548718dceddSDavid Howells
1549718dceddSDavid Howells /**
1550718dceddSDavid Howells * @rsvd2: in and out sync_file file descriptors.
1551718dceddSDavid Howells *
1552718dceddSDavid Howells * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
1553718dceddSDavid Howells * lower 32 bits of this field will have the in sync_file fd (input).
1554426960beSChris Wilson *
1555426960beSChris Wilson * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
1556426960beSChris Wilson * field will have the out sync_file fd (output).
1557426960beSChris Wilson */
1558426960beSChris Wilson __u64 rsvd2;
1559426960beSChris Wilson };
15601255501dSChris Wilson
15611255501dSChris Wilson #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
15621255501dSChris Wilson #define i915_execbuffer2_set_context_id(eb2, context) \
15631255501dSChris Wilson (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
15641255501dSChris Wilson #define i915_execbuffer2_get_context_id(eb2) \
15651255501dSChris Wilson ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1566c8b50242SChris Wilson
1567c8b50242SChris Wilson struct drm_i915_gem_pin {
1568c8b50242SChris Wilson /** Handle of the buffer to be pinned. */
1569c8b50242SChris Wilson __u32 handle;
1570c8b50242SChris Wilson __u32 pad;
1571c8b50242SChris Wilson
1572426960beSChris Wilson /** alignment required within the aperture */
1573426960beSChris Wilson __u64 alignment;
1574c8b50242SChris Wilson
1575c8b50242SChris Wilson /** Returned GTT offset of the buffer. */
1576426960beSChris Wilson __u64 offset;
1577426960beSChris Wilson };
1578426960beSChris Wilson
1579426960beSChris Wilson struct drm_i915_gem_unpin {
1580c8b50242SChris Wilson /** Handle of the buffer to be unpinned. */
1581c8b50242SChris Wilson __u32 handle;
1582426960beSChris Wilson __u32 pad;
1583c8b50242SChris Wilson };
1584c8b50242SChris Wilson
1585426960beSChris Wilson struct drm_i915_gem_busy {
1586426960beSChris Wilson /** Handle of the buffer to check for busy */
1587c8b50242SChris Wilson __u32 handle;
1588c649432eSTvrtko Ursulin
1589c8b50242SChris Wilson /** Return busy status
1590c649432eSTvrtko Ursulin *
1591c649432eSTvrtko Ursulin * A return of 0 implies that the object is idle (after
1592c649432eSTvrtko Ursulin * having flushed any pending activity), and a non-zero return that
15931255501dSChris Wilson * the object is still in-flight on the GPU. (The GPU has not yet
15941255501dSChris Wilson * signaled completion for all pending requests that reference the
15951255501dSChris Wilson * object.) An object is guaranteed to become idle eventually (so
15961255501dSChris Wilson * long as no new GPU commands are executed upon it). Due to the
15971255501dSChris Wilson * asynchronous nature of the hardware, an object reported
1598718dceddSDavid Howells * as busy may become idle before the ioctl is completed.
1599718dceddSDavid Howells *
1600718dceddSDavid Howells * Furthermore, if the object is busy, which engine is busy is only
1601718dceddSDavid Howells * provided as a guide and only indirectly by reporting its class
160235c7ab42SDaniel Vetter * (there may be more than one engine in each class). There are race
1603289f5a72SMatthew Auld * conditions which prevent the report of which engines are busy from
1604289f5a72SMatthew Auld * being always accurate. However, the converse is not true. If the
160535c7ab42SDaniel Vetter * object is idle, the result of the ioctl, that all engines are idle,
1606289f5a72SMatthew Auld * is accurate.
1607289f5a72SMatthew Auld *
1608289f5a72SMatthew Auld * The returned dword is split into two fields to indicate both
1609289f5a72SMatthew Auld * the engine classes on which the object is being read, and the
1610289f5a72SMatthew Auld * engine class on which it is currently being written (if any).
1611e7737b67SMatthew Auld *
1612e7737b67SMatthew Auld * The low word (bits 0:15) indicates if the object is being written
1613e7737b67SMatthew Auld * to by any engine (there can only be one, as the GEM implicit
1614e7737b67SMatthew Auld * synchronisation rules force writes to be serialised). Only the
1615e7737b67SMatthew Auld * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1616e7737b67SMatthew Auld * 1 not 0 etc) for the last write is reported.
1617e7737b67SMatthew Auld *
1618e7737b67SMatthew Auld * The high word (bits 16:31) is a bitmask of which engine classes
1619e7737b67SMatthew Auld * are currently reading from the object. Multiple engines may be
1620e7737b67SMatthew Auld * reading from the object simultaneously.
1621e7737b67SMatthew Auld *
1622e7737b67SMatthew Auld * The value of each engine class is the same as specified in the
1623e7737b67SMatthew Auld * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
1624e7737b67SMatthew Auld * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1625e7737b67SMatthew Auld * Some hardware may have parallel execution engines, e.g.
multiple 1626e7737b67SMatthew Auld * media engines, which are mapped to the same class identifier and so 1627e7737b67SMatthew Auld * are not separately reported for busyness. 1628e7737b67SMatthew Auld * 1629e7737b67SMatthew Auld * Caveat emptor: 1630e7737b67SMatthew Auld * Only the boolean result of this query is reliable; that is whether 1631e7737b67SMatthew Auld * the object is idle or busy. The report of which engines are busy 1632e7737b67SMatthew Auld * should be only used as a heuristic. 1633e7737b67SMatthew Auld */ 1634e7737b67SMatthew Auld __u32 busy; 1635e7737b67SMatthew Auld }; 1636e7737b67SMatthew Auld 1637e7737b67SMatthew Auld /** 1638e7737b67SMatthew Auld * struct drm_i915_gem_caching - Set or get the caching for given object 1639e7737b67SMatthew Auld * handle. 164035c7ab42SDaniel Vetter * 1641718dceddSDavid Howells * Allow userspace to control the GTT caching bits for a given object when the 1642718dceddSDavid Howells * object is later mapped through the ppGTT(or GGTT on older platforms lacking 1643289f5a72SMatthew Auld * ppGTT support, or if the object is used for scanout). Note that this might 1644289f5a72SMatthew Auld * require unbinding the object from the GTT first, if its current caching value 1645718dceddSDavid Howells * doesn't match. 1646718dceddSDavid Howells * 1647718dceddSDavid Howells * Note that this all changes on discrete platforms, starting from DG1, the 1648289f5a72SMatthew Auld * set/get caching is no longer supported, and is now rejected. Instead the CPU 1649718dceddSDavid Howells * caching attributes(WB vs WC) will become an immutable creation time property 1650289f5a72SMatthew Auld * for the object, along with the GTT caching level. For now we don't expose any 1651289f5a72SMatthew Auld * new uAPI for this, instead on DG1 this is all implicit, although this largely 1652289f5a72SMatthew Auld * shouldn't matter since DG1 is coherent by default(without any way of 1653289f5a72SMatthew Auld * controlling it). 1654289f5a72SMatthew Auld * 1655289f5a72SMatthew Auld * Implicit caching rules, starting from DG1: 1656289f5a72SMatthew Auld * 1657289f5a72SMatthew Auld * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions) 1658289f5a72SMatthew Auld * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and 1659289f5a72SMatthew Auld * mapped as write-combined only. 1660289f5a72SMatthew Auld * 1661289f5a72SMatthew Auld * - Everything else is always allocated and mapped as write-back, with the 1662289f5a72SMatthew Auld * guarantee that everything is also coherent with the GPU. 1663289f5a72SMatthew Auld * 1664289f5a72SMatthew Auld * Note that this is likely to change in the future again, where we might need 1665289f5a72SMatthew Auld * more flexibility on future devices, so making this all explicit as part of a 1666289f5a72SMatthew Auld * new &drm_i915_gem_create_ext extension is probable. 1667289f5a72SMatthew Auld * 1668289f5a72SMatthew Auld * Side note: Part of the reason for this is that changing the at-allocation-time CPU 1669289f5a72SMatthew Auld * caching attributes for the pages might be required(and is expensive) if we 1670289f5a72SMatthew Auld * need to then CPU map the pages later with different caching attributes. This 1671289f5a72SMatthew Auld * inconsistent caching behaviour, while supported on x86, is not universally 1672289f5a72SMatthew Auld * supported on other architectures. 
So for simplicity we opt for setting 1673289f5a72SMatthew Auld * everything at creation time, whilst also making it immutable, on discrete 1674289f5a72SMatthew Auld * platforms. 1675289f5a72SMatthew Auld */ 1676718dceddSDavid Howells struct drm_i915_gem_caching { 1677718dceddSDavid Howells /** 1678718dceddSDavid Howells * @handle: Handle of the buffer to set/get the caching level. 1679718dceddSDavid Howells */ 1680718dceddSDavid Howells __u32 handle; 1681718dceddSDavid Howells 1682ea673f17SMatt Roper /** 1683ea673f17SMatt Roper * @caching: The GTT caching level to apply or possible return value. 1684ea673f17SMatt Roper * 1685ea673f17SMatt Roper * The supported @caching values: 1686ea673f17SMatt Roper * 1687ea673f17SMatt Roper * I915_CACHING_NONE: 1688deeb1519SChris Wilson * 1689718dceddSDavid Howells * GPU access is not coherent with CPU caches. Default for machines 1690718dceddSDavid Howells * without an LLC. This means manual flushing might be needed, if we 1691718dceddSDavid Howells * want GPU access to be coherent. 1692718dceddSDavid Howells * 1693718dceddSDavid Howells * I915_CACHING_CACHED: 1694718dceddSDavid Howells * 1695718dceddSDavid Howells * GPU access is coherent with CPU caches and furthermore the data is 1696718dceddSDavid Howells * cached in last-level caches shared between CPU cores and the GPU GT. 1697718dceddSDavid Howells * 1698718dceddSDavid Howells * I915_CACHING_DISPLAY: 1699718dceddSDavid Howells * 1700718dceddSDavid Howells * Special GPU caching mode which is coherent with the scanout engines. 1701718dceddSDavid Howells * Transparently falls back to I915_CACHING_NONE on platforms where no 1702718dceddSDavid Howells * special cache mode (like write-through or gfdt flushing) is 1703718dceddSDavid Howells * available. The kernel automatically sets this mode when using a 1704718dceddSDavid Howells * buffer as a scanout target. Userspace can manually set this mode to 1705718dceddSDavid Howells * avoid a costly stall and clflush in the hotpath of drawing the first 1706718dceddSDavid Howells * frame. 1707718dceddSDavid Howells */ 1708718dceddSDavid Howells #define I915_CACHING_NONE 0 1709718dceddSDavid Howells #define I915_CACHING_CACHED 1 1710718dceddSDavid Howells #define I915_CACHING_DISPLAY 2 1711718dceddSDavid Howells __u32 caching; 1712718dceddSDavid Howells }; 1713718dceddSDavid Howells 1714718dceddSDavid Howells #define I915_TILING_NONE 0 1715718dceddSDavid Howells #define I915_TILING_X 1 1716718dceddSDavid Howells #define I915_TILING_Y 2 1717718dceddSDavid Howells /* 1718718dceddSDavid Howells * Do not add new tiling types here. The I915_TILING_* values are for 1719718dceddSDavid Howells * de-tiling fence registers that no longer exist on modern platforms. Although 1720718dceddSDavid Howells * the hardware may support new types of tiling in general (e.g., Tile4), we 1721718dceddSDavid Howells * do not need to add them to the uapi that is specific to now-defunct ioctls. 1722718dceddSDavid Howells */ 1723718dceddSDavid Howells #define I915_TILING_LAST I915_TILING_Y 1724718dceddSDavid Howells 1725718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_NONE 0 1726718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9 1 1727718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10 2 1728718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_11 3 1729718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_11 4 1730718dceddSDavid Howells /* Not seen by userland */ 1731718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_UNKNOWN 5 1732718dceddSDavid Howells /* Seen by userland. 
*/ 1733718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_17 6 1734718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_17 7 1735718dceddSDavid Howells 1736718dceddSDavid Howells struct drm_i915_gem_set_tiling { 1737718dceddSDavid Howells /** Handle of the buffer to have its tiling state updated */ 1738718dceddSDavid Howells __u32 handle; 1739718dceddSDavid Howells 1740718dceddSDavid Howells /** 1741718dceddSDavid Howells * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1742718dceddSDavid Howells * I915_TILING_Y). 1743718dceddSDavid Howells * 1744718dceddSDavid Howells * This value is to be set on request, and will be updated by the 1745718dceddSDavid Howells * kernel on successful return with the actual chosen tiling layout. 1746718dceddSDavid Howells * 174770f2f5c7SChris Wilson * The tiling mode may be demoted to I915_TILING_NONE when the system 174870f2f5c7SChris Wilson * has bit 6 swizzling that can't be managed correctly by GEM. 174970f2f5c7SChris Wilson * 175070f2f5c7SChris Wilson * Buffer contents become undefined when changing tiling_mode. 175170f2f5c7SChris Wilson */ 175270f2f5c7SChris Wilson __u32 tiling_mode; 1753718dceddSDavid Howells 1754718dceddSDavid Howells /** 1755718dceddSDavid Howells * Stride in bytes for the object when in I915_TILING_X or 1756718dceddSDavid Howells * I915_TILING_Y. 1757718dceddSDavid Howells */ 1758718dceddSDavid Howells __u32 stride; 1759718dceddSDavid Howells 1760718dceddSDavid Howells /** 1761718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1762718dceddSDavid Howells * mmap mapping. 1763718dceddSDavid Howells */ 1764718dceddSDavid Howells __u32 swizzle_mode; 1765718dceddSDavid Howells }; 1766718dceddSDavid Howells 1767718dceddSDavid Howells struct drm_i915_gem_get_tiling { 1768718dceddSDavid Howells /** Handle of the buffer to get tiling state for. */ 1769718dceddSDavid Howells __u32 handle; 1770718dceddSDavid Howells 1771718dceddSDavid Howells /** 1772718dceddSDavid Howells * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1773718dceddSDavid Howells * I915_TILING_Y). 1774718dceddSDavid Howells */ 1775718dceddSDavid Howells __u32 tiling_mode; 1776718dceddSDavid Howells 1777718dceddSDavid Howells /** 1778718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1779718dceddSDavid Howells * mmap mapping. 1780718dceddSDavid Howells */ 1781718dceddSDavid Howells __u32 swizzle_mode; 1782718dceddSDavid Howells 1783718dceddSDavid Howells /** 1784718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1785718dceddSDavid Howells * mmap mapping whilst bound. 
1786718dceddSDavid Howells */
1787718dceddSDavid Howells __u32 phys_swizzle_mode;
1788718dceddSDavid Howells };
1789718dceddSDavid Howells
1790718dceddSDavid Howells struct drm_i915_gem_get_aperture {
1791718dceddSDavid Howells /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1792718dceddSDavid Howells __u64 aper_size;
1793718dceddSDavid Howells
1794718dceddSDavid Howells /**
1795718dceddSDavid Howells * Available space in the aperture used by i915_gem_execbuffer, in
1796718dceddSDavid Howells * bytes
1797718dceddSDavid Howells */
1798718dceddSDavid Howells __u64 aper_available_size;
1799718dceddSDavid Howells };
1800718dceddSDavid Howells
1801718dceddSDavid Howells struct drm_i915_get_pipe_from_crtc_id {
1802718dceddSDavid Howells /** ID of CRTC being requested **/
1803718dceddSDavid Howells __u32 crtc_id;
1804718dceddSDavid Howells
1805718dceddSDavid Howells /** pipe of requested CRTC **/
1806718dceddSDavid Howells __u32 pipe;
1807718dceddSDavid Howells };
1808718dceddSDavid Howells
1809718dceddSDavid Howells #define I915_MADV_WILLNEED 0
1810718dceddSDavid Howells #define I915_MADV_DONTNEED 1
1811718dceddSDavid Howells #define __I915_MADV_PURGED 2 /* internal state */
1812718dceddSDavid Howells
1813718dceddSDavid Howells struct drm_i915_gem_madvise {
1814718dceddSDavid Howells /** Handle of the buffer to change the backing store advice */
1815718dceddSDavid Howells __u32 handle;
1816718dceddSDavid Howells
1817718dceddSDavid Howells /* Advice: either the buffer will be needed again in the near future,
1818718dceddSDavid Howells * or won't be and could be discarded under memory pressure.
1819718dceddSDavid Howells */
1820718dceddSDavid Howells __u32 madv;
1821718dceddSDavid Howells
1822718dceddSDavid Howells /** Whether the backing store still exists. */
1823718dceddSDavid Howells __u32 retained;
1824718dceddSDavid Howells };
1825718dceddSDavid Howells
1826718dceddSDavid Howells /* flags */
1827718dceddSDavid Howells #define I915_OVERLAY_TYPE_MASK 0xff
1828718dceddSDavid Howells #define I915_OVERLAY_YUV_PLANAR 0x01
1829718dceddSDavid Howells #define I915_OVERLAY_YUV_PACKED 0x02
1830718dceddSDavid Howells #define I915_OVERLAY_RGB 0x03
1831718dceddSDavid Howells
1832718dceddSDavid Howells #define I915_OVERLAY_DEPTH_MASK 0xff00
1833718dceddSDavid Howells #define I915_OVERLAY_RGB24 0x1000
1834718dceddSDavid Howells #define I915_OVERLAY_RGB16 0x2000
1835718dceddSDavid Howells #define I915_OVERLAY_RGB15 0x3000
1836718dceddSDavid Howells #define I915_OVERLAY_YUV422 0x0100
1837718dceddSDavid Howells #define I915_OVERLAY_YUV411 0x0200
1838718dceddSDavid Howells #define I915_OVERLAY_YUV420 0x0300
1839718dceddSDavid Howells #define I915_OVERLAY_YUV410 0x0400
1840718dceddSDavid Howells
1841718dceddSDavid Howells #define I915_OVERLAY_SWAP_MASK 0xff0000
1842718dceddSDavid Howells #define I915_OVERLAY_NO_SWAP 0x000000
1843ea9da4e4SChris Wilson #define I915_OVERLAY_UV_SWAP 0x010000
1844718dceddSDavid Howells #define I915_OVERLAY_Y_SWAP 0x020000
1845718dceddSDavid Howells #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1846718dceddSDavid Howells
1847718dceddSDavid Howells #define I915_OVERLAY_FLAGS_MASK 0xff000000
1848718dceddSDavid Howells #define I915_OVERLAY_ENABLE 0x01000000
1849718dceddSDavid Howells
1850718dceddSDavid Howells struct drm_intel_overlay_put_image {
1851718dceddSDavid Howells /* various flags and src format description */
1852718dceddSDavid Howells __u32 flags;
1853718dceddSDavid Howells /* source picture description */
1854718dceddSDavid Howells __u32 bo_handle;
1855718dceddSDavid Howells /* stride values and offsets are in bytes, buffer relative */
1856718dceddSDavid Howells __u16 stride_Y; /* stride for packed formats */
1857718dceddSDavid Howells __u16 stride_UV;
1858718dceddSDavid Howells __u32 offset_Y; /* offset for packed formats */
1859718dceddSDavid Howells __u32 offset_U;
1860718dceddSDavid Howells __u32 offset_V;
1861718dceddSDavid Howells /* in pixels */
1862718dceddSDavid Howells __u16 src_width;
1863718dceddSDavid Howells __u16 src_height;
1864718dceddSDavid Howells /* to compensate the scaling factors for partially covered surfaces */
1865718dceddSDavid Howells __u16 src_scan_width;
1866718dceddSDavid Howells __u16 src_scan_height;
1867718dceddSDavid Howells /* output crtc description */
1868718dceddSDavid Howells __u32 crtc_id;
1869718dceddSDavid Howells __u16 dst_x;
1870718dceddSDavid Howells __u16 dst_y;
1871718dceddSDavid Howells __u16 dst_width;
1872718dceddSDavid Howells __u16 dst_height;
1873718dceddSDavid Howells };
1874718dceddSDavid Howells
1875718dceddSDavid Howells /* flags */
1876718dceddSDavid Howells #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1877718dceddSDavid Howells #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1878718dceddSDavid Howells #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
18796ec5bd34SVille Syrjälä struct drm_intel_overlay_attrs {
18806ec5bd34SVille Syrjälä __u32 flags;
18816ec5bd34SVille Syrjälä __u32 color_key;
1882718dceddSDavid Howells __s32 brightness;
1883718dceddSDavid Howells __u32 contrast;
1884718dceddSDavid Howells __u32 saturation;
1885718dceddSDavid Howells __u32 gamma0;
1886718dceddSDavid Howells __u32 gamma1;
1887718dceddSDavid Howells __u32 gamma2;
1888718dceddSDavid Howells __u32 gamma3;
1889718dceddSDavid Howells __u32 gamma4;
1890718dceddSDavid Howells __u32 gamma5;
1891718dceddSDavid Howells };
1892718dceddSDavid Howells
1893718dceddSDavid Howells /*
1894718dceddSDavid Howells * Intel sprite handling
1895718dceddSDavid Howells *
1896718dceddSDavid Howells * Color keying works with a min/mask/max tuple. Both source and destination
1897718dceddSDavid Howells * color keying is allowed.
1898718dceddSDavid Howells *
1899718dceddSDavid Howells * Source keying:
1900718dceddSDavid Howells * Sprite pixels within the min & max values, masked against the color channels
1901b9171541SChris Wilson * specified in the mask field, will be transparent. All other pixels will
1902718dceddSDavid Howells * be displayed on top of the primary plane. For RGB surfaces, only the min
1903718dceddSDavid Howells * and mask fields will be used; ranged compares are not allowed.
1904718dceddSDavid Howells *
1905a913bde8SNiranjana Vishwanathapura * Destination keying:
1906a913bde8SNiranjana Vishwanathapura * Primary plane pixels that match the min value, masked against the color
1907a913bde8SNiranjana Vishwanathapura * channels specified in the mask field, will be replaced by corresponding
1908b9171541SChris Wilson * pixels from the sprite plane.
1909a913bde8SNiranjana Vishwanathapura *
1910a913bde8SNiranjana Vishwanathapura * Note that source & destination keying are exclusive; only one can be
1911a913bde8SNiranjana Vishwanathapura * active on a given plane.
1912a913bde8SNiranjana Vishwanathapura */
1913a913bde8SNiranjana Vishwanathapura
1914a913bde8SNiranjana Vishwanathapura #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1915a913bde8SNiranjana Vishwanathapura * flags==0 to disable colorkeying.
1916a913bde8SNiranjana Vishwanathapura */
1917a913bde8SNiranjana Vishwanathapura #define I915_SET_COLORKEY_DESTINATION (1<<1)
1918a913bde8SNiranjana Vishwanathapura #define I915_SET_COLORKEY_SOURCE (1<<2)
1919a913bde8SNiranjana Vishwanathapura struct drm_intel_sprite_colorkey {
1920a913bde8SNiranjana Vishwanathapura __u32 plane_id;
1921a913bde8SNiranjana Vishwanathapura __u32 min_value;
1922a913bde8SNiranjana Vishwanathapura __u32 channel_mask;
1923a913bde8SNiranjana Vishwanathapura __u32 max_value;
1924b9171541SChris Wilson __u32 flags;
1925b9171541SChris Wilson };
19268319f44cSChris Wilson
1927b9171541SChris Wilson struct drm_i915_gem_wait {
19288319f44cSChris Wilson /** Handle of BO we shall wait on */
1929a913bde8SNiranjana Vishwanathapura __u32 bo_handle;
1930a913bde8SNiranjana Vishwanathapura __u32 flags;
1931a913bde8SNiranjana Vishwanathapura /** Number of nanoseconds to wait. Returns the time remaining. */
1932a913bde8SNiranjana Vishwanathapura __s64 timeout_ns;
1933a913bde8SNiranjana Vishwanathapura };
1934a913bde8SNiranjana Vishwanathapura
1935a913bde8SNiranjana Vishwanathapura struct drm_i915_gem_context_create {
1936a913bde8SNiranjana Vishwanathapura __u32 ctx_id; /* output: id of new context */
1937a913bde8SNiranjana Vishwanathapura __u32 pad;
1938a913bde8SNiranjana Vishwanathapura };
1939a913bde8SNiranjana Vishwanathapura
1940a913bde8SNiranjana Vishwanathapura /**
1941e0695db7SChris Wilson * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
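 *
 * A context created with an extension chain might look like this (an
 * illustrative sketch; it assumes an open DRM fd, libdrm's drmIoctl() and
 * the setparam extension described further below):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_prio = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = I915_CONTEXT_MAX_USER_PRIORITY,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext arg = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_prio,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg);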
1942a913bde8SNiranjana Vishwanathapura */ 1943a913bde8SNiranjana Vishwanathapura struct drm_i915_gem_context_create_ext { 19445cc9ed4bSChris Wilson /** @ctx_id: Id of the created context (output) */ 19445cc9ed4bSChris Wilson __u32 ctx_id; 1946a913bde8SNiranjana Vishwanathapura 1947a913bde8SNiranjana Vishwanathapura /** 1948a913bde8SNiranjana Vishwanathapura * @flags: Supported flags are: 1949c9dc0f35SChris Wilson * 1950a913bde8SNiranjana Vishwanathapura * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS: 1951c9dc0f35SChris Wilson * 1952a913bde8SNiranjana Vishwanathapura * Extensions may be appended to this structure and the driver must check 1953a913bde8SNiranjana Vishwanathapura * for them. See @extensions. 1954c9dc0f35SChris Wilson * 1955a913bde8SNiranjana Vishwanathapura * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE 1956a913bde8SNiranjana Vishwanathapura * 1957c9dc0f35SChris Wilson * The created context will have a single timeline. 1958c9dc0f35SChris Wilson */ 19596ff6d61dSJason Ekstrand __u32 flags; 19606ff6d61dSJason Ekstrand #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) 19616ff6d61dSJason Ekstrand #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) 19626ff6d61dSJason Ekstrand #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ 1963b1b38278SDavid Weinehall (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) 1964fa8848f2SChris Wilson 1965bc3d6744SChris Wilson /** 196684102171SMika Kuoppala * @extensions: Zero-terminated chain of extensions. 1967ac14fbd4SChris Wilson * 1968ac14fbd4SChris Wilson * I915_CONTEXT_CREATE_EXT_SETPARAM: 1969ac14fbd4SChris Wilson * Context parameter to set or query during context creation. 1970ac14fbd4SChris Wilson * See struct drm_i915_gem_context_create_ext_setparam. 1971e46c2e99STvrtko Ursulin * 1972e46c2e99STvrtko Ursulin * I915_CONTEXT_CREATE_EXT_CLONE: 1973e46c2e99STvrtko Ursulin * This extension has been removed. On the off chance someone somewhere 1974e46c2e99STvrtko Ursulin * has attempted to use it, never re-use this extension number. 1975e46c2e99STvrtko Ursulin */ 1976ba4fda62SChris Wilson __u64 extensions; 1977ba4fda62SChris Wilson #define I915_CONTEXT_CREATE_EXT_SETPARAM 0 1978ba4fda62SChris Wilson #define I915_CONTEXT_CREATE_EXT_CLONE 1 1979ba4fda62SChris Wilson }; 1980ba4fda62SChris Wilson 1981ba4fda62SChris Wilson /** 1982ba4fda62SChris Wilson * struct drm_i915_gem_context_param - Context parameter to set or query. 1983ba4fda62SChris Wilson */ 1984ba4fda62SChris Wilson struct drm_i915_gem_context_param { 1985ba4fda62SChris Wilson /** @ctx_id: Context id */ 1986ba4fda62SChris Wilson __u32 ctx_id; 1987ba4fda62SChris Wilson 1988ba4fda62SChris Wilson /** @size: Size of the parameter @value */ 1989ba4fda62SChris Wilson __u32 size; 1990ba4fda62SChris Wilson 1991ba4fda62SChris Wilson /** @param: Parameter to set or query */ 1992ba4fda62SChris Wilson __u64 param; 1993ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1994ba4fda62SChris Wilson /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance 1995ba4fda62SChris Wilson * someone somewhere has attempted to use it, never re-use this context 19967f3f317aSChris Wilson * param number.
19977f3f317aSChris Wilson */ 19987f3f317aSChris Wilson #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 19997f3f317aSChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 20007f3f317aSChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 20017f3f317aSChris Wilson #define I915_CONTEXT_PARAM_BANNABLE 0x5 20027f3f317aSChris Wilson #define I915_CONTEXT_PARAM_PRIORITY 0x6 20037f3f317aSChris Wilson #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 20047f3f317aSChris Wilson #define I915_CONTEXT_DEFAULT_PRIORITY 0 20057f3f317aSChris Wilson #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 20067f3f317aSChris Wilson /* 2007976b55f0SChris Wilson * When using the following param, value should be a pointer to 2008976b55f0SChris Wilson * drm_i915_gem_context_param_sseu. 2009976b55f0SChris Wilson */ 2010976b55f0SChris Wilson #define I915_CONTEXT_PARAM_SSEU 0x7 2011976b55f0SChris Wilson 2012976b55f0SChris Wilson /* 2013976b55f0SChris Wilson * Not all clients may want to attempt automatic recovery of a context after 2014976b55f0SChris Wilson * a hang (for example, some clients may only submit very small incremental 2015976b55f0SChris Wilson * batches relying on known logical state of previous batches which will never 2016976b55f0SChris Wilson * recover correctly and each attempt will hang), and so would prefer that 2017976b55f0SChris Wilson * the context is forever banned instead. 2018976b55f0SChris Wilson * 2019976b55f0SChris Wilson * If set to false (0), after a reset, subsequent (and in flight) rendering 2020976b55f0SChris Wilson * from this context is discarded, and the client will need to create a new 2021976b55f0SChris Wilson * context to use instead. 2022976b55f0SChris Wilson * 2023976b55f0SChris Wilson * If set to true (1), the kernel will automatically attempt to recover the 2024976b55f0SChris Wilson * context by skipping the hanging batch and executing the next batch starting 2025ee113690SChris Wilson * from the default context state (discarding the incomplete logical context 2026ee113690SChris Wilson * state lost due to the reset). 2027ee113690SChris Wilson * 2028ee113690SChris Wilson * On creation, all new contexts are marked as recoverable. 2029e5e32171SMatthew Brost */ 2030976b55f0SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE 0x8 2031976b55f0SChris Wilson 2032a0e04715SChris Wilson /* 2033a0e04715SChris Wilson * The id of the associated virtual memory address space (ppGTT) of 2034a0e04715SChris Wilson * this context. Can be retrieved and passed to another context 2035a0e04715SChris Wilson * (on the same fd) for both to use the same ppGTT and so share 2036a0e04715SChris Wilson * address layouts, and avoid reloading the page tables on context 2037a0e04715SChris Wilson * switches between themselves. 2038a0e04715SChris Wilson * 2039a0e04715SChris Wilson * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY. 2040a0e04715SChris Wilson */ 2041a0e04715SChris Wilson #define I915_CONTEXT_PARAM_VM 0x9 2042a0e04715SChris Wilson 2043a0e04715SChris Wilson /* 2044a0e04715SChris Wilson * I915_CONTEXT_PARAM_ENGINES: 2045a0e04715SChris Wilson * 2046a0e04715SChris Wilson * Bind this context to operate on this subset of available engines. Henceforth, 204788be76cdSChris Wilson * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as 2048fe4751c3SJason Ekstrand * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0] 2049fe4751c3SJason Ekstrand * and upwards. Slots 0...N are filled in using the specified (class, instance).
205088be76cdSChris Wilson * Use 205188be76cdSChris Wilson * engine_class: I915_ENGINE_CLASS_INVALID, 2052d3ac8d42SDaniele Ceraolo Spurio * engine_instance: I915_ENGINE_CLASS_INVALID_NONE 2053d3ac8d42SDaniele Ceraolo Spurio * to specify a gap in the array that can be filled in later, e.g. by a 2054d3ac8d42SDaniele Ceraolo Spurio * virtual engine used for load balancing. 2055d3ac8d42SDaniele Ceraolo Spurio * 2056d3ac8d42SDaniele Ceraolo Spurio * Setting the number of engines bound to the context to 0, by passing a zero 2057d3ac8d42SDaniele Ceraolo Spurio * sized argument, will revert back to default settings. 2058d3ac8d42SDaniele Ceraolo Spurio * 2059d3ac8d42SDaniele Ceraolo Spurio * See struct i915_context_param_engines. 2060d3ac8d42SDaniele Ceraolo Spurio * 2061d3ac8d42SDaniele Ceraolo Spurio * Extensions: 2062d3ac8d42SDaniele Ceraolo Spurio * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE) 2063d3ac8d42SDaniele Ceraolo Spurio * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) 2064d3ac8d42SDaniele Ceraolo Spurio * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT) 2065d3ac8d42SDaniele Ceraolo Spurio */ 2066d3ac8d42SDaniele Ceraolo Spurio #define I915_CONTEXT_PARAM_ENGINES 0xa 2067d3ac8d42SDaniele Ceraolo Spurio 2068d3ac8d42SDaniele Ceraolo Spurio /* 2069d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_PERSISTENCE: 2070d3ac8d42SDaniele Ceraolo Spurio * 2071d3ac8d42SDaniele Ceraolo Spurio * Allow the context and active rendering to survive the process until 2072d3ac8d42SDaniele Ceraolo Spurio * completion. Persistence allows fire-and-forget clients to queue up a 2073d3ac8d42SDaniele Ceraolo Spurio * bunch of work, hand the output over to a display server and then quit. 2074d3ac8d42SDaniele Ceraolo Spurio * If the context is marked as not persistent, upon closing (either via 2075d3ac8d42SDaniele Ceraolo Spurio * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure 2076d3ac8d42SDaniele Ceraolo Spurio * or process termination), the context and any outstanding requests will be 2077d3ac8d42SDaniele Ceraolo Spurio * cancelled (and exported fences for cancelled requests marked as -EIO). 2078d3ac8d42SDaniele Ceraolo Spurio * 2079d3ac8d42SDaniele Ceraolo Spurio * By default, new contexts allow persistence. 2080d3ac8d42SDaniele Ceraolo Spurio */ 2081d3ac8d42SDaniele Ceraolo Spurio #define I915_CONTEXT_PARAM_PERSISTENCE 0xb 2082d3ac8d42SDaniele Ceraolo Spurio 2083d3ac8d42SDaniele Ceraolo Spurio /* This API has been removed. On the off chance someone somewhere has 2084d3ac8d42SDaniele Ceraolo Spurio * attempted to use it, never re-use this context param number. 2085d3ac8d42SDaniele Ceraolo Spurio */ 2086d3ac8d42SDaniele Ceraolo Spurio #define I915_CONTEXT_PARAM_RINGSIZE 0xc 2087d3ac8d42SDaniele Ceraolo Spurio 2088d3ac8d42SDaniele Ceraolo Spurio /* 2089d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_PROTECTED_CONTENT: 2090d3ac8d42SDaniele Ceraolo Spurio * 2091d3ac8d42SDaniele Ceraolo Spurio * Mark that the context makes use of protected content, which will result 2092d3ac8d42SDaniele Ceraolo Spurio * in the context being invalidated when the protected content session is. 2093d3ac8d42SDaniele Ceraolo Spurio * Given that the protected content session is killed on suspend, the device 2094d3ac8d42SDaniele Ceraolo Spurio * is kept awake for the lifetime of a protected context, so the user should 2095d3ac8d42SDaniele Ceraolo Spurio * make sure to dispose of them once done. 
2096d3ac8d42SDaniele Ceraolo Spurio * This flag can only be set at context creation time and, when set to true, 2097d3ac8d42SDaniele Ceraolo Spurio * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE 2098d3ac8d42SDaniele Ceraolo Spurio * to false. This flag can't be set to true in conjunction with setting the 2099d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example: 2100d3ac8d42SDaniele Ceraolo Spurio * 2101be03564bSChris Wilson * .. code-block:: C 2102e0695db7SChris Wilson * 2103a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_context_create_ext_setparam p_protected = { 2104c9dc0f35SChris Wilson * .base = { 2105c9dc0f35SChris Wilson * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 2106c9dc0f35SChris Wilson * }, 21072ef6a01fSMatthew Auld * .param = { 2108e46c2e99STvrtko Ursulin * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT, 2109e46c2e99STvrtko Ursulin * .value = 1, 2110e46c2e99STvrtko Ursulin * } 2111e46c2e99STvrtko Ursulin * }; 2112e46c2e99STvrtko Ursulin * struct drm_i915_gem_context_create_ext_setparam p_norecover = { 2113e46c2e99STvrtko Ursulin * .base = { 2114e46c2e99STvrtko Ursulin * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 2115e46c2e99STvrtko Ursulin * .next_extension = to_user_pointer(&p_protected), 2116e46c2e99STvrtko Ursulin * }, 2117e46c2e99STvrtko Ursulin * .param = { 2118e46c2e99STvrtko Ursulin * .param = I915_CONTEXT_PARAM_RECOVERABLE, 2119e46c2e99STvrtko Ursulin * .value = 0, 2120e46c2e99STvrtko Ursulin * } 2121e46c2e99STvrtko Ursulin * }; 2122e46c2e99STvrtko Ursulin * struct drm_i915_gem_context_create_ext create = { 2123e46c2e99STvrtko Ursulin * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, 2124e46c2e99STvrtko Ursulin * .extensions = to_user_pointer(&p_norecover); 2125e46c2e99STvrtko Ursulin * }; 2126e46c2e99STvrtko Ursulin * 2127e46c2e99STvrtko Ursulin * ctx_id = gem_context_create_ext(drm_fd, &create); 2128e46c2e99STvrtko Ursulin * 2129e46c2e99STvrtko Ursulin * In addition to the normal failure cases, setting this flag during context 2130e46c2e99STvrtko Ursulin * creation can result in the following errors: 2131e46c2e99STvrtko Ursulin * 2132d1172ab3SChris Wilson * -ENODEV: feature not available 2133e46c2e99STvrtko Ursulin * -EPERM: trying to mark a recoverable or not bannable context as protected 2134e46c2e99STvrtko Ursulin * -ENXIO: A dependency such as a component driver or firmware is not yet 2135e620f7b3SChris Wilson * loaded so user space may need to attempt again. 
Depending on the 2136e46c2e99STvrtko Ursulin * device, this error may be reported if protected context creation is 2137e46c2e99STvrtko Ursulin * attempted very early after kernel start because the internal timeout 2138e620f7b3SChris Wilson * waiting for such dependencies is not guaranteed to be larger than 2139e46c2e99STvrtko Ursulin * required (numbers differ depending on system and kernel config): 2140e46c2e99STvrtko Ursulin * - ADL/RPL: dependencies may take up to 3 seconds from kernel start 2141e46c2e99STvrtko Ursulin * while context creation internal timeout is 250 milliseconds 2142e46c2e99STvrtko Ursulin * - MTL: dependencies may take up to 8 seconds from kernel start 2143e46c2e99STvrtko Ursulin * while context creation internal timeout is 250 milliseconds 2144e46c2e99STvrtko Ursulin * NOTE: such dependencies happen once, so a subsequent call to create a 2145e46c2e99STvrtko Ursulin * protected context after a prior successful call will not experience 2146e46c2e99STvrtko Ursulin * such timeouts and will not return -ENXIO (unless the driver is reloaded, 2147e46c2e99STvrtko Ursulin * or, depending on the device, resumes from a suspended state). 2148e46c2e99STvrtko Ursulin * -EIO: The firmware did not succeed in creating the protected context. 2149e46c2e99STvrtko Ursulin */ 2150e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd 2151e46c2e99STvrtko Ursulin /* Must be kept compact -- no holes and well documented */ 2152e46c2e99STvrtko Ursulin 2153e46c2e99STvrtko Ursulin /** @value: Context parameter value to be set or queried */ 2154e46c2e99STvrtko Ursulin __u64 value; 2155e46c2e99STvrtko Ursulin }; 2156e46c2e99STvrtko Ursulin 2157e46c2e99STvrtko Ursulin /* 2158e46c2e99STvrtko Ursulin * Context SSEU programming 2159e46c2e99STvrtko Ursulin * 2160e46c2e99STvrtko Ursulin * It may be necessary for either functional or performance reasons to configure 2161e46c2e99STvrtko Ursulin * a context to run with a reduced number of SSEU (where SSEU stands for Slice/ 2162e46c2e99STvrtko Ursulin * Sub-slice/EU). 2163e46c2e99STvrtko Ursulin * 2164e46c2e99STvrtko Ursulin * This is done by supplying an SSEU configuration using the below 2165e46c2e99STvrtko Ursulin * struct drm_i915_gem_context_param_sseu for every supported engine which 216657772953STvrtko Ursulin * userspace intends to use. 216757772953STvrtko Ursulin * 216857772953STvrtko Ursulin * Not all GPUs or engines support this functionality, in which case an error 216957772953STvrtko Ursulin * code -ENODEV will be returned. 217057772953STvrtko Ursulin * 217157772953STvrtko Ursulin * Also, the flexibility of possible SSEU configuration permutations varies between 217257772953STvrtko Ursulin * GPU generations and is subject to software-imposed limitations. Requesting an 217357772953STvrtko Ursulin * unsupported combination will return an error code of -EINVAL. 217457772953STvrtko Ursulin * 217557772953STvrtko Ursulin * NOTE: When perf/OA is active the context's SSEU configuration is ignored in 217657772953STvrtko Ursulin * favour of a single global setting. 217757772953STvrtko Ursulin */ 217857772953STvrtko Ursulin struct drm_i915_gem_context_param_sseu { 217957772953STvrtko Ursulin /* 218057772953STvrtko Ursulin * Engine class & instance to be configured or queried. 218157772953STvrtko Ursulin */ 218257772953STvrtko Ursulin struct i915_engine_class_instance engine; 218357772953STvrtko Ursulin 218457772953STvrtko Ursulin /* 218557772953STvrtko Ursulin * Unknown flags must be cleared to zero.
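 *
 * When I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX below is set, @engine is
 * interpreted as an index into this context's engine map (see
 * I915_CONTEXT_PARAM_ENGINES) rather than as a literal class:instance pair.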
218657772953STvrtko Ursulin */ 218757772953STvrtko Ursulin __u32 flags; 218857772953STvrtko Ursulin #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0) 218957772953STvrtko Ursulin 219057772953STvrtko Ursulin /* 219157772953STvrtko Ursulin * Mask of slices to enable for the context. Valid values are a subset 219257772953STvrtko Ursulin * of the bitmask value returned by I915_PARAM_SLICE_MASK. 219357772953STvrtko Ursulin */ 219457772953STvrtko Ursulin __u64 slice_mask; 219557772953STvrtko Ursulin 219657772953STvrtko Ursulin /* 219757772953STvrtko Ursulin * Mask of subslices to enable for the context. Valid values are a 219857772953STvrtko Ursulin * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK. 219957772953STvrtko Ursulin */ 220057772953STvrtko Ursulin __u64 subslice_mask; 220157772953STvrtko Ursulin 220257772953STvrtko Ursulin /* 220357772953STvrtko Ursulin * Minimum/Maximum number of EUs to enable per subslice for the 220457772953STvrtko Ursulin * context. min_eus_per_subslice must be less than or equal to 220557772953STvrtko Ursulin * max_eus_per_subslice. 220657772953STvrtko Ursulin */ 220757772953STvrtko Ursulin __u16 min_eus_per_subslice; 220857772953STvrtko Ursulin __u16 max_eus_per_subslice; 220957772953STvrtko Ursulin 221057772953STvrtko Ursulin /* 221157772953STvrtko Ursulin * Unused for now. Must be cleared to zero. 221257772953STvrtko Ursulin */ 221357772953STvrtko Ursulin __u32 rsvd; 221457772953STvrtko Ursulin }; 221557772953STvrtko Ursulin 221657772953STvrtko Ursulin /** 221757772953STvrtko Ursulin * DOC: Virtual Engine uAPI 221857772953STvrtko Ursulin * 221957772953STvrtko Ursulin * Virtual engine is a concept where userspace is able to configure a set of 222057772953STvrtko Ursulin * physical engines, submit a batch buffer, and let the driver execute it on any 222157772953STvrtko Ursulin * engine from the set as it sees fit. 222257772953STvrtko Ursulin * 222357772953STvrtko Ursulin * This is primarily useful on parts which have multiple instances of the same 222457772953STvrtko Ursulin * engine class, like for example GT3+ Skylake parts with their two VCS engines. 222557772953STvrtko Ursulin * 222657772953STvrtko Ursulin * For instance, userspace can enumerate all engines of a certain class using the 222757772953STvrtko Ursulin * previously described `Engine Discovery uAPI`_. After that userspace can 222857772953STvrtko Ursulin * create a GEM context with a placeholder slot for the virtual engine (using 22296d06779eSChris Wilson * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class 22306d06779eSChris Wilson * and instance respectively) and finally using the 22316d06779eSChris Wilson * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in 22326d06779eSChris Wilson * the same reserved slot. 22336d06779eSChris Wilson * 22346d06779eSChris Wilson * Example of creating a virtual engine and submitting a batch buffer to it: 22356d06779eSChris Wilson * 22366d06779eSChris Wilson * ..
code-block:: C 22376d06779eSChris Wilson * 22386d06779eSChris Wilson * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = { 22396d06779eSChris Wilson * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE, 22406d06779eSChris Wilson * .engine_index = 0, // Place this virtual engine into engine map slot 0 22416d06779eSChris Wilson * .num_siblings = 2, 22426d06779eSChris Wilson * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 }, 22436d06779eSChris Wilson * { I915_ENGINE_CLASS_VIDEO, 1 }, }, 22446d06779eSChris Wilson * }; 22456d06779eSChris Wilson * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = { 22466d06779eSChris Wilson * .engines = { { I915_ENGINE_CLASS_INVALID, 22476d06779eSChris Wilson * I915_ENGINE_CLASS_INVALID_NONE } }, 22486d06779eSChris Wilson * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension 22496d06779eSChris Wilson * }; 22506d06779eSChris Wilson * struct drm_i915_gem_context_create_ext_setparam p_engines = { 22516d06779eSChris Wilson * .base = { 22526d06779eSChris Wilson * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 22536d06779eSChris Wilson * }, 225494dfc73eSGustavo A. R. Silva * .param = { 22556d06779eSChris Wilson * .param = I915_CONTEXT_PARAM_ENGINES, 22566d06779eSChris Wilson * .value = to_user_pointer(&engines), 22576d06779eSChris Wilson * .size = sizeof(engines), 22586d06779eSChris Wilson * }, 22596d06779eSChris Wilson * }; 22606d06779eSChris Wilson * struct drm_i915_gem_context_create_ext create = { 22616d06779eSChris Wilson * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, 22626d06779eSChris Wilson * .extensions = to_user_pointer(&p_engines); 22636d06779eSChris Wilson * }; 22646d06779eSChris Wilson * 22656d06779eSChris Wilson * ctx_id = gem_context_create_ext(drm_fd, &create); 2266ee113690SChris Wilson * 2267ee113690SChris Wilson * // Now we have created a GEM context with its engine map containing a 2268ee113690SChris Wilson * // single virtual engine. Submissions to this slot can go either to 2269ee113690SChris Wilson * // vcs0 or vcs1, depending on the load balancing algorithm used inside 2270ee113690SChris Wilson * // the driver. The load balancing is dynamic from one batch buffer to 2271ee113690SChris Wilson * // another and transparent to userspace. 2272ee113690SChris Wilson * 2273ee113690SChris Wilson * ... 2274ee113690SChris Wilson * execbuf.rsvd1 = ctx_id; 2275ee113690SChris Wilson * execbuf.flags = 0; // Submits to index 0 which is the virtual engine 2276ee113690SChris Wilson * gem_execbuf(drm_fd, &execbuf); 2277ee113690SChris Wilson */ 2278ee113690SChris Wilson 2279ee113690SChris Wilson /* 2280ee113690SChris Wilson * i915_context_engines_load_balance: 2281ee113690SChris Wilson * 2282ee113690SChris Wilson * Enable load balancing across this set of engines. 2283ee113690SChris Wilson * 2284ee113690SChris Wilson * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when 2285ee113690SChris Wilson * used will proxy the execbuffer request onto one of the set of engines 2286ee113690SChris Wilson * in such a way as to distribute the load evenly across the set. 2287ee113690SChris Wilson * 2288ee113690SChris Wilson * The set of engines must be compatible (e.g. the same HW class) as they 2289ee113690SChris Wilson * will share the same logical GPU context and ring. 2290ee113690SChris Wilson * 2291ee113690SChris Wilson * To intermix rendering with the virtual engine and direct rendering onto 229294dfc73eSGustavo A. R. 
Silva * the backing engines (bypassing the load balancing proxy), the context must 2293ee113690SChris Wilson * be defined to use a single timeline for all engines. 2294ee113690SChris Wilson */ 2295ee113690SChris Wilson struct i915_context_engines_load_balance { 2296ee113690SChris Wilson struct i915_user_extension base; 2297ee113690SChris Wilson 2298ee113690SChris Wilson __u16 engine_index; 2299ee113690SChris Wilson __u16 num_siblings; 2300ee113690SChris Wilson __u32 flags; /* all undefined flags must be zero */ 2301ee113690SChris Wilson 2302ee113690SChris Wilson __u64 mbz64; /* reserved for future use; must be zero */ 2303ee113690SChris Wilson 2304ee113690SChris Wilson struct i915_engine_class_instance engines[]; 230557772953STvrtko Ursulin } __attribute__((packed)); 2306e5e32171SMatthew Brost 2307e5e32171SMatthew Brost #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \ 2308e5e32171SMatthew Brost struct i915_user_extension base; \ 2309e5e32171SMatthew Brost __u16 engine_index; \ 2310e5e32171SMatthew Brost __u16 num_siblings; \ 2311e5e32171SMatthew Brost __u32 flags; \ 2312e5e32171SMatthew Brost __u64 mbz64; \ 2313e5e32171SMatthew Brost struct i915_engine_class_instance engines[N__]; \ 2314e5e32171SMatthew Brost } __attribute__((packed)) name__ 2315e5e32171SMatthew Brost 2316e5e32171SMatthew Brost /* 2317e5e32171SMatthew Brost * i915_context_engines_bond: 2318e5e32171SMatthew Brost * 2319e5e32171SMatthew Brost * Constructed bonded pairs for execution within a virtual engine. 2320e5e32171SMatthew Brost * 2321e5e32171SMatthew Brost * All engines are equal, but some are more equal than others. Given 2322e5e32171SMatthew Brost * the distribution of resources in the HW, it may be preferable to run 2323e5e32171SMatthew Brost * a request on a given subset of engines in parallel to a request on a 2324e5e32171SMatthew Brost * specific engine. We enable this selection of engines within a virtual 2325e5e32171SMatthew Brost * engine by specifying bonding pairs; for any given master engine we will 2326e5e32171SMatthew Brost * only execute on one of the corresponding siblings within the virtual engine. 2327e5e32171SMatthew Brost * 2328e5e32171SMatthew Brost * To execute a request in parallel on the master engine and a sibling requires 2329e5e32171SMatthew Brost * coordination with an I915_EXEC_FENCE_SUBMIT.
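 *
 * For instance, a single bond saying "when the master request runs on vcs0,
 * place the bonded request on vecs0" could be declared like this (a sketch
 * only; the class/instance numbers are illustrative):
 *
 * .. code-block:: C
 *
 *     I915_DEFINE_CONTEXT_ENGINES_BOND(bond, 1) = {
 *         .base.name = I915_CONTEXT_ENGINES_EXT_BOND,
 *         .master = { I915_ENGINE_CLASS_VIDEO, 0 },
 *         .virtual_index = 0, // index of the virtual engine in ctx->engines[]
 *         .num_bonds = 1,
 *         .engines = { { I915_ENGINE_CLASS_VIDEO_ENHANCE, 0 } },
 *     };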
2330e5e32171SMatthew Brost */ 2331e5e32171SMatthew Brost struct i915_context_engines_bond { 2332e5e32171SMatthew Brost struct i915_user_extension base; 2333e5e32171SMatthew Brost 2334e5e32171SMatthew Brost struct i915_engine_class_instance master; 2335e5e32171SMatthew Brost 2336e5e32171SMatthew Brost __u16 virtual_index; /* index of virtual engine in ctx->engines[] */ 2337e5e32171SMatthew Brost __u16 num_bonds; 2338e5e32171SMatthew Brost 2339e5e32171SMatthew Brost __u64 flags; /* all undefined flags must be zero */ 2340e5e32171SMatthew Brost __u64 mbz64[4]; /* reserved for future use; must be zero */ 2341e5e32171SMatthew Brost 2342e5e32171SMatthew Brost struct i915_engine_class_instance engines[]; 2343e5e32171SMatthew Brost } __attribute__((packed)); 2344e5e32171SMatthew Brost 2345e5e32171SMatthew Brost #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \ 2346e5e32171SMatthew Brost struct i915_user_extension base; \ 2347e5e32171SMatthew Brost struct i915_engine_class_instance master; \ 2348e5e32171SMatthew Brost __u16 virtual_index; \ 2349e5e32171SMatthew Brost __u16 num_bonds; \ 2350e5e32171SMatthew Brost __u64 flags; \ 2351e5e32171SMatthew Brost __u64 mbz64[4]; \ 2352e5e32171SMatthew Brost struct i915_engine_class_instance engines[N__]; \ 2353e5e32171SMatthew Brost } __attribute__((packed)) name__ 2354e5e32171SMatthew Brost 2355e5e32171SMatthew Brost /** 2356e5e32171SMatthew Brost * struct i915_context_engines_parallel_submit - Configure engine for 2357e5e32171SMatthew Brost * parallel submission. 2358e5e32171SMatthew Brost * 2359e5e32171SMatthew Brost * Set up a slot in the context engine map to allow multiple BBs to be submitted 2360e5e32171SMatthew Brost * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU 2361e5e32171SMatthew Brost * in parallel. Multiple hardware contexts are created internally in the i915 to 2362e5e32171SMatthew Brost * run these BBs. Once a slot is configured for N BBs, only N BBs can be 2363e5e32171SMatthew Brost * submitted in each execbuf IOCTL; this is implicit behavior, i.e. the user 2364e5e32171SMatthew Brost * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how 2365e5e32171SMatthew Brost * many BBs there are based on the slot's configuration. The N BBs are the last 2366e5e32171SMatthew Brost * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set. 2367e5e32171SMatthew Brost * 2368e5e32171SMatthew Brost * The default placement behavior is to create implicit bonds between each 2369e5e32171SMatthew Brost * context if each context maps to more than 1 physical engine (e.g. context is 2370e5e32171SMatthew Brost * a virtual engine). Also, we only allow contexts of the same engine class and these 2371e5e32171SMatthew Brost * contexts must be in logically contiguous order. Examples of the placement 2372e5e32171SMatthew Brost * behavior are described below. Lastly, the default is to not allow BBs to be 2373e5e32171SMatthew Brost * preempted mid-batch; rather, coordinated preemption points are inserted on all 2374e5e32171SMatthew Brost * hardware contexts between each set of BBs. Flags could be added in the future 2375e5e32171SMatthew Brost * to change both of these default behaviors. 2376e5e32171SMatthew Brost * 2377e5e32171SMatthew Brost * Returns -EINVAL if hardware context placement configuration is invalid or if 2378e5e32171SMatthew Brost * the placement configuration isn't supported on the platform / submission 2379e5e32171SMatthew Brost * interface.
2380e5e32171SMatthew Brost * Returns -ENODEV if the extension isn't supported on the platform / submission 2381e5e32171SMatthew Brost * interface. 2382e5e32171SMatthew Brost * 2383e5e32171SMatthew Brost * .. code-block:: none 2384e5e32171SMatthew Brost * 2385e5e32171SMatthew Brost * Example syntax: 2386e5e32171SMatthew Brost * CS[X] = generic engine of same class, logical instance X 2387e5e32171SMatthew Brost * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE 2388e5e32171SMatthew Brost * 2389e5e32171SMatthew Brost * Example 1 pseudo code: 2390e5e32171SMatthew Brost * set_engines(INVALID) 2391e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=1, 2392e5e32171SMatthew Brost * engines=CS[0],CS[1]) 2393e5e32171SMatthew Brost * 2394e5e32171SMatthew Brost * Results in the following valid placement: 2395e5e32171SMatthew Brost * CS[0], CS[1] 2396e5e32171SMatthew Brost * 2397e5e32171SMatthew Brost * Example 2 pseudo code: 2398e5e32171SMatthew Brost * set_engines(INVALID) 2399e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=2, 2400e5e32171SMatthew Brost * engines=CS[0],CS[2],CS[1],CS[3]) 2401e5e32171SMatthew Brost * 2402e5e32171SMatthew Brost * Results in the following valid placements: 2403e5e32171SMatthew Brost * CS[0], CS[1] 2404e5e32171SMatthew Brost * CS[2], CS[3] 2405e5e32171SMatthew Brost * 2406e5e32171SMatthew Brost * This can be thought of as two virtual engines, each containing two 2407e5e32171SMatthew Brost * engines, thereby making a 2D array. However, there are bonds tying the 2408e5e32171SMatthew Brost * entries together and placing restrictions on how they can be scheduled. 2409e5e32171SMatthew Brost * Specifically, the scheduler can choose only vertical columns from the 2D 2410e5e32171SMatthew Brost * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the 2411e5e32171SMatthew Brost * scheduler wants to submit to CS[0], it must also choose CS[1] and vice 2412e5e32171SMatthew Brost * versa. Likewise, using CS[2] requires also using CS[3]. 2413e5e32171SMatthew Brost * VE[0] = CS[0], CS[2] 2414e5e32171SMatthew Brost * VE[1] = CS[1], CS[3] 2415e5e32171SMatthew Brost * 2416e5e32171SMatthew Brost * Example 3 pseudo code: 2417e5e32171SMatthew Brost * set_engines(INVALID) 2418e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=2, 241994dfc73eSGustavo A. R. Silva * engines=CS[0],CS[1],CS[1],CS[3]) 2420e5e32171SMatthew Brost * 2421e5e32171SMatthew Brost * Results in the following valid and invalid placements: 2422e5e32171SMatthew Brost * CS[0], CS[1] 2423e5e32171SMatthew Brost * CS[1], CS[3] - Not logically contiguous, return -EINVAL 2424e5e32171SMatthew Brost */ 2425e5e32171SMatthew Brost struct i915_context_engines_parallel_submit { 2426e5e32171SMatthew Brost /** 2427e5e32171SMatthew Brost * @base: base user extension.
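 *
 * The extension is chained via @base. As a rough illustration (a sketch;
 * the engine numbering is hypothetical), Example 1 above could be declared
 * with the helper macro defined below as:
 *
 * .. code-block:: C
 *
 *     I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
 *         .base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
 *         .engine_index = 0,
 *         .width = 2,
 *         .num_siblings = 1,
 *         .engines = { { I915_ENGINE_CLASS_COPY, 0 },
 *                      { I915_ENGINE_CLASS_COPY, 1 } },
 *     };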
2428e5e32171SMatthew Brost */ 2429e5e32171SMatthew Brost struct i915_user_extension base; 2430e5e32171SMatthew Brost 2431e5e32171SMatthew Brost /** 2432e5e32171SMatthew Brost * @engine_index: slot for parallel engine 2433e5e32171SMatthew Brost */ 2434e5e32171SMatthew Brost __u16 engine_index; 243557772953STvrtko Ursulin 243657772953STvrtko Ursulin /** 243757772953STvrtko Ursulin * @width: number of contexts per parallel engine or in other words the 243857772953STvrtko Ursulin * number of batches in each submission 243957772953STvrtko Ursulin */ 244057772953STvrtko Ursulin __u16 width; 244157772953STvrtko Ursulin 244257772953STvrtko Ursulin /** 244357772953STvrtko Ursulin * @num_siblings: number of siblings per context or in other words the 244457772953STvrtko Ursulin * number of possible placements for each submission 244557772953STvrtko Ursulin */ 244657772953STvrtko Ursulin __u16 num_siblings; 244757772953STvrtko Ursulin 244857772953STvrtko Ursulin /** 244957772953STvrtko Ursulin * @mbz16: reserved for future use; must be zero 245057772953STvrtko Ursulin */ 245157772953STvrtko Ursulin __u16 mbz16; 245257772953STvrtko Ursulin 245357772953STvrtko Ursulin /** 245457772953STvrtko Ursulin * @flags: all undefined flags must be zero; currently no flags are defined 245557772953STvrtko Ursulin */ 245657772953STvrtko Ursulin __u64 flags; 245757772953STvrtko Ursulin 245857772953STvrtko Ursulin /** 245957772953STvrtko Ursulin * @mbz64: reserved for future use; must be zero 246057772953STvrtko Ursulin */ 246157772953STvrtko Ursulin __u64 mbz64[3]; 246257772953STvrtko Ursulin 246357772953STvrtko Ursulin /** 246457772953STvrtko Ursulin * @engines: 2-d array of engine instances to configure parallel engine 246557772953STvrtko Ursulin * 246657772953STvrtko Ursulin * length = width (i) * num_siblings (j) 246757772953STvrtko Ursulin * index = j + i * num_siblings 246857772953STvrtko Ursulin */ 246957772953STvrtko Ursulin struct i915_engine_class_instance engines[]; 247057772953STvrtko Ursulin 247157772953STvrtko Ursulin } __packed; 247257772953STvrtko Ursulin 247357772953STvrtko Ursulin #define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \ 247457772953STvrtko Ursulin struct i915_user_extension base; \ 247557772953STvrtko Ursulin __u16 engine_index; \ 247657772953STvrtko Ursulin __u16 width; \ 247757772953STvrtko Ursulin __u16 num_siblings; \ 247857772953STvrtko Ursulin __u16 mbz16; \ 247957772953STvrtko Ursulin __u64 flags; \ 248057772953STvrtko Ursulin __u64 mbz64[3]; \ 248157772953STvrtko Ursulin struct i915_engine_class_instance engines[N__]; \ 248257772953STvrtko Ursulin } __attribute__((packed)) name__ 248357772953STvrtko Ursulin 248457772953STvrtko Ursulin /** 248557772953STvrtko Ursulin * DOC: Context Engine Map uAPI 248657772953STvrtko Ursulin * 248757772953STvrtko Ursulin * Context engine map is a new way of addressing engines when submitting batch- 248857772953STvrtko Ursulin * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT` 2489976b55f0SChris Wilson * inside the flags field of `struct drm_i915_gem_execbuffer2`. 2490976b55f0SChris Wilson * 24916d06779eSChris Wilson * To use it, created GEM contexts need to be configured with a list of engines 2492ee113690SChris Wilson * the user is intending to submit to. This is accomplished using the 2493e5e32171SMatthew Brost * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct 249402abecdeSGustavo A. R. Silva * i915_context_param_engines`.
2495976b55f0SChris Wilson * 2496976b55f0SChris Wilson * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the 2497976b55f0SChris Wilson * configured map. 2498976b55f0SChris Wilson * 2499976b55f0SChris Wilson * Example of creating such context and submitting against it: 2500976b55f0SChris Wilson * 2501976b55f0SChris Wilson * .. code-block:: C 2502a913bde8SNiranjana Vishwanathapura * 2503a913bde8SNiranjana Vishwanathapura * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = { 2504a913bde8SNiranjana Vishwanathapura * .engines = { { I915_ENGINE_CLASS_RENDER, 0 }, 2505a913bde8SNiranjana Vishwanathapura * { I915_ENGINE_CLASS_COPY, 0 } } 2506b9171541SChris Wilson * }; 2507a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_context_create_ext_setparam p_engines = { 2508b9171541SChris Wilson * .base = { 2509a913bde8SNiranjana Vishwanathapura * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, 2510a913bde8SNiranjana Vishwanathapura * }, 2511a913bde8SNiranjana Vishwanathapura * .param = { 2512a913bde8SNiranjana Vishwanathapura * .param = I915_CONTEXT_PARAM_ENGINES, 2513a913bde8SNiranjana Vishwanathapura * .value = to_user_pointer(&engines), 2514b9171541SChris Wilson * .size = sizeof(engines), 2515b9171541SChris Wilson * }, 2516b9171541SChris Wilson * }; 2517b9171541SChris Wilson * struct drm_i915_gem_context_create_ext create = { 2518b9171541SChris Wilson * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, 2519b9171541SChris Wilson * .extensions = to_user_pointer(&p_engines); 2520b9171541SChris Wilson * }; 2521b9171541SChris Wilson * 2522a913bde8SNiranjana Vishwanathapura * ctx_id = gem_context_create_ext(drm_fd, &create); 2523a913bde8SNiranjana Vishwanathapura * 2524a913bde8SNiranjana Vishwanathapura * // We have now created a GEM context with two engines in the map: 2525b9171541SChris Wilson * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines 2526b9171541SChris Wilson * // will not be accessible from this context. 2527b9171541SChris Wilson * 2528b9171541SChris Wilson * ... 2529b9171541SChris Wilson * execbuf.rsvd1 = ctx_id; 2530b9171541SChris Wilson * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context 2531b9171541SChris Wilson * gem_execbuf(drm_fd, &execbuf); 2532b9171541SChris Wilson * 2533b9171541SChris Wilson * ... 
2534b9171541SChris Wilson * execbuf.rsvd1 = ctx_id; 2535b9171541SChris Wilson * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context 2536b9171541SChris Wilson * gem_execbuf(drm_fd, &execbuf); 2537b9171541SChris Wilson */ 2538b9171541SChris Wilson 2539a913bde8SNiranjana Vishwanathapura struct i915_context_param_engines { 2540b9171541SChris Wilson __u64 extensions; /* linked chain of extension blocks, 0 terminates */ 2541b9171541SChris Wilson #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ 2542b9171541SChris Wilson #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */ 2543b9171541SChris Wilson #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */ 2544a913bde8SNiranjana Vishwanathapura struct i915_engine_class_instance engines[]; 2545b9171541SChris Wilson } __attribute__((packed)); 2546a913bde8SNiranjana Vishwanathapura 2547a913bde8SNiranjana Vishwanathapura #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \ 2548b9171541SChris Wilson __u64 extensions; \ 2549a913bde8SNiranjana Vishwanathapura struct i915_engine_class_instance engines[N__]; \ 2550a913bde8SNiranjana Vishwanathapura } __attribute__((packed)) name__ 2551b9171541SChris Wilson 2552b9171541SChris Wilson /** 2553b9171541SChris Wilson * struct drm_i915_gem_context_create_ext_setparam - Context parameter 2554b9171541SChris Wilson * to set or query during context creation. 2555b9171541SChris Wilson */ 2556b9171541SChris Wilson struct drm_i915_gem_context_create_ext_setparam { 2557b9171541SChris Wilson /** @base: Extension link. See struct i915_user_extension. */ 2558b9171541SChris Wilson struct i915_user_extension base; 2559b9171541SChris Wilson 2560b9171541SChris Wilson /** 2561b9171541SChris Wilson * @param: Context parameter to set or query. 2562b9171541SChris Wilson * See struct drm_i915_gem_context_param. 2563b9171541SChris Wilson */ 2564b9171541SChris Wilson struct drm_i915_gem_context_param param; 2565b9171541SChris Wilson }; 2566b9171541SChris Wilson 2567b9171541SChris Wilson struct drm_i915_gem_context_destroy { 2568b9171541SChris Wilson __u32 ctx_id; 2569b9171541SChris Wilson __u32 pad; 2570b9171541SChris Wilson }; 2571b9171541SChris Wilson 2572b9171541SChris Wilson /** 2573b9171541SChris Wilson * struct drm_i915_gem_vm_control - Structure to create or destroy VM. 2574b9171541SChris Wilson * 2575b9171541SChris Wilson * DRM_I915_GEM_VM_CREATE - 2576b9171541SChris Wilson * 2577b9171541SChris Wilson * Create a new virtual memory address space (ppGTT) for use within a context 2578b9171541SChris Wilson * on the same file. Extensions can be provided to configure exactly how the 2579b9171541SChris Wilson * address space is set up upon creation. 2580b9171541SChris Wilson * 2581b9171541SChris Wilson * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is 2582b9171541SChris Wilson * returned in the outparam @vm_id. 2583b9171541SChris Wilson * 2584b9171541SChris Wilson * An extension chain may be provided, starting with @extensions, and terminated 2585b9171541SChris Wilson * by the @next_extension being 0. Currently, no extensions are defined. 2586b9171541SChris Wilson * 2587b9171541SChris Wilson * DRM_I915_GEM_VM_DESTROY - 2588b9171541SChris Wilson * 2589b9171541SChris Wilson * Destroys a previously created VM id, specified in @vm_id. 2590b9171541SChris Wilson * 2591b9171541SChris Wilson * No extensions or flags are allowed currently, and so must be zero.
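 *
 * A minimal create/destroy flow might look like this (a sketch, assuming a
 * valid DRM file descriptor in fd):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_vm_control ctl = {};
 *
 *     ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *     // ... use ctl.vm_id with I915_CONTEXT_PARAM_VM ...
 *     ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);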
2592aef7b67aSMatthew Auld */ 2593aef7b67aSMatthew Auld struct drm_i915_gem_vm_control { 2594aef7b67aSMatthew Auld /** @extensions: Zero-terminated chain of extensions. */ 2595aef7b67aSMatthew Auld __u64 extensions; 2596aef7b67aSMatthew Auld 2597aef7b67aSMatthew Auld /** @flags: reserved for future usage, currently MBZ */ 2598b9171541SChris Wilson __u32 flags; 2599aef7b67aSMatthew Auld 2600aef7b67aSMatthew Auld /** @vm_id: Id of the VM created or to be destroyed */ 2601aef7b67aSMatthew Auld __u32 vm_id; 2602aef7b67aSMatthew Auld }; 2603aef7b67aSMatthew Auld 2604b9171541SChris Wilson struct drm_i915_reg_read { 2605aef7b67aSMatthew Auld /* 2606aef7b67aSMatthew Auld * Register offset. 2607aef7b67aSMatthew Auld * For 64bit wide registers where the upper 32bits don't immediately 2608aef7b67aSMatthew Auld * follow the lower 32bits, the offset of the lower 32bits must 2609aef7b67aSMatthew Auld * be specified 2610aef7b67aSMatthew Auld */ 2611aef7b67aSMatthew Auld __u64 offset; 2612aef7b67aSMatthew Auld #define I915_REG_READ_8B_WA (1ul << 0) 2613aef7b67aSMatthew Auld 2614aef7b67aSMatthew Auld __u64 val; /* Return value */ 2615b9171541SChris Wilson }; 2616aef7b67aSMatthew Auld 2617aef7b67aSMatthew Auld /* Known registers: 2618aef7b67aSMatthew Auld * 2619aef7b67aSMatthew Auld * Render engine timestamp - 0x2358 + 64bit - gen7+ 2620aef7b67aSMatthew Auld * - Note this register returns an invalid value if using the default 2621aef7b67aSMatthew Auld * single instruction 8byte read; to work around that, pass the 2622aef7b67aSMatthew Auld * flag I915_REG_READ_8B_WA in the offset field. 2623aef7b67aSMatthew Auld * 2624aef7b67aSMatthew Auld */ 2625aef7b67aSMatthew Auld 2626aef7b67aSMatthew Auld struct drm_i915_reset_stats { 2627aef7b67aSMatthew Auld __u32 ctx_id; 2628aef7b67aSMatthew Auld __u32 flags; 2629b65a9489SChris Wilson 2630b65a9489SChris Wilson /* All resets since boot/module reload, for all contexts */ 2631b65a9489SChris Wilson __u32 reset_count; 2632b65a9489SChris Wilson 2633b65a9489SChris Wilson /* Number of batches lost when active in GPU, for this context */ 2634b65a9489SChris Wilson __u32 batch_active; 2635b65a9489SChris Wilson 2636b65a9489SChris Wilson /* Number of batches lost pending for execution, for this context */ 2637b65a9489SChris Wilson __u32 batch_pending; 2638b65a9489SChris Wilson 2639b65a9489SChris Wilson __u32 pad; 2640b65a9489SChris Wilson }; 2641b65a9489SChris Wilson 2642b65a9489SChris Wilson /** 2643b65a9489SChris Wilson * struct drm_i915_gem_userptr - Create GEM object from user allocated memory. 2644b65a9489SChris Wilson * 2645aef7b67aSMatthew Auld * Userptr objects have several restrictions on what ioctls can be used with the 2646aef7b67aSMatthew Auld * object handle. 2647aef7b67aSMatthew Auld */ 2648aef7b67aSMatthew Auld struct drm_i915_gem_userptr { 2649b9171541SChris Wilson /** 2650b9171541SChris Wilson * @user_ptr: The pointer to the allocated memory. 2651b65a9489SChris Wilson * 2652b9171541SChris Wilson * Needs to be aligned to PAGE_SIZE. 2653b9171541SChris Wilson */ 2654aef7b67aSMatthew Auld __u64 user_ptr; 2655b9171541SChris Wilson 2656b9171541SChris Wilson /** 2657b9171541SChris Wilson * @user_size: 2658b9171541SChris Wilson * 2659b9171541SChris Wilson * The size in bytes for the allocated memory. This will also become the 2660b9171541SChris Wilson * object size. 2661d7965152SRobert Bragg * 266219f81df2SRobert Bragg * Needs to be aligned to PAGE_SIZE, and should be PAGE_SIZE 266319f81df2SRobert Bragg * or larger.
266419f81df2SRobert Bragg */ 266519f81df2SRobert Bragg __u64 user_size; 266619f81df2SRobert Bragg 266719f81df2SRobert Bragg /** 266819f81df2SRobert Bragg * @flags: 266919f81df2SRobert Bragg * 267019f81df2SRobert Bragg * Supported flags: 267119f81df2SRobert Bragg * 267219f81df2SRobert Bragg * I915_USERPTR_READ_ONLY: 267319f81df2SRobert Bragg * 2674d7965152SRobert Bragg * Mark the object as readonly; this also means GPU access can only be 267581d5f7d9SUmesh Nerlige Ramappa * readonly. This is only supported on HW which supports readonly access 267681d5f7d9SUmesh Nerlige Ramappa * through the GTT. If the HW can't support readonly access, an error is 267781d5f7d9SUmesh Nerlige Ramappa * returned. 267881d5f7d9SUmesh Nerlige Ramappa * 2679*1cc064dcSUmesh Nerlige Ramappa * I915_USERPTR_PROBE: 2680*1cc064dcSUmesh Nerlige Ramappa * 2681*1cc064dcSUmesh Nerlige Ramappa * Probe the provided @user_ptr range and validate that the @user_ptr is 2682*1cc064dcSUmesh Nerlige Ramappa * indeed pointing to normal memory and that the range is also valid. 2683d7965152SRobert Bragg * For example, if some garbage address is given to the kernel, then this 2684d7965152SRobert Bragg * should complain. 2685d7965152SRobert Bragg * 2686eec688e1SRobert Bragg * Returns -EFAULT if the probe failed. 2687eec688e1SRobert Bragg * 2688eec688e1SRobert Bragg * Note that this doesn't populate the backing pages, and also doesn't 2689eec688e1SRobert Bragg * guarantee that the object will remain valid when the object is 2690eec688e1SRobert Bragg * eventually used. 2691b8d49f28SLionel Landwerlin * 2692b8d49f28SLionel Landwerlin * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE 2693eec688e1SRobert Bragg * returns a non-zero value. 2694eec688e1SRobert Bragg * 2695eec688e1SRobert Bragg * I915_USERPTR_UNSYNCHRONIZED: 2696d7965152SRobert Bragg * 2697d7965152SRobert Bragg * NOT USED. Setting this flag will result in an error. 2698d7965152SRobert Bragg */ 2699b8d49f28SLionel Landwerlin __u32 flags; 2700b8d49f28SLionel Landwerlin #define I915_USERPTR_READ_ONLY 0x1 2701d7965152SRobert Bragg #define I915_USERPTR_PROBE 0x2 2702d7965152SRobert Bragg #define I915_USERPTR_UNSYNCHRONIZED 0x80000000 2703d7965152SRobert Bragg /** 2704d7965152SRobert Bragg * @handle: Returned handle for the object. 2705d7965152SRobert Bragg * 270666137f54SRandy Dunlap * Object handles are nonzero.
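 *
 * For reference, a typical call might look like this (a sketch; assumes
 * ptr is a valid, page-aligned allocation of size bytes and fd is a DRM
 * file descriptor):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_userptr arg = {
 *         .user_ptr = (__u64)(uintptr_t)ptr,
 *         .user_size = size,
 *     };
 *
 *     if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *         bo_handle = arg.handle;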
2707b8d49f28SLionel Landwerlin */ 2708b8d49f28SLionel Landwerlin __u32 handle; 2709d7965152SRobert Bragg }; 2710d7965152SRobert Bragg 2711d7965152SRobert Bragg enum drm_i915_oa_format { 2712d7965152SRobert Bragg I915_OA_FORMAT_A13 = 1, /* HSW only */ 2713d7965152SRobert Bragg I915_OA_FORMAT_A29, /* HSW only */ 2714b8d49f28SLionel Landwerlin I915_OA_FORMAT_A13_B8_C8, /* HSW only */ 2715b8d49f28SLionel Landwerlin I915_OA_FORMAT_B4_C8, /* HSW only */ 2716d7965152SRobert Bragg I915_OA_FORMAT_A45_B8_C8, /* HSW only */ 2717d7965152SRobert Bragg I915_OA_FORMAT_B4_C8_A16, /* HSW only */ 2718d7965152SRobert Bragg I915_OA_FORMAT_C4_B8, /* HSW+ */ 2719d7965152SRobert Bragg 2720d7965152SRobert Bragg /* Gen8+ */ 2721d7965152SRobert Bragg I915_OA_FORMAT_A12, 2722d7965152SRobert Bragg I915_OA_FORMAT_A12_B8_C8, 2723d7965152SRobert Bragg I915_OA_FORMAT_A32u40_A4u32_B8_C8, 2724d7965152SRobert Bragg 2725b8d49f28SLionel Landwerlin /* DG2 */ 2726b8d49f28SLionel Landwerlin I915_OAR_FORMAT_A32u40_A4u32_B8_C8, 2727d7965152SRobert Bragg I915_OA_FORMAT_A24u40_A14u32_B8_C8, 2728d7965152SRobert Bragg 2729d7965152SRobert Bragg /* MTL OAM */ 27309cd20ef7SLionel Landwerlin I915_OAM_FORMAT_MPEC8u64_B8_C8, 27319cd20ef7SLionel Landwerlin I915_OAM_FORMAT_MPEC8u32_B8_C8, 27329cd20ef7SLionel Landwerlin 27339cd20ef7SLionel Landwerlin I915_OA_FORMAT_MAX /* non-ABI */ 27349cd20ef7SLionel Landwerlin }; 27359cd20ef7SLionel Landwerlin 27369cd20ef7SLionel Landwerlin enum drm_i915_perf_property_id { 27379cd20ef7SLionel Landwerlin /** 27389cd20ef7SLionel Landwerlin * Open the stream for a specific context handle (as used with 27399cd20ef7SLionel Landwerlin * execbuffer2). A stream opened for a specific context this way 27409cd20ef7SLionel Landwerlin * won't typically require root privileges. 274111ecbdddSLionel Landwerlin * 274211ecbdddSLionel Landwerlin * This property is available in perf revision 1. 274311ecbdddSLionel Landwerlin */ 274411ecbdddSLionel Landwerlin DRM_I915_PERF_PROP_CTX_HANDLE = 1, 274511ecbdddSLionel Landwerlin 274611ecbdddSLionel Landwerlin /** 274711ecbdddSLionel Landwerlin * A value of 1 requests the inclusion of raw OA unit reports as 274811ecbdddSLionel Landwerlin * part of stream samples. 274911ecbdddSLionel Landwerlin * 275011ecbdddSLionel Landwerlin * This property is available in perf revision 1. 275111ecbdddSLionel Landwerlin */ 27524ef10fe0SLionel Landwerlin DRM_I915_PERF_PROP_SAMPLE_OA, 27534ef10fe0SLionel Landwerlin 27544ef10fe0SLionel Landwerlin /** 27554ef10fe0SLionel Landwerlin * The value specifies which set of OA unit metrics should be 27564ef10fe0SLionel Landwerlin * configured, defining the contents of any OA unit reports. 27574ef10fe0SLionel Landwerlin * 27584ef10fe0SLionel Landwerlin * This property is available in perf revision 1. 27594ef10fe0SLionel Landwerlin */ 27604ef10fe0SLionel Landwerlin DRM_I915_PERF_PROP_OA_METRICS_SET, 27614ef10fe0SLionel Landwerlin 27624ef10fe0SLionel Landwerlin /** 27634ef10fe0SLionel Landwerlin * The value specifies the size and layout of OA unit reports. 27644ef10fe0SLionel Landwerlin * 2765c61d04c9SUmesh Nerlige Ramappa * This property is available in perf revision 1. 
2766c61d04c9SUmesh Nerlige Ramappa */ 2767c61d04c9SUmesh Nerlige Ramappa DRM_I915_PERF_PROP_OA_FORMAT, 2768c61d04c9SUmesh Nerlige Ramappa 2769c61d04c9SUmesh Nerlige Ramappa /** 2770c61d04c9SUmesh Nerlige Ramappa * Specifying this property implicitly requests periodic OA unit 2771c61d04c9SUmesh Nerlige Ramappa * sampling and (at least on Haswell) the sampling frequency is derived 2772c61d04c9SUmesh Nerlige Ramappa * from this exponent as follows: 2773c61d04c9SUmesh Nerlige Ramappa * 2774c61d04c9SUmesh Nerlige Ramappa * 80ns * 2^(period_exponent + 1) 2775c61d04c9SUmesh Nerlige Ramappa * 2776c61d04c9SUmesh Nerlige Ramappa * This property is available in perf revision 1. 2777c61d04c9SUmesh Nerlige Ramappa */ 2778c61d04c9SUmesh Nerlige Ramappa DRM_I915_PERF_PROP_OA_EXPONENT, 2779c61d04c9SUmesh Nerlige Ramappa 2780c61d04c9SUmesh Nerlige Ramappa /** 2781c61d04c9SUmesh Nerlige Ramappa * Specifying this property is only valid when specifying a context to 2782c61d04c9SUmesh Nerlige Ramappa * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property 2783c61d04c9SUmesh Nerlige Ramappa * will hold preemption of the particular context we want to gather 2784eec688e1SRobert Bragg * performance data about. The execbuf2 submissions must include a 2785eec688e1SRobert Bragg * drm_i915_gem_execbuffer_ext_perf parameter for this to apply. 2786eec688e1SRobert Bragg * 2787eec688e1SRobert Bragg * This property is available in perf revision 3. 2788eec688e1SRobert Bragg */ 2789eec688e1SRobert Bragg DRM_I915_PERF_PROP_HOLD_PREEMPTION, 2790eec688e1SRobert Bragg 2791eec688e1SRobert Bragg /** 2792eec688e1SRobert Bragg * Specifying this pins all contexts to the specified SSEU power 2793eec688e1SRobert Bragg * configuration for the duration of the recording. 2794eec688e1SRobert Bragg * 2795eec688e1SRobert Bragg * This parameter's value is a pointer to a struct 2796eec688e1SRobert Bragg * drm_i915_gem_context_param_sseu. 2797eec688e1SRobert Bragg * 2798eec688e1SRobert Bragg * This property is available in perf revision 4. 2799eec688e1SRobert Bragg */ 2800cd8bddc4SChris Wilson DRM_I915_PERF_PROP_GLOBAL_SSEU, 2801eec688e1SRobert Bragg 2802eec688e1SRobert Bragg /** 28032ef6a01fSMatthew Auld * This optional parameter specifies the timer interval in nanoseconds 2804d7965152SRobert Bragg * at which the i915 driver will check the OA buffer for available data. 2805d7965152SRobert Bragg * Minimum allowed value is 100 microseconds. A default value is used by 2806d7965152SRobert Bragg * the driver if this parameter is not specified. Note that larger timer 2807d7965152SRobert Bragg * values will reduce CPU consumption during OA perf captures. However, 2808d7965152SRobert Bragg * excessively large values would potentially result in OA buffer 2809d7965152SRobert Bragg * overwrites as captures reach the end of the OA buffer. 2810d7965152SRobert Bragg * 2811d7965152SRobert Bragg * This property is available in perf revision 5. 2812b8d49f28SLionel Landwerlin */ 2813b8d49f28SLionel Landwerlin DRM_I915_PERF_PROP_POLL_OA_PERIOD, 2814d7965152SRobert Bragg 2815eec688e1SRobert Bragg /** 2816d7965152SRobert Bragg * Multiple engines may be mapped to the same OA unit. The OA unit is 28172ef6a01fSMatthew Auld * identified by class:instance of any engine mapped to it. 2818d7965152SRobert Bragg * 2819d7965152SRobert Bragg * This parameter specifies the engine class and must be passed along 2820d7965152SRobert Bragg * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
2821b8d49f28SLionel Landwerlin * 2822b8d49f28SLionel Landwerlin * This property is available in perf revision 6. 2823d7965152SRobert Bragg */ 2824eec688e1SRobert Bragg DRM_I915_PERF_PROP_OA_ENGINE_CLASS, 2825eec688e1SRobert Bragg 28262ef6a01fSMatthew Auld /** 28277831e9a9SChris Wilson * This parameter specifies the engine instance and must be passed along 28287831e9a9SChris Wilson * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS. 28297831e9a9SChris Wilson * 28307831e9a9SChris Wilson * This property is available in perf revision 6. 28317831e9a9SChris Wilson */ 28327831e9a9SChris Wilson DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE, 28337831e9a9SChris Wilson 28347831e9a9SChris Wilson DRM_I915_PERF_PROP_MAX /* non-ABI */ 28357831e9a9SChris Wilson }; 28367831e9a9SChris Wilson 28377831e9a9SChris Wilson struct drm_i915_perf_open_param { 28387831e9a9SChris Wilson __u32 flags; 28392ef6a01fSMatthew Auld #define I915_PERF_FLAG_FD_CLOEXEC (1<<0) 2840eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_NONBLOCK (1<<1) 2841eec688e1SRobert Bragg #define I915_PERF_FLAG_DISABLED (1<<2) 2842eec688e1SRobert Bragg 2843eec688e1SRobert Bragg /** The number of u64 (id, value) pairs */ 2844eec688e1SRobert Bragg __u32 num_properties; 2845eec688e1SRobert Bragg 2846eec688e1SRobert Bragg /** 2847eec688e1SRobert Bragg * Pointer to array of u64 (id, value) pairs configuring the stream 2848eec688e1SRobert Bragg * to open. 2849eec688e1SRobert Bragg */ 2850eec688e1SRobert Bragg __u64 properties_ptr; 2851eec688e1SRobert Bragg }; 2852eec688e1SRobert Bragg 2853eec688e1SRobert Bragg /* 2854eec688e1SRobert Bragg * Enable data capture for a stream that was either opened in a disabled state 2855eec688e1SRobert Bragg * via I915_PERF_FLAG_DISABLED or was later disabled via 2856eec688e1SRobert Bragg * I915_PERF_IOCTL_DISABLE. 2857eec688e1SRobert Bragg * 2858eec688e1SRobert Bragg * It is intended to be cheaper to disable and enable a stream than it may be 2859eec688e1SRobert Bragg * to close and re-open a stream with the same configuration. 2860d7965152SRobert Bragg * 2861eec688e1SRobert Bragg * It's undefined whether any pending data for the stream will be lost. 2862eec688e1SRobert Bragg * 2863eec688e1SRobert Bragg * This ioctl is available in perf revision 1. 2864eec688e1SRobert Bragg */ 2865eec688e1SRobert Bragg #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) 2866d7965152SRobert Bragg 2867eec688e1SRobert Bragg /* 2868eec688e1SRobert Bragg * Disable data capture for a stream. 2869eec688e1SRobert Bragg * 2870eec688e1SRobert Bragg * It is an error to try to read a stream that is disabled. 2871d7965152SRobert Bragg * 2872d7965152SRobert Bragg * This ioctl is available in perf revision 1. 2873d7965152SRobert Bragg */ 2874d7965152SRobert Bragg #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) 2875d7965152SRobert Bragg 2876d7965152SRobert Bragg /* 2877d7965152SRobert Bragg * Change metrics_set captured by a stream. 2878d7965152SRobert Bragg * 2879d7965152SRobert Bragg * If the stream is bound to a specific context, the configuration change 2880d7965152SRobert Bragg * will be performed inline with that context such that it takes effect before 2881d7965152SRobert Bragg * the next execbuf submission. 2882d7965152SRobert Bragg * 2883d7965152SRobert Bragg * Returns the previously bound metrics set id, or a negative error code. 2884eec688e1SRobert Bragg * 2885eec688e1SRobert Bragg * This ioctl is available in perf revision 2.
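 *
 * For example (a sketch; metrics_id names a previously added OA
 * configuration):
 *
 * .. code-block:: C
 *
 *     long prev = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG,
 *                       (void *)(uintptr_t)metrics_id);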
2886eec688e1SRobert Bragg */ 2887a2e54026SMatt Roper #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) 2888a2e54026SMatt Roper 2889a2e54026SMatt Roper /* 2890f89823c2SLionel Landwerlin * Common to all i915 perf records 2891f89823c2SLionel Landwerlin */ 2892f89823c2SLionel Landwerlin struct drm_i915_perf_record_header { 2893a2e54026SMatt Roper __u32 type; 2894a2e54026SMatt Roper __u16 pad; 2895a2e54026SMatt Roper __u16 size; 2896a2e54026SMatt Roper }; 2897a2e54026SMatt Roper 2898f89823c2SLionel Landwerlin enum drm_i915_perf_record_type { 2899f89823c2SLionel Landwerlin 2900a2e54026SMatt Roper /** 2901a2e54026SMatt Roper * Samples are the workhorse record type whose contents are extensible 2902a2e54026SMatt Roper * and defined when opening an i915 perf stream based on the given 2903a2e54026SMatt Roper * properties. 2904a2e54026SMatt Roper * 2905f89823c2SLionel Landwerlin * Boolean properties following the naming convention 2906a2e54026SMatt Roper * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in 2907a2e54026SMatt Roper * every sample. 2908a2e54026SMatt Roper * 2909a2e54026SMatt Roper * The order of these sample properties given by userspace has no 2910a2e54026SMatt Roper * effect on the ordering of data within a sample. The order is 2911a2e54026SMatt Roper * documented here. 2912f89823c2SLionel Landwerlin * 2913a2e54026SMatt Roper * struct { 2914a2e54026SMatt Roper * struct drm_i915_perf_record_header header; 2915a2e54026SMatt Roper * 2916a2e54026SMatt Roper * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA 2917a2e54026SMatt Roper * }; 2918a2e54026SMatt Roper */ 2919f89823c2SLionel Landwerlin DRM_I915_PERF_RECORD_SAMPLE = 1, 2920f89823c2SLionel Landwerlin 2921a2e54026SMatt Roper /* 2922a2e54026SMatt Roper * Indicates that one or more OA reports were not written by the 2923a2e54026SMatt Roper * hardware. This can happen for example if an MI_REPORT_PERF_COUNT 2924a2e54026SMatt Roper * command collides with periodic sampling - which would be more likely 2925a2e54026SMatt Roper * at higher sampling frequencies. 2926a2e54026SMatt Roper */ 2927ee427e25SLionel Landwerlin DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, 292817ad4fddSChris Wilson 2929a2e54026SMatt Roper /** 2930a2e54026SMatt Roper * An error occurred that resulted in all pending OA reports being lost. 2931a2e54026SMatt Roper */ 2932a2e54026SMatt Roper DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, 2933a2e54026SMatt Roper 2934a2e54026SMatt Roper DRM_I915_PERF_RECORD_MAX /* non-ABI */ 2935a2e54026SMatt Roper }; 2936a2e54026SMatt Roper 293717ad4fddSChris Wilson /** 2938a2e54026SMatt Roper * struct drm_i915_perf_oa_config 2939a2e54026SMatt Roper * 2940a2e54026SMatt Roper * Structure to upload perf dynamic configuration into the kernel. 2941a2e54026SMatt Roper */ 2942a2e54026SMatt Roper struct drm_i915_perf_oa_config { 2943a2e54026SMatt Roper /** 2944a2e54026SMatt Roper * @uuid: 2945a2e54026SMatt Roper * 294617ad4fddSChris Wilson * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" 2947f89823c2SLionel Landwerlin */ 2948f89823c2SLionel Landwerlin char uuid[36]; 2949e3bdccafSMatthew Auld 2950e3bdccafSMatthew Auld /** 2951e3bdccafSMatthew Auld * @n_mux_regs: 2952e3bdccafSMatthew Auld * 2953e3bdccafSMatthew Auld * Number of mux regs in &mux_regs_ptr. 2954e3bdccafSMatthew Auld */ 2955a446ae2cSLionel Landwerlin __u32 n_mux_regs; 29561c671ad7SMatt Roper 29571c671ad7SMatt Roper /** 29581c671ad7SMatt Roper * @n_boolean_regs: 29591c671ad7SMatt Roper * 29601c671ad7SMatt Roper * Number of boolean regs in &boolean_regs_ptr.
29611c671ad7SMatt Roper */ 29621c671ad7SMatt Roper __u32 n_boolean_regs; 29631c671ad7SMatt Roper 29641c671ad7SMatt Roper /** 2965c94fde8fSMatt Atwood * @n_flex_regs: 29661c671ad7SMatt Roper * 2967a446ae2cSLionel Landwerlin * Number of flex regs in &flex_regs_ptr. 2968c822e059SLionel Landwerlin */ 2969c5d3e39cSTvrtko Ursulin __u32 n_flex_regs; 29704f6ccc74SLionel Landwerlin 297171021729SAbdiel Janulgue /** 297278e1fb31SRodrigo Vivi * @mux_regs_ptr: 2973c94fde8fSMatt Atwood * 2974be03564bSChris Wilson * Pointer to tuples of u32 values (register address, value) for mux 2975a446ae2cSLionel Landwerlin * registers. Expected length of buffer is (2 * sizeof(u32) * 2976e3bdccafSMatthew Auld * &n_mux_regs). 2977e3bdccafSMatthew Auld */ 2978e3bdccafSMatthew Auld __u64 mux_regs_ptr; 2979a446ae2cSLionel Landwerlin 2980e3bdccafSMatthew Auld /** 2981a446ae2cSLionel Landwerlin * @boolean_regs_ptr: 2982a446ae2cSLionel Landwerlin * 2983a446ae2cSLionel Landwerlin * Pointer to tuples of u32 values (register address, value) for boolean 2984a446ae2cSLionel Landwerlin * registers. Expected length of buffer is (2 * sizeof(u32) * 2985a446ae2cSLionel Landwerlin * &n_boolean_regs). 2986e3bdccafSMatthew Auld */ 2987e3bdccafSMatthew Auld __u64 boolean_regs_ptr; 2988e3bdccafSMatthew Auld 29891c671ad7SMatt Roper /** 29904f6ccc74SLionel Landwerlin * @flex_regs_ptr: 29911c671ad7SMatt Roper * 29924f6ccc74SLionel Landwerlin * Pointer to tuples of u32 values (register address, value) for flex 2993e3bdccafSMatthew Auld * registers. Expected length of buffer is (2 * sizeof(u32) * 29941c671ad7SMatt Roper * &n_flex_regs). 29951c671ad7SMatt Roper */ 29961c671ad7SMatt Roper __u64 flex_regs_ptr; 2997c94fde8fSMatt Atwood }; 2998c94fde8fSMatt Atwood 2999c94fde8fSMatt Atwood /** 3000a446ae2cSLionel Landwerlin * struct drm_i915_query_item - An individual query for the kernel to process. 3001a446ae2cSLionel Landwerlin * 30024f6ccc74SLionel Landwerlin * The behaviour is determined by the @query_id. Note that exactly what 30034f6ccc74SLionel Landwerlin * data is written to @data_ptr also depends on the specific @query_id. 30044f6ccc74SLionel Landwerlin */ 3005a446ae2cSLionel Landwerlin struct drm_i915_query_item { 3006e3bdccafSMatthew Auld /** 3007e3bdccafSMatthew Auld * @query_id: 3008e3bdccafSMatthew Auld * 3009e3bdccafSMatthew Auld * The id for this query.
Currently accepted query IDs are: 3010e3bdccafSMatthew Auld * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info) 3011a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info) 3012a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config) 3013a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions) 3014a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`) 3015a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info) 3016e3bdccafSMatthew Auld */ 3017e3bdccafSMatthew Auld __u64 query_id; 3018e3bdccafSMatthew Auld #define DRM_I915_QUERY_TOPOLOGY_INFO 1 3019e3bdccafSMatthew Auld #define DRM_I915_QUERY_ENGINE_INFO 2 3020e3bdccafSMatthew Auld #define DRM_I915_QUERY_PERF_CONFIG 3 3021e3bdccafSMatthew Auld #define DRM_I915_QUERY_MEMORY_REGIONS 4 3022e3bdccafSMatthew Auld #define DRM_I915_QUERY_HWCONFIG_BLOB 5 3023e3bdccafSMatthew Auld #define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6 3024e3bdccafSMatthew Auld /* Must be kept compact -- no holes and well documented */ 3025e3bdccafSMatthew Auld 3026e3bdccafSMatthew Auld /** 3027e3bdccafSMatthew Auld * @length: 3028e3bdccafSMatthew Auld * 3029e3bdccafSMatthew Auld * When set to zero by userspace, this is filled with the size of the 3030e3bdccafSMatthew Auld * data to be written at the @data_ptr pointer. The kernel sets this 3031e3bdccafSMatthew Auld * value to a negative value to signal an error on a particular query 3032e3bdccafSMatthew Auld * item. 3033e3bdccafSMatthew Auld */ 3034e3bdccafSMatthew Auld __s32 length; 3035e3bdccafSMatthew Auld 3036e3bdccafSMatthew Auld /** 3037e3bdccafSMatthew Auld * @flags: 3038e3bdccafSMatthew Auld * 3039a446ae2cSLionel Landwerlin * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. 3040e3bdccafSMatthew Auld * 3041a446ae2cSLionel Landwerlin * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the 3042a446ae2cSLionel Landwerlin * following: 3043e3bdccafSMatthew Auld * 3044e3bdccafSMatthew Auld * - %DRM_I915_QUERY_PERF_CONFIG_LIST 3045a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 3046a446ae2cSLionel Landwerlin * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3047a446ae2cSLionel Landwerlin * 3048e3bdccafSMatthew Auld * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, must contain 3049e3bdccafSMatthew Auld * a struct i915_engine_class_instance that references a render engine. 3050e3bdccafSMatthew Auld */ 3051e3bdccafSMatthew Auld __u32 flags; 3052e3bdccafSMatthew Auld #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 3053a446ae2cSLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 3054a446ae2cSLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 3055a446ae2cSLionel Landwerlin 3056a446ae2cSLionel Landwerlin /** 3057462ac1cdSMatt Roper * @data_ptr: 3058462ac1cdSMatt Roper * 3059c822e059SLionel Landwerlin * Data will be written at the location pointed by @data_ptr when the 3060462ac1cdSMatt Roper * value of @length matches the length of the data to be written by the 3061462ac1cdSMatt Roper * kernel. 3062c822e059SLionel Landwerlin */ 3063c822e059SLionel Landwerlin __u64 data_ptr; 3064462ac1cdSMatt Roper }; 3065462ac1cdSMatt Roper 3066462ac1cdSMatt Roper /** 3067c822e059SLionel Landwerlin * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the 3068c822e059SLionel Landwerlin * kernel to fill out.
3069c822e059SLionel Landwerlin * 3070c822e059SLionel Landwerlin * Note that this is generally a two step process for each struct 3071462ac1cdSMatt Roper * drm_i915_query_item in the array: 3072462ac1cdSMatt Roper * 3073462ac1cdSMatt Roper * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct 3074462ac1cdSMatt Roper * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The 3075462ac1cdSMatt Roper * kernel will then fill in the size, in bytes, which tells userspace how 3076c822e059SLionel Landwerlin * much memory it needs to allocate for the blob (say for an array of properties). 3077462ac1cdSMatt Roper * 3078462ac1cdSMatt Roper * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the 3079462ac1cdSMatt Roper * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that 3080462ac1cdSMatt Roper * the &drm_i915_query_item.length should still be the same as what the 3081462ac1cdSMatt Roper * kernel previously set. At this point the kernel can fill in the blob. 3082462ac1cdSMatt Roper * 3083c822e059SLionel Landwerlin * Note that for some query items it can make sense for userspace to just pass 3084462ac1cdSMatt Roper * in a buffer/blob equal to or larger than the required size. In this case only 3085462ac1cdSMatt Roper * a single ioctl call is needed. For some smaller query items this can work 3086462ac1cdSMatt Roper * quite well. 3087462ac1cdSMatt Roper * 3088462ac1cdSMatt Roper */ 3089462ac1cdSMatt Roper struct drm_i915_query { 3090462ac1cdSMatt Roper /** @num_items: The number of elements in the @items_ptr array */ 3091c822e059SLionel Landwerlin __u32 num_items; 3092c822e059SLionel Landwerlin 3093462ac1cdSMatt Roper /** 3094462ac1cdSMatt Roper * @flags: Unused for now. Must be cleared to zero. 3095462ac1cdSMatt Roper */ 3096c822e059SLionel Landwerlin __u32 flags; 3097c822e059SLionel Landwerlin 3098c822e059SLionel Landwerlin /** 3099c822e059SLionel Landwerlin * @items_ptr: 3100462ac1cdSMatt Roper * 3101462ac1cdSMatt Roper * Pointer to an array of struct drm_i915_query_item. The number of 3102462ac1cdSMatt Roper * array elements is @num_items. 3103c822e059SLionel Landwerlin */ 3104c822e059SLionel Landwerlin __u64 items_ptr; 3105c822e059SLionel Landwerlin }; 3106c822e059SLionel Landwerlin 3107c822e059SLionel Landwerlin /** 3108462ac1cdSMatt Roper * struct drm_i915_query_topology_info 3109462ac1cdSMatt Roper * 3110462ac1cdSMatt Roper * Describes slice/subslice/EU information queried by 3111c822e059SLionel Landwerlin * %DRM_I915_QUERY_TOPOLOGY_INFO 3112c822e059SLionel Landwerlin */ 3113c822e059SLionel Landwerlin struct drm_i915_query_topology_info { 3114c822e059SLionel Landwerlin /** 3115462ac1cdSMatt Roper * @flags: 3116462ac1cdSMatt Roper * 3117462ac1cdSMatt Roper * Unused for now. Must be cleared to zero. 3118c822e059SLionel Landwerlin */ 3119c822e059SLionel Landwerlin __u16 flags; 3120c822e059SLionel Landwerlin 3121c822e059SLionel Landwerlin /** 3122462ac1cdSMatt Roper * @max_slices: 3123462ac1cdSMatt Roper * 3124462ac1cdSMatt Roper * The number of bits used to express the slice mask. 3125462ac1cdSMatt Roper */ 3126462ac1cdSMatt Roper __u16 max_slices; 3127462ac1cdSMatt Roper 3128462ac1cdSMatt Roper /** 3129462ac1cdSMatt Roper * @max_subslices: 3130462ac1cdSMatt Roper * 3131462ac1cdSMatt Roper * The number of bits used to express the subslice mask.
3132462ac1cdSMatt Roper */ 3133462ac1cdSMatt Roper __u16 max_subslices; 3134462ac1cdSMatt Roper 3135462ac1cdSMatt Roper /** 3136462ac1cdSMatt Roper * @max_eus_per_subslice: 3137462ac1cdSMatt Roper * 3138462ac1cdSMatt Roper * The number of bits in the EU mask that correspond to a single 3139462ac1cdSMatt Roper * subslice's EUs. 3140462ac1cdSMatt Roper */ 3141462ac1cdSMatt Roper __u16 max_eus_per_subslice; 3142462ac1cdSMatt Roper 3143462ac1cdSMatt Roper /** 3144462ac1cdSMatt Roper * @subslice_offset: 3145462ac1cdSMatt Roper * 3146462ac1cdSMatt Roper * Offset in data[] at which the subslice masks are stored. 3147462ac1cdSMatt Roper */ 3148462ac1cdSMatt Roper __u16 subslice_offset; 3149462ac1cdSMatt Roper 3150462ac1cdSMatt Roper /** 3151462ac1cdSMatt Roper * @subslice_stride: 3152462ac1cdSMatt Roper * 3153462ac1cdSMatt Roper * Stride at which each of the subslice masks for each slice are 3154462ac1cdSMatt Roper * stored. 3155462ac1cdSMatt Roper */ 3156462ac1cdSMatt Roper __u16 subslice_stride; 3157462ac1cdSMatt Roper 3158462ac1cdSMatt Roper /** 3159462ac1cdSMatt Roper * @eu_offset: 3160462ac1cdSMatt Roper * 3161462ac1cdSMatt Roper * Offset in data[] at which the EU masks are stored. 3162462ac1cdSMatt Roper */ 3163c822e059SLionel Landwerlin __u16 eu_offset; 3164c822e059SLionel Landwerlin 3165c822e059SLionel Landwerlin /** 3166c5d3e39cSTvrtko Ursulin * @eu_stride: 316757772953STvrtko Ursulin * 316857772953STvrtko Ursulin * Stride at which each of the EU masks for each subslice are stored. 316957772953STvrtko Ursulin */ 317057772953STvrtko Ursulin __u16 eu_stride; 317157772953STvrtko Ursulin 317257772953STvrtko Ursulin /** 317357772953STvrtko Ursulin * @data: 317457772953STvrtko Ursulin * 317557772953STvrtko Ursulin * Contains 3 pieces of information: 317657772953STvrtko Ursulin * 317757772953STvrtko Ursulin * - The slice mask with one bit per slice telling whether a slice is 317857772953STvrtko Ursulin * available. The availability of slice X can be queried with the 317957772953STvrtko Ursulin * following formula: 318057772953STvrtko Ursulin * 318157772953STvrtko Ursulin * .. code:: c 318257772953STvrtko Ursulin * 318357772953STvrtko Ursulin * (data[X / 8] >> (X % 8)) & 1 318457772953STvrtko Ursulin * 318557772953STvrtko Ursulin * Starting with Xe_HP platforms, Intel hardware no longer has 318657772953STvrtko Ursulin * traditional slices so i915 will always report a single slice 318757772953STvrtko Ursulin * (hardcoded slicemask = 0x1) which contains all of the platform's 318857772953STvrtko Ursulin * subslices. I.e., the mask here does not reflect any of the newer 318957772953STvrtko Ursulin * hardware concepts such as "gslices" or "cslices" since userspace 319057772953STvrtko Ursulin * is capable of inferring those from the subslice mask. 319157772953STvrtko Ursulin * 319257772953STvrtko Ursulin * - The subslice mask for each slice with one bit per subslice telling 319357772953STvrtko Ursulin * whether a subslice is available. Starting with Gen12 we use the 319457772953STvrtko Ursulin * term "subslice" to refer to what the hardware documentation 319557772953STvrtko Ursulin * describes as a "dual-subslice". The availability of subslice Y 319657772953STvrtko Ursulin * in slice X can be queried with the following formula: 319757772953STvrtko Ursulin * 319857772953STvrtko Ursulin * ..
code:: c 319957772953STvrtko Ursulin * 320057772953STvrtko Ursulin * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1 320157772953STvrtko Ursulin * 320257772953STvrtko Ursulin * - The EU mask for each subslice in each slice, with one bit per EU 320357772953STvrtko Ursulin * telling whether an EU is available. The availability of EU Z in 320457772953STvrtko Ursulin * subslice Y in slice X can be queried with the following formula: 320557772953STvrtko Ursulin * 320657772953STvrtko Ursulin * .. code:: c 320757772953STvrtko Ursulin * 320857772953STvrtko Ursulin * (data[eu_offset + 320957772953STvrtko Ursulin * (X * max_subslices + Y) * eu_stride + 321057772953STvrtko Ursulin * Z / 8 321157772953STvrtko Ursulin * ] >> (Z % 8)) & 1 321257772953STvrtko Ursulin */ 321357772953STvrtko Ursulin __u8 data[]; 321457772953STvrtko Ursulin }; 321557772953STvrtko Ursulin 321657772953STvrtko Ursulin /** 321757772953STvrtko Ursulin * DOC: Engine Discovery uAPI 321857772953STvrtko Ursulin * 321957772953STvrtko Ursulin * Engine discovery uAPI is a way of enumerating physical engines present in a 322057772953STvrtko Ursulin * GPU associated with an open i915 DRM file descriptor. This supersedes the old 322157772953STvrtko Ursulin * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like 322257772953STvrtko Ursulin * `I915_PARAM_HAS_BLT`. 322357772953STvrtko Ursulin * 322457772953STvrtko Ursulin * The need for this interface came starting with Icelake and newer GPUs, which 322557772953STvrtko Ursulin * started to establish a pattern of having multiple engines of the same class, 322657772953STvrtko Ursulin * where not all instances were always completely functionally equivalent. 322757772953STvrtko Ursulin * 322857772953STvrtko Ursulin * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with 322957772953STvrtko Ursulin * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id. 323057772953STvrtko Ursulin * 323157772953STvrtko Ursulin * Example for getting the list of engines: 323257772953STvrtko Ursulin * 323357772953STvrtko Ursulin * .. code-block:: C 323457772953STvrtko Ursulin * 323557772953STvrtko Ursulin * struct drm_i915_query_engine_info *info; 323657772953STvrtko Ursulin * struct drm_i915_query_item item = { 3237c5d3e39cSTvrtko Ursulin * .query_id = DRM_I915_QUERY_ENGINE_INFO, 3238c5d3e39cSTvrtko Ursulin * }; 3239c5d3e39cSTvrtko Ursulin * struct drm_i915_query query = { 3240c5d3e39cSTvrtko Ursulin * .num_items = 1, 3241c5d3e39cSTvrtko Ursulin * .items_ptr = (uintptr_t)&item, 32422ef6a01fSMatthew Auld * }; 3243c5d3e39cSTvrtko Ursulin * int err, i; 3244c5d3e39cSTvrtko Ursulin * 32452ef6a01fSMatthew Auld * // First query the size of the blob we need, this needs to be large 3246c5d3e39cSTvrtko Ursulin * // enough to hold our array of engines. The kernel will fill out the 3247c5d3e39cSTvrtko Ursulin * // item.length for us, which is the number of bytes we need. 32482ef6a01fSMatthew Auld * // 3249c5d3e39cSTvrtko Ursulin * // Alternatively a large buffer can be allocated straight away enabling 32509409eb35SMatthew Brost * // querying in one pass, in which case item.length should contain the 3251c5d3e39cSTvrtko Ursulin * // length of the provided buffer. 32522ef6a01fSMatthew Auld * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 3253c5d3e39cSTvrtko Ursulin * if (err) ...
3254c5d3e39cSTvrtko Ursulin * 3255c5d3e39cSTvrtko Ursulin * info = calloc(1, item.length); 3256c5d3e39cSTvrtko Ursulin * // Now that we allocated the required number of bytes, we call the ioctl 32579409eb35SMatthew Brost * // again, this time with the data_ptr pointing to our newly allocated 32589409eb35SMatthew Brost * // blob, which the kernel can then populate with info on all engines. 32599409eb35SMatthew Brost * item.data_ptr = (uintptr_t)info; 32602ef6a01fSMatthew Auld * 32619409eb35SMatthew Brost * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 32629409eb35SMatthew Brost * if (err) ... 32639409eb35SMatthew Brost * 3264c5d3e39cSTvrtko Ursulin * // We can now access each engine in the array 3265c5d3e39cSTvrtko Ursulin * for (i = 0; i < info->num_engines; i++) { 3266c5d3e39cSTvrtko Ursulin * struct drm_i915_engine_info einfo = info->engines[i]; 3267c5d3e39cSTvrtko Ursulin * u16 class = einfo.engine.class; 3268c5d3e39cSTvrtko Ursulin * u16 instance = einfo.engine.instance; 3269c5d3e39cSTvrtko Ursulin * .... 3270c5d3e39cSTvrtko Ursulin * } 3271c5d3e39cSTvrtko Ursulin * 3272c5d3e39cSTvrtko Ursulin * free(info); 32732ef6a01fSMatthew Auld * 3274c5d3e39cSTvrtko Ursulin * Each of the enumerated engines, apart from being defined by its class and 3275c5d3e39cSTvrtko Ursulin * instance (see `struct i915_engine_class_instance`), can also have flags and 32762ef6a01fSMatthew Auld * capabilities defined as documented in i915_drm.h. 3277c5d3e39cSTvrtko Ursulin * 3278c5d3e39cSTvrtko Ursulin * For instance, video engines which support HEVC encoding will have the 32792ef6a01fSMatthew Auld * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set. 3280c5d3e39cSTvrtko Ursulin * 3281c5d3e39cSTvrtko Ursulin * Engine discovery only fully comes into its own when combined with the new way 3282c5d3e39cSTvrtko Ursulin * of addressing engines when submitting batch buffers using contexts with 3283a2e54026SMatt Roper * engine maps configured. 3284a2e54026SMatt Roper */ 3285a2e54026SMatt Roper 3286c94fde8fSMatt Atwood /** 3287c94fde8fSMatt Atwood * struct drm_i915_engine_info 32884f6ccc74SLionel Landwerlin * 32894f6ccc74SLionel Landwerlin * Describes one engine and its capabilities as known to the driver. 32904f6ccc74SLionel Landwerlin */ 3291a2e54026SMatt Roper struct drm_i915_engine_info { 3292a2e54026SMatt Roper /** @engine: Engine class and instance. */ 3293a2e54026SMatt Roper struct i915_engine_class_instance engine; 3294a2e54026SMatt Roper 3295a2e54026SMatt Roper /** @rsvd0: Reserved field. */ 3296a2e54026SMatt Roper __u32 rsvd0; 32974f6ccc74SLionel Landwerlin 32984f6ccc74SLionel Landwerlin /** @flags: Engine flags. */ 32994f6ccc74SLionel Landwerlin __u64 flags; 3300a2e54026SMatt Roper #define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0) 3301a2e54026SMatt Roper 3302a2e54026SMatt Roper /** @capabilities: Capabilities of this engine. */ 3303a2e54026SMatt Roper __u64 capabilities; 3304a2e54026SMatt Roper #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) 3305a2e54026SMatt Roper #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) 3306a2e54026SMatt Roper 33074f6ccc74SLionel Landwerlin /** @logical_instance: Logical instance of engine */ 33084f6ccc74SLionel Landwerlin __u16 logical_instance; 33094f6ccc74SLionel Landwerlin 3310a2e54026SMatt Roper /** @rsvd1: Reserved fields. */ 3311a2e54026SMatt Roper __u16 rsvd1[3]; 3312a2e54026SMatt Roper /** @rsvd2: Reserved fields.
*/ 3313a2e54026SMatt Roper __u64 rsvd2[3]; 3314a2e54026SMatt Roper }; 3315a2e54026SMatt Roper 3316a2e54026SMatt Roper /** 33174f6ccc74SLionel Landwerlin * struct drm_i915_query_engine_info 33184f6ccc74SLionel Landwerlin * 33194f6ccc74SLionel Landwerlin * Engine info query enumerates all engines known to the driver by filling in 33204f6ccc74SLionel Landwerlin * an array of struct drm_i915_engine_info structures. 33214f6ccc74SLionel Landwerlin */ 33224f6ccc74SLionel Landwerlin struct drm_i915_query_engine_info { 3323a2e54026SMatt Roper /** @num_engines: Number of struct drm_i915_engine_info structs following. */ 3324a2e54026SMatt Roper __u32 num_engines; 3325a2e54026SMatt Roper 33264f6ccc74SLionel Landwerlin /** @rsvd: MBZ */ 33274f6ccc74SLionel Landwerlin __u32 rsvd[3]; 33284f6ccc74SLionel Landwerlin 33294f6ccc74SLionel Landwerlin /** @engines: Marker for drm_i915_engine_info structures. */ 3330a2e54026SMatt Roper struct drm_i915_engine_info engines[]; 3331a2e54026SMatt Roper }; 33324f6ccc74SLionel Landwerlin 3333a2e54026SMatt Roper /** 3334a2e54026SMatt Roper * struct drm_i915_query_perf_config 3335a2e54026SMatt Roper * 3336a2e54026SMatt Roper * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and 3337a2e54026SMatt Roper * %DRM_I915_QUERY_GEOMETRY_SUBSLICES. 3338a2e54026SMatt Roper */ 3339a2e54026SMatt Roper struct drm_i915_query_perf_config { 33404f6ccc74SLionel Landwerlin union { 33414f6ccc74SLionel Landwerlin /** 3342a2e54026SMatt Roper * @n_configs: 3343a2e54026SMatt Roper * 3344a2e54026SMatt Roper * When &drm_i915_query_item.flags == 33454f6ccc74SLionel Landwerlin * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to 33464f6ccc74SLionel Landwerlin * the number of configurations available. 33474f6ccc74SLionel Landwerlin */ 33484f6ccc74SLionel Landwerlin __u64 n_configs; 334971021729SAbdiel Janulgue 335071021729SAbdiel Janulgue /** 335171021729SAbdiel Janulgue * @config: 335271021729SAbdiel Janulgue * 335371021729SAbdiel Janulgue * When &drm_i915_query_item.flags == 335471021729SAbdiel Janulgue * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the 335571021729SAbdiel Janulgue * value in this field as configuration identifier to decide 335671021729SAbdiel Janulgue * what data to write into config_ptr. 335771021729SAbdiel Janulgue */ 335871021729SAbdiel Janulgue __u64 config; 335971021729SAbdiel Janulgue 336071021729SAbdiel Janulgue /** 336171021729SAbdiel Janulgue * @uuid: 336271021729SAbdiel Janulgue * 336371021729SAbdiel Janulgue * When &drm_i915_query_item.flags == 336471021729SAbdiel Janulgue * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the 336571021729SAbdiel Janulgue * value in this field as configuration identifier to decide 336671021729SAbdiel Janulgue * what data to write into config_ptr. 336771021729SAbdiel Janulgue * 336871021729SAbdiel Janulgue * String formatted like "%08x-%04x-%04x-%04x-%012x" 336971021729SAbdiel Janulgue */ 337071021729SAbdiel Janulgue char uuid[36]; 337171021729SAbdiel Janulgue }; 337271021729SAbdiel Janulgue 337371021729SAbdiel Janulgue /** 337471021729SAbdiel Janulgue * @flags: 337571021729SAbdiel Janulgue * 337671021729SAbdiel Janulgue * Unused for now. Must be cleared to zero.
337771021729SAbdiel Janulgue */ 337871021729SAbdiel Janulgue __u32 flags; 337971021729SAbdiel Janulgue 338071021729SAbdiel Janulgue /** 338171021729SAbdiel Janulgue * @data: 338271021729SAbdiel Janulgue * 338371021729SAbdiel Janulgue * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST, 338471021729SAbdiel Janulgue * i915 will write an array of __u64 of configuration identifiers. 33853f4309cbSMatthew Auld * 33863f4309cbSMatthew Auld * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 33873f4309cbSMatthew Auld * or %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct 33883f4309cbSMatthew Auld * drm_i915_perf_oa_config. If the following fields of struct 33893f4309cbSMatthew Auld * drm_i915_perf_oa_config are not set to 0, i915 will write into the 33903f4309cbSMatthew Auld * associated pointers the values submitted when the configuration was created: 33913f4309cbSMatthew Auld * 33923f4309cbSMatthew Auld * - &drm_i915_perf_oa_config.n_mux_regs 339371021729SAbdiel Janulgue * - &drm_i915_perf_oa_config.n_boolean_regs 339471021729SAbdiel Janulgue * - &drm_i915_perf_oa_config.n_flex_regs 3395141f733bSMatthew Auld */ 3396141f733bSMatthew Auld __u8 data[]; 3397141f733bSMatthew Auld }; 3398141f733bSMatthew Auld 3399141f733bSMatthew Auld /** 3400141f733bSMatthew Auld * enum drm_i915_gem_memory_class - Supported memory classes 3401141f733bSMatthew Auld */ 3402141f733bSMatthew Auld enum drm_i915_gem_memory_class { 3403141f733bSMatthew Auld /** @I915_MEMORY_CLASS_SYSTEM: System memory */ 340471021729SAbdiel Janulgue I915_MEMORY_CLASS_SYSTEM = 0, 340571021729SAbdiel Janulgue /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */ 34063f4309cbSMatthew Auld I915_MEMORY_CLASS_DEVICE, 340771021729SAbdiel Janulgue }; 340871021729SAbdiel Janulgue 34093f4309cbSMatthew Auld /** 34103f4309cbSMatthew Auld * struct drm_i915_gem_memory_class_instance - Identify particular memory region 34113f4309cbSMatthew Auld */ 34123f4309cbSMatthew Auld struct drm_i915_gem_memory_class_instance { 34133f4309cbSMatthew Auld /** @memory_class: See enum drm_i915_gem_memory_class */ 34143f4309cbSMatthew Auld __u16 memory_class; 34153f4309cbSMatthew Auld 34163f4309cbSMatthew Auld /** @memory_instance: Which instance */ 34173f4309cbSMatthew Auld __u16 memory_instance; 34183f4309cbSMatthew Auld }; 34193f4309cbSMatthew Auld 34203f4309cbSMatthew Auld /** 34213f4309cbSMatthew Auld * struct drm_i915_memory_region_info - Describes one region as known to the 34223f4309cbSMatthew Auld * driver. 34233f4309cbSMatthew Auld * 34243f4309cbSMatthew Auld * Note this is using both struct drm_i915_query_item and struct drm_i915_query. 34253f4309cbSMatthew Auld * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS 34263f4309cbSMatthew Auld * at &drm_i915_query_item.query_id. 34273f4309cbSMatthew Auld */ 34283f4309cbSMatthew Auld struct drm_i915_memory_region_info { 34293f4309cbSMatthew Auld /** @region: The class:instance pair encoding */ 34303f4309cbSMatthew Auld struct drm_i915_gem_memory_class_instance region; 34313f4309cbSMatthew Auld 34323f4309cbSMatthew Auld /** @rsvd0: MBZ */ 34333f4309cbSMatthew Auld __u32 rsvd0; 34343f4309cbSMatthew Auld 34353f4309cbSMatthew Auld /** 34363f4309cbSMatthew Auld * @probed_size: Memory probed by the driver 3437141f733bSMatthew Auld * 3438141f733bSMatthew Auld * Note that it should not be possible to ever encounter a zero value 3439141f733bSMatthew Auld * here; also note that no current region type will ever return -1 here.
3440141f733bSMatthew Auld * Although for future region types, this might be a possibility. The 3441141f733bSMatthew Auld * same applies to the other size fields. 3442141f733bSMatthew Auld */ 3443141f733bSMatthew Auld __u64 probed_size; 3444141f733bSMatthew Auld 3445141f733bSMatthew Auld /** 3446141f733bSMatthew Auld * @unallocated_size: Estimate of memory remaining 3447141f733bSMatthew Auld * 3448141f733bSMatthew Auld * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting. 3449141f733bSMatthew Auld * Without this (or if this is an older kernel) the value here will 3450141f733bSMatthew Auld * always equal the @probed_size. Note this is only currently tracked 3451141f733bSMatthew Auld * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here 3452141f733bSMatthew Auld * will always equal the @probed_size). 3453141f733bSMatthew Auld */ 3454141f733bSMatthew Auld __u64 unallocated_size; 3455141f733bSMatthew Auld 3456141f733bSMatthew Auld union { 3457141f733bSMatthew Auld /** @rsvd1: MBZ */ 34583f4309cbSMatthew Auld __u64 rsvd1[8]; 34593f4309cbSMatthew Auld struct { 346071021729SAbdiel Janulgue /** 346171021729SAbdiel Janulgue * @probed_cpu_visible_size: Memory probed by the driver 346271021729SAbdiel Janulgue * that is CPU accessible. 346371021729SAbdiel Janulgue * 346471021729SAbdiel Janulgue * This will always be <= @probed_size, and the 346571021729SAbdiel Janulgue * remainder (if there is any) will not be CPU 346671021729SAbdiel Janulgue * accessible. 346771021729SAbdiel Janulgue * 346871021729SAbdiel Janulgue * On systems without small BAR, the @probed_size will 346971021729SAbdiel Janulgue * always equal the @probed_cpu_visible_size, since all 347071021729SAbdiel Janulgue * of it will be CPU accessible. 347171021729SAbdiel Janulgue * 347271021729SAbdiel Janulgue * Note this is only tracked for 347371021729SAbdiel Janulgue * I915_MEMORY_CLASS_DEVICE regions (for other types the 347471021729SAbdiel Janulgue * value here will always equal the @probed_size). 347571021729SAbdiel Janulgue * 347671021729SAbdiel Janulgue * Note that if the value returned here is zero, then 347771021729SAbdiel Janulgue * this must be an old kernel which lacks the relevant 347871021729SAbdiel Janulgue * small-bar uAPI support (including 347971021729SAbdiel Janulgue * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on 348071021729SAbdiel Janulgue * such systems we should never actually end up with a 348171021729SAbdiel Janulgue * small BAR configuration, assuming we are able to load 348271021729SAbdiel Janulgue * the kernel module. Hence it should be safe to treat 348371021729SAbdiel Janulgue * this the same as when @probed_cpu_visible_size == 348471021729SAbdiel Janulgue * @probed_size. 348571021729SAbdiel Janulgue */ 348671021729SAbdiel Janulgue __u64 probed_cpu_visible_size; 348771021729SAbdiel Janulgue 348871021729SAbdiel Janulgue /** 348971021729SAbdiel Janulgue * @unallocated_cpu_visible_size: Estimate of CPU 349071021729SAbdiel Janulgue * visible memory remaining. 349171021729SAbdiel Janulgue * 349271021729SAbdiel Janulgue * Note this is only tracked for 349371021729SAbdiel Janulgue * I915_MEMORY_CLASS_DEVICE regions (for other types the 349471021729SAbdiel Janulgue * value here will always equal the 349571021729SAbdiel Janulgue * @probed_cpu_visible_size). 349671021729SAbdiel Janulgue * 349771021729SAbdiel Janulgue * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable 349871021729SAbdiel Janulgue * accounting.
Without this the value here will always 349971021729SAbdiel Janulgue * equal the @probed_cpu_visible_size. Note this is only 350071021729SAbdiel Janulgue * currently tracked for I915_MEMORY_CLASS_DEVICE 350171021729SAbdiel Janulgue * regions (for other types the value here will also 350271021729SAbdiel Janulgue * always equal the @probed_cpu_visible_size). 350371021729SAbdiel Janulgue * 350471021729SAbdiel Janulgue * If this is an older kernel the value here will be 350571021729SAbdiel Janulgue * zero, see also @probed_cpu_visible_size. 350671021729SAbdiel Janulgue */ 350771021729SAbdiel Janulgue __u64 unallocated_cpu_visible_size; 350871021729SAbdiel Janulgue }; 350971021729SAbdiel Janulgue }; 351071021729SAbdiel Janulgue }; 351171021729SAbdiel Janulgue 351271021729SAbdiel Janulgue /** 351371021729SAbdiel Janulgue * struct drm_i915_query_memory_regions 351471021729SAbdiel Janulgue * 351571021729SAbdiel Janulgue * The region info query enumerates all regions known to the driver by filling 351671021729SAbdiel Janulgue * in an array of struct drm_i915_memory_region_info structures. 351771021729SAbdiel Janulgue * 351871021729SAbdiel Janulgue * Example for getting the list of supported regions: 3519ebcb4029SMatthew Auld * 3520034d47b2STvrtko Ursulin * .. code-block:: C 3521034d47b2STvrtko Ursulin * 3522034d47b2STvrtko Ursulin * struct drm_i915_query_memory_regions *info; 3523034d47b2STvrtko Ursulin * struct drm_i915_query_item item = { 3524034d47b2STvrtko Ursulin * .query_id = DRM_I915_QUERY_MEMORY_REGIONS, 3525034d47b2STvrtko Ursulin * }; 3526034d47b2STvrtko Ursulin * struct drm_i915_query query = { 3527034d47b2STvrtko Ursulin * .num_items = 1, 3528034d47b2STvrtko Ursulin * .items_ptr = (uintptr_t)&item, 3529034d47b2STvrtko Ursulin * }; 3530ebcb4029SMatthew Auld * int err, i; 3531ebcb4029SMatthew Auld * 3532ebcb4029SMatthew Auld * // First query the size of the blob we need, this needs to be large 3533525e93f6SMatthew Auld * // enough to hold our array of regions. The kernel will fill out the 3534525e93f6SMatthew Auld * // item.length for us, which is the number of bytes we need. 3535525e93f6SMatthew Auld * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 3536525e93f6SMatthew Auld * if (err) ... 3537525e93f6SMatthew Auld * 3538ebcb4029SMatthew Auld * info = calloc(1, item.length); 3539ebcb4029SMatthew Auld * // Now that we allocated the required number of bytes, we call the ioctl 3540ebcb4029SMatthew Auld * // again, this time with the data_ptr pointing to our newly allocated 3541ebcb4029SMatthew Auld * // blob, which the kernel can then populate with all the region info. 3542ebcb4029SMatthew Auld * item.data_ptr = (uintptr_t)info; 3543ebcb4029SMatthew Auld * 3544ebcb4029SMatthew Auld * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 35458133a6daSMatthew Auld * if (err) ... 35468133a6daSMatthew Auld * 35478133a6daSMatthew Auld * // We can now access each region in the array 3548caa574ffSMatthew Auld * for (i = 0; i < info->num_regions; i++) { 35498133a6daSMatthew Auld * struct drm_i915_memory_region_info mr = info->regions[i]; 35508133a6daSMatthew Auld * u16 class = mr.region.memory_class; 35518133a6daSMatthew Auld * u16 instance = mr.region.memory_instance; 3552caa574ffSMatthew Auld * 3553caa574ffSMatthew Auld * ....
3554caa574ffSMatthew Auld * } 3555caa574ffSMatthew Auld * 3556caa574ffSMatthew Auld * free(info); 3557caa574ffSMatthew Auld */ 3558caa574ffSMatthew Auld struct drm_i915_query_memory_regions { 3559caa574ffSMatthew Auld /** @num_regions: Number of supported regions */ 35608133a6daSMatthew Auld __u32 num_regions; 35618133a6daSMatthew Auld 35628133a6daSMatthew Auld /** @rsvd: MBZ */ 35638133a6daSMatthew Auld __u32 rsvd[3]; 3564ebcb4029SMatthew Auld 3565ebcb4029SMatthew Auld /** @regions: Info about each supported region */ 3566525e93f6SMatthew Auld struct drm_i915_memory_region_info regions[]; 3567ebcb4029SMatthew Auld }; 3568ebcb4029SMatthew Auld 3569ebcb4029SMatthew Auld /** 3570ebcb4029SMatthew Auld * DOC: GuC HWCONFIG blob uAPI 3571ebcb4029SMatthew Auld * 3572ebcb4029SMatthew Auld * The GuC produces a blob with information about the current device. 3573525e93f6SMatthew Auld * i915 reads this blob from GuC and makes it available via this uAPI. 3574525e93f6SMatthew Auld * 3575525e93f6SMatthew Auld * The format and meaning of the blob content are documented in the 3576525e93f6SMatthew Auld * Programmer's Reference Manual. 3577525e93f6SMatthew Auld */ 3578525e93f6SMatthew Auld 3579525e93f6SMatthew Auld /** 3580525e93f6SMatthew Auld * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added 3581525e93f6SMatthew Auld * extension support using struct i915_user_extension. 3582525e93f6SMatthew Auld * 3583525e93f6SMatthew Auld * Note that new buffer flags should be added here, at least for the stuff that 3584525e93f6SMatthew Auld * is immutable. Previously we would have two ioctls, one to create the object 3585525e93f6SMatthew Auld * with gem_create, and another to apply various parameters; however, this 3586525e93f6SMatthew Auld * creates some ambiguity for the params which are considered immutable. Also in 3587525e93f6SMatthew Auld * general we're phasing out the various SET/GET ioctls. 3588525e93f6SMatthew Auld */ 3589525e93f6SMatthew Auld struct drm_i915_gem_create_ext { 3590525e93f6SMatthew Auld /** 3591525e93f6SMatthew Auld * @size: Requested size for the object. 3592525e93f6SMatthew Auld * 3593525e93f6SMatthew Auld * The (page-aligned) allocated size for the object will be returned. 3594525e93f6SMatthew Auld * 3595525e93f6SMatthew Auld * On platforms like DG2/ATS the kernel will always use 64K or larger 3596525e93f6SMatthew Auld * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a 3597525e93f6SMatthew Auld * minimum of 64K GTT alignment for such objects. 3598525e93f6SMatthew Auld * 3599525e93f6SMatthew Auld * NOTE: Previously the ABI here required a minimum GTT alignment of 2M 3600525e93f6SMatthew Auld * on DG2/ATS, due to how the hardware implemented 64K GTT page support, 3601525e93f6SMatthew Auld * where we had the following complications: 3602525e93f6SMatthew Auld * 3603525e93f6SMatthew Auld * 1) The entire PDE (which covers a 2MB virtual address range) must 3604525e93f6SMatthew Auld * contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same 3605525e93f6SMatthew Auld * PDE is forbidden by the hardware. 3606525e93f6SMatthew Auld * 3607525e93f6SMatthew Auld * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM 3608525e93f6SMatthew Auld * objects. 3609525e93f6SMatthew Auld * 3610525e93f6SMatthew Auld * However on actual production HW this was completely changed to now 3611525e93f6SMatthew Auld * allow setting a TLB hint at the PTE level (see PS64), which is a lot 3612525e93f6SMatthew Auld * more flexible than the above.
With this, the 2M restriction was 3613525e93f6SMatthew Auld * dropped and we now only require 64K. 3614525e93f6SMatthew Auld */ 3615525e93f6SMatthew Auld __u64 size; 3616ebcb4029SMatthew Auld 3617525e93f6SMatthew Auld /** 3618ebcb4029SMatthew Auld * @handle: Returned handle for the object. 3619ebcb4029SMatthew Auld * 3620ebcb4029SMatthew Auld * Object handles are nonzero. 3621ebcb4029SMatthew Auld */ 3622ebcb4029SMatthew Auld __u32 handle; 3623ebcb4029SMatthew Auld 3624ebcb4029SMatthew Auld /** 3625ebcb4029SMatthew Auld * @flags: Optional flags. 3626ebcb4029SMatthew Auld * 3627ebcb4029SMatthew Auld * Supported values: 36282459e56fSMatthew Auld * 36292459e56fSMatthew Auld * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that 3630d3ac8d42SDaniele Ceraolo Spurio * the object will need to be accessed via the CPU. 3631d3ac8d42SDaniele Ceraolo Spurio * 3632d3ac8d42SDaniele Ceraolo Spurio * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only 3633ebcb4029SMatthew Auld * strictly required on configurations where some subset of the device 36342459e56fSMatthew Auld * memory is directly visible/mappable through the CPU (which we also 3635d3ac8d42SDaniele Ceraolo Spurio * call small BAR), like on some DG2+ systems. Note that this is quite 3636ebcb4029SMatthew Auld * undesirable, but due to various factors like the client CPU, BIOS etc. 3637ebcb4029SMatthew Auld * it's something we can expect to see in the wild. See 3638ebcb4029SMatthew Auld * &drm_i915_memory_region_info.probed_cpu_visible_size for how to 36392459e56fSMatthew Auld * determine if this applies to a given system. 36402459e56fSMatthew Auld * 36412459e56fSMatthew Auld * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to 36422459e56fSMatthew Auld * ensure the kernel can always spill the allocation to system memory, 36432459e56fSMatthew Auld * if the object can't be allocated in the mappable part of 36442459e56fSMatthew Auld * I915_MEMORY_CLASS_DEVICE. 36452459e56fSMatthew Auld * 36462459e56fSMatthew Auld * Also note that since the kernel only supports flat-CCS on objects 36472459e56fSMatthew Auld * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 36482459e56fSMatthew Auld * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with 36492459e56fSMatthew Auld * flat-CCS. 36502459e56fSMatthew Auld * 36512459e56fSMatthew Auld * Without this hint, the kernel will assume that non-mappable 36522459e56fSMatthew Auld * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the 36532459e56fSMatthew Auld * kernel can still migrate the object to the mappable part, as a last 36542459e56fSMatthew Auld * resort, if userspace ever CPU faults this object, but this might be 36552459e56fSMatthew Auld * expensive, and so ideally should be avoided. 36562459e56fSMatthew Auld * 36572459e56fSMatthew Auld * On older kernels which lack the relevant small-bar uAPI support (see 36582459e56fSMatthew Auld * also &drm_i915_memory_region_info.probed_cpu_visible_size), 36592459e56fSMatthew Auld * usage of the flag will result in an error, but it should NEVER be 36602459e56fSMatthew Auld * possible to end up with a small BAR configuration, assuming we can 36612459e56fSMatthew Auld * also successfully load the i915 kernel module. In such cases the 36622459e56fSMatthew Auld * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as 36632459e56fSMatthew Auld * such there are zero restrictions on where the object can be placed.
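 *
 * As a minimal sketch, an object that prefers device local-memory but
 * must stay CPU-mappable could be created as below (the placements
 * array and instance number are illustrative; see struct
 * drm_i915_gem_create_ext_memory_regions for the placements extension
 * itself):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance placements[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		// A system memory placement is mandatory with this flag, so
 *		// the kernel can always spill the object if needed.
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.num_regions = 2,
 *		.regions = (uintptr_t)placements,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...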
36642459e56fSMatthew Auld */ 36652459e56fSMatthew Auld #define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0) 36662459e56fSMatthew Auld __u32 flags; 36672459e56fSMatthew Auld 36682459e56fSMatthew Auld /** 36692459e56fSMatthew Auld * @extensions: The chain of extensions to apply to this object. 36702459e56fSMatthew Auld * 36712459e56fSMatthew Auld * This will be useful in the future when we need to support several 36722459e56fSMatthew Auld * different extensions, and we need to apply more than one when 36732459e56fSMatthew Auld * creating the object. See struct i915_user_extension. 36742459e56fSMatthew Auld * 36752459e56fSMatthew Auld * If we don't supply any extensions then we get the same old gem_create 3676a50794f2SRamalingam C * behaviour. 3677a50794f2SRamalingam C * 3678a50794f2SRamalingam C * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see 3679a50794f2SRamalingam C * struct drm_i915_gem_create_ext_memory_regions. 3680a50794f2SRamalingam C * 3681a50794f2SRamalingam C * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see 3682a50794f2SRamalingam C * struct drm_i915_gem_create_ext_protected_content. 3683a50794f2SRamalingam C * 3684a50794f2SRamalingam C * For I915_GEM_CREATE_EXT_SET_PAT usage see 3685a50794f2SRamalingam C * struct drm_i915_gem_create_ext_set_pat. 3686a50794f2SRamalingam C */ 3687a50794f2SRamalingam C #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 3688a50794f2SRamalingam C #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1 3689a50794f2SRamalingam C #define I915_GEM_CREATE_EXT_SET_PAT 2 3690a50794f2SRamalingam C __u64 extensions; 3691a50794f2SRamalingam C }; 36922459e56fSMatthew Auld 36932459e56fSMatthew Auld /** 36942459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions - The 36952459e56fSMatthew Auld * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. 36962459e56fSMatthew Auld * 36972459e56fSMatthew Auld * Set the object with the desired set of placements/regions in priority 36982459e56fSMatthew Auld * order. Each entry must be unique and supported by the device. 36992459e56fSMatthew Auld * 37002459e56fSMatthew Auld * This is provided as an array of struct drm_i915_gem_memory_class_instance, or 37012459e56fSMatthew Auld * an equivalent layout of class:instance pair encodings. See struct 37022459e56fSMatthew Auld * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to 37032459e56fSMatthew Auld * query the supported regions for a device. 37042459e56fSMatthew Auld * 37052459e56fSMatthew Auld * As an example, on discrete devices, if we wish to set the placement as 37062459e56fSMatthew Auld * device local-memory we can do something like: 37072459e56fSMatthew Auld * 37082459e56fSMatthew Auld * .. 
code-block:: C 3709d3ac8d42SDaniele Ceraolo Spurio * 3710d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_memory_class_instance region_lmem = { 3711d3ac8d42SDaniele Ceraolo Spurio * .memory_class = I915_MEMORY_CLASS_DEVICE, 3712d3ac8d42SDaniele Ceraolo Spurio * .memory_instance = 0, 3713d3ac8d42SDaniele Ceraolo Spurio * }; 3714d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_memory_regions regions = { 3715d3ac8d42SDaniele Ceraolo Spurio * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS }, 3716d3ac8d42SDaniele Ceraolo Spurio * .regions = (uintptr_t)&region_lmem, 3717d3ac8d42SDaniele Ceraolo Spurio * .num_regions = 1, 3718d3ac8d42SDaniele Ceraolo Spurio * }; 3719d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext create_ext = { 3720d3ac8d42SDaniele Ceraolo Spurio * .size = 16 * PAGE_SIZE, 3721d3ac8d42SDaniele Ceraolo Spurio * .extensions = (uintptr_t)&regions, 3722d3ac8d42SDaniele Ceraolo Spurio * }; 3723d3ac8d42SDaniele Ceraolo Spurio * 3724d3ac8d42SDaniele Ceraolo Spurio * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); 3725d3ac8d42SDaniele Ceraolo Spurio * if (err) ... 3726d3ac8d42SDaniele Ceraolo Spurio * 3727d3ac8d42SDaniele Ceraolo Spurio * At which point we get the object handle in &drm_i915_gem_create_ext.handle, 3728d3ac8d42SDaniele Ceraolo Spurio * along with the final object size in &drm_i915_gem_create_ext.size, which 3729d3ac8d42SDaniele Ceraolo Spurio * should account for any rounding up, if required. 3730d3ac8d42SDaniele Ceraolo Spurio * 3731d3ac8d42SDaniele Ceraolo Spurio * Note that userspace has no means of knowing the current backing region 3732d3ac8d42SDaniele Ceraolo Spurio * for objects where @num_regions is larger than one. The kernel will only 3733d3ac8d42SDaniele Ceraolo Spurio * ensure that the priority order of the @regions array is honoured, either 3734d3ac8d42SDaniele Ceraolo Spurio * when initially placing the object, or when moving memory around due to 3735d3ac8d42SDaniele Ceraolo Spurio * memory pressure. 3736d3ac8d42SDaniele Ceraolo Spurio * 3737d3ac8d42SDaniele Ceraolo Spurio * On Flat-CCS capable HW, compression is supported for the objects residing 3738d3ac8d42SDaniele Ceraolo Spurio * in I915_MEMORY_CLASS_DEVICE. When such objects (compressed) have another 3739d3ac8d42SDaniele Ceraolo Spurio * memory class in @regions and are migrated (by i915, due to memory 3740d3ac8d42SDaniele Ceraolo Spurio * constraints) to the non I915_MEMORY_CLASS_DEVICE region, then i915 needs to 3741d3ac8d42SDaniele Ceraolo Spurio * decompress the content. But i915 doesn't have the required information to 3742d3ac8d42SDaniele Ceraolo Spurio * decompress the userspace compressed objects. 3743d3ac8d42SDaniele Ceraolo Spurio * 3744d3ac8d42SDaniele Ceraolo Spurio * So i915 supports Flat-CCS only on objects which can reside solely in 3745d3ac8d42SDaniele Ceraolo Spurio * I915_MEMORY_CLASS_DEVICE regions. 3746d3ac8d42SDaniele Ceraolo Spurio */ 3747d3ac8d42SDaniele Ceraolo Spurio struct drm_i915_gem_create_ext_memory_regions { 3748d3ac8d42SDaniele Ceraolo Spurio /** @base: Extension link. See struct i915_user_extension. */ 3749d3ac8d42SDaniele Ceraolo Spurio struct i915_user_extension base; 3750cbbd3764SHuang, Sean Z 3751cbbd3764SHuang, Sean Z /** @pad: MBZ */ 3752cbbd3764SHuang, Sean Z __u32 pad; 3753b1c1f5c4SEmil Velikov /** @num_regions: Number of elements in the @regions array. */ 3754b1c1f5c4SEmil Velikov __u32 num_regions; 3755b1c1f5c4SEmil Velikov /** 3756b1c1f5c4SEmil Velikov * @regions: The regions/placements array.
3757718dceddSDavid Howells * 3758 * An array of struct drm_i915_gem_memory_class_instance. 3759 */ 3760 __u64 regions; 3761 }; 3762 3763 /** 3764 * struct drm_i915_gem_create_ext_protected_content - The 3765 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT extension. 3766 * 3767 * If this extension is provided, buffer contents are expected to be protected 3768 * by PXP encryption and require decryption for scan out and processing. This 3769 * is only possible on platforms that have PXP enabled; in all other scenarios 3770 * using this extension will cause the ioctl to fail and return -ENODEV. The 3771 * flags parameter is reserved for future expansion and must currently be set 3772 * to zero. 3773 * 3774 * The buffer contents are considered invalid after a PXP session teardown. 3775 * 3776 * The encryption is guaranteed to be processed correctly only if the object 3777 * is submitted with a context created using the 3778 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks 3779 * at submission time on the validity of the objects involved. 3780 * 3781 * Below is an example of how to create a protected object: 3782 * 3783 * .. code-block:: C 3784 * 3785 * struct drm_i915_gem_create_ext_protected_content protected_ext = { 3786 * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT }, 3787 * .flags = 0, 3788 * }; 3789 * struct drm_i915_gem_create_ext create_ext = { 3790 * .size = PAGE_SIZE, 3791 * .extensions = (uintptr_t)&protected_ext, 3792 * }; 3793 * 3794 * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); 3795 * if (err) ... 3796 */ 3797 struct drm_i915_gem_create_ext_protected_content { 3798 /** @base: Extension link. See struct i915_user_extension. */ 3799 struct i915_user_extension base; 3800 /** @flags: reserved for future usage, currently MBZ */ 3801 __u32 flags; 3802 }; 3803 3804 /** 3805 * struct drm_i915_gem_create_ext_set_pat - The 3806 * I915_GEM_CREATE_EXT_SET_PAT extension. 3807 * 3808 * If this extension is provided, the specified caching policy (PAT index) is 3809 * applied to the buffer object. 3810 * 3811 * Below is an example of how to create an object with a specific caching policy: 3812 * 3813 * .. code-block:: C 3814 * 3815 * struct drm_i915_gem_create_ext_set_pat set_pat_ext = { 3816 * .base = { .name = I915_GEM_CREATE_EXT_SET_PAT }, 3817 * .pat_index = 0, 3818 * }; 3819 * struct drm_i915_gem_create_ext create_ext = { 3820 * .size = PAGE_SIZE, 3821 * .extensions = (uintptr_t)&set_pat_ext, 3822 * }; 3823 * 3824 * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); 3825 * if (err) ... 3826 */ 3827 struct drm_i915_gem_create_ext_set_pat { 3828 /** @base: Extension link. See struct i915_user_extension. */ 3829 struct i915_user_extension base; 3830 /** 3831 * @pat_index: PAT index to be set. 3832 * PAT index is a bit field in Page Table Entry to control caching 3833 * behaviors for GPU accesses. The definition of PAT index is 3834 * platform dependent and can be found in hardware specifications. 3835 */ 3836 __u32 pat_index; 3837 /** @rsvd: reserved for future use */ 3838 __u32 rsvd; 3839 }; 3840 3841 /* ID of the protected content session managed by i915 when PXP is active */ 3842 #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf 3843 3844 #if defined(__cplusplus) 3845 } 3846 #endif 3847 3848 #endif /* _UAPI_I915_DRM_H_ */