/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying that no more error
 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"
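
/*
 * Illustrative sketch (not part of the uAPI): one way userspace might watch
 * for the uevents above is with libudev, filtering on the drm subsystem and
 * looking up the event properties by the strings defined here. Error
 * handling is omitted and the exact setup may vary.
 *
 * .. code-block:: C
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon =
 *		udev_monitor_new_from_netlink(udev, "kernel");
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *
 *	for (;;) {
 *		struct udev_device *dev = udev_monitor_receive_device(mon);
 *		const char *val;
 *
 *		if (!dev)
 *			continue;
 *		val = udev_device_get_property_value(dev, I915_ERROR_UEVENT);
 *		if (val && !strcmp(val, "1"))
 *			; // GPU error detected; a "0" follows on reset completion
 *		udev_device_unref(dev);
 *	}
 */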

/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 = {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 = {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 = {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};
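
/*
 * Illustrative sketch (not part of the uAPI): a hypothetical extensible
 * struct embedding the head of such a chain. Any uAPI struct with an
 * extensions field is fed the chain head the same way; the struct name and
 * field below are assumptions for illustration only.
 *
 * .. code-block:: C
 *
 *	struct hypothetical_param {
 *		__u64 extensions; // chain head, or 0 if unused
 *		// ... other fields ...
 *	};
 *
 *	struct hypothetical_param p = {
 *		.extensions = (uintptr_t)&ext1, // ext1 -> ext2 -> ext3
 *	};
 */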

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
	I915_ENGINE_CLASS_RENDER	= 0,
	I915_ENGINE_CLASS_COPY		= 1,
	I915_ENGINE_CLASS_VIDEO		= 2,
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	/* should be kept compact */

	I915_ENGINE_CLASS_INVALID	= -1
};

/*
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
	__u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
 *
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
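
/*
 * Illustrative sketch (not part of the uAPI): the macros above build the
 * perf_event_attr.config value for a given counter. A hedged example of
 * reading render engine busyness via perf_event_open(2), assuming the i915
 * PMU type number has already been parsed from the sysfs "type" file of the
 * i915 event source; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type, // parsed from sysfs, see above
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	// System-wide event on CPU 0; engine counters are not per-task.
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	__u64 busy;
 *
 *	read(fd, &busy, sizeof(busy)); // cumulative busy time
 */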

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10
/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) with 0xa0 excluded. The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
#define DRM_I915_GEM_CREATE_EXT		0x3c
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC		 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT		 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask; nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1	Low priority
 * 0		Normal priority
 * 1 to 1k	Highest priority
 */
#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)

#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
} drm_i915_getparam_t;
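
/*
 * Illustrative sketch (not part of the uAPI): querying a parameter with
 * DRM_IOCTL_I915_GETPARAM and testing a capability bit, assuming an already
 * opened DRM fd; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	int caps = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_SCHEDULER,
 *		.value = &caps,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *	if (caps & I915_SCHEDULER_CAP_PRIORITY)
 *		; // batches are executed in priority order
 */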

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (e.g. on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};
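
/*
 * Illustrative sketch (not part of the uAPI): creating a buffer object and
 * reading back the returned handle, assuming an already opened DRM fd;
 * error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create create = {
 *		.size = 4096, // rounded up to page size by the kernel
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names the new object; create.size holds the
 *	// page-aligned allocated size.
 */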
788718dceddSDavid Howells 
789718dceddSDavid Howells struct drm_i915_gem_pread {
790718dceddSDavid Howells 	/** Handle for the object being read. */
791718dceddSDavid Howells 	__u32 handle;
792718dceddSDavid Howells 	__u32 pad;
793718dceddSDavid Howells 	/** Offset into the object to read from */
794718dceddSDavid Howells 	__u64 offset;
795718dceddSDavid Howells 	/** Length of data to read */
796718dceddSDavid Howells 	__u64 size;
797718dceddSDavid Howells 	/**
798718dceddSDavid Howells 	 * Pointer to write the data into.
799718dceddSDavid Howells 	 *
800718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
801718dceddSDavid Howells 	 */
802718dceddSDavid Howells 	__u64 data_ptr;
803718dceddSDavid Howells };
804718dceddSDavid Howells 
805718dceddSDavid Howells struct drm_i915_gem_pwrite {
806718dceddSDavid Howells 	/** Handle for the object being written to. */
807718dceddSDavid Howells 	__u32 handle;
808718dceddSDavid Howells 	__u32 pad;
809718dceddSDavid Howells 	/** Offset into the object to write to */
810718dceddSDavid Howells 	__u64 offset;
811718dceddSDavid Howells 	/** Length of data to write */
812718dceddSDavid Howells 	__u64 size;
813718dceddSDavid Howells 	/**
814718dceddSDavid Howells 	 * Pointer to read the data from.
815718dceddSDavid Howells 	 *
816718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
817718dceddSDavid Howells 	 */
818718dceddSDavid Howells 	__u64 data_ptr;
819718dceddSDavid Howells };
820718dceddSDavid Howells 
821718dceddSDavid Howells struct drm_i915_gem_mmap {
822718dceddSDavid Howells 	/** Handle for the object being mapped. */
823718dceddSDavid Howells 	__u32 handle;
824718dceddSDavid Howells 	__u32 pad;
825718dceddSDavid Howells 	/** Offset in the object to map. */
826718dceddSDavid Howells 	__u64 offset;
827718dceddSDavid Howells 	/**
828718dceddSDavid Howells 	 * Length of data to map.
829718dceddSDavid Howells 	 *
830718dceddSDavid Howells 	 * The value will be page-aligned.
831718dceddSDavid Howells 	 */
832718dceddSDavid Howells 	__u64 size;
833718dceddSDavid Howells 	/**
834718dceddSDavid Howells 	 * Returned pointer the data was mapped at.
835718dceddSDavid Howells 	 *
836718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
837718dceddSDavid Howells 	 */
838718dceddSDavid Howells 	__u64 addr_ptr;
8391816f923SAkash Goel 
8401816f923SAkash Goel 	/**
8411816f923SAkash Goel 	 * Flags for extended behaviour.
8421816f923SAkash Goel 	 *
8431816f923SAkash Goel 	 * Added in version 2.
8441816f923SAkash Goel 	 */
8451816f923SAkash Goel 	__u64 flags;
8461816f923SAkash Goel #define I915_MMAP_WC 0x1
847718dceddSDavid Howells };
848718dceddSDavid Howells 
849718dceddSDavid Howells struct drm_i915_gem_mmap_gtt {
850718dceddSDavid Howells 	/** Handle for the object being mapped. */
851718dceddSDavid Howells 	__u32 handle;
852718dceddSDavid Howells 	__u32 pad;
853718dceddSDavid Howells 	/**
854718dceddSDavid Howells 	 * Fake offset to use for subsequent mmap call
855718dceddSDavid Howells 	 *
856718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
857718dceddSDavid Howells 	 */
858718dceddSDavid Howells 	__u64 offset;
859718dceddSDavid Howells };
860718dceddSDavid Howells 
8617961c5b6SMaarten Lankhorst /**
8627961c5b6SMaarten Lankhorst  * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
8637961c5b6SMaarten Lankhorst  *
8647961c5b6SMaarten Lankhorst  * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
8657961c5b6SMaarten Lankhorst  * and is used to retrieve the fake offset to mmap an object specified by &handle.
8667961c5b6SMaarten Lankhorst  *
8677961c5b6SMaarten Lankhorst  * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
8687961c5b6SMaarten Lankhorst  * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
8697961c5b6SMaarten Lankhorst  * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
8707961c5b6SMaarten Lankhorst  */
871cc662126SAbdiel Janulgue struct drm_i915_gem_mmap_offset {
8727961c5b6SMaarten Lankhorst 	/** @handle: Handle for the object being mapped. */
873cc662126SAbdiel Janulgue 	__u32 handle;
8747961c5b6SMaarten Lankhorst 	/** @pad: Must be zero */
875cc662126SAbdiel Janulgue 	__u32 pad;
876cc662126SAbdiel Janulgue 	/**
8777961c5b6SMaarten Lankhorst 	 * @offset: The fake offset to use for subsequent mmap call
878cc662126SAbdiel Janulgue 	 *
879cc662126SAbdiel Janulgue 	 * This is a fixed-size type for 32/64 compatibility.
880cc662126SAbdiel Janulgue 	 */
881cc662126SAbdiel Janulgue 	__u64 offset;
882cc662126SAbdiel Janulgue 
883cc662126SAbdiel Janulgue 	/**
8847961c5b6SMaarten Lankhorst 	 * @flags: Flags for extended behaviour.
885cc662126SAbdiel Janulgue 	 *
8867961c5b6SMaarten Lankhorst 	 * It is mandatory that one of the `MMAP_OFFSET` types
8877961c5b6SMaarten Lankhorst 	 * should be included:
8887961c5b6SMaarten Lankhorst 	 *
8897961c5b6SMaarten Lankhorst 	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
8907961c5b6SMaarten Lankhorst 	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
8917961c5b6SMaarten Lankhorst 	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
8927961c5b6SMaarten Lankhorst 	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
8937961c5b6SMaarten Lankhorst 	 *
8947961c5b6SMaarten Lankhorst 	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
8957961c5b6SMaarten Lankhorst 	 * type. On devices without local memory, this caching mode is invalid.
8967961c5b6SMaarten Lankhorst 	 *
8977961c5b6SMaarten Lankhorst 	 * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
8987961c5b6SMaarten Lankhorst 	 * be used, depending on the object placement on creation. WB will be used
8997961c5b6SMaarten Lankhorst 	 * when the object can only exist in system memory, WC otherwise.
900cc662126SAbdiel Janulgue 	 */
901cc662126SAbdiel Janulgue 	__u64 flags;
9027961c5b6SMaarten Lankhorst 
903cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_GTT	0
904cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WC	1
905cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WB	2
906cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_UC	3
9077961c5b6SMaarten Lankhorst #define I915_MMAP_OFFSET_FIXED	4
908cc662126SAbdiel Janulgue 
9097961c5b6SMaarten Lankhorst 	/**
9107961c5b6SMaarten Lankhorst 	 * @extensions: Zero-terminated chain of extensions.
911cc662126SAbdiel Janulgue 	 *
912cc662126SAbdiel Janulgue 	 * No current extensions defined; mbz.
913cc662126SAbdiel Janulgue 	 */
914cc662126SAbdiel Janulgue 	__u64 extensions;
915cc662126SAbdiel Janulgue };
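
/*
 * A minimal usage sketch of the two-step mapping flow (assuming `fd` is an
 * open DRM fd, `handle` and `size` come from an earlier GEM create ioctl,
 * and the platform accepts WC mappings):
 *
 *	struct drm_i915_gem_mmap_offset mmo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo))
 *		return -errno;
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mmo.offset);
 *
 * The fake offset returned in @offset is only meaningful as an mmap()
 * offset on the same fd; it is not a GPU or physical address.
 */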
916cc662126SAbdiel Janulgue 
9173aa8c57fSMatthew Auld /**
9183aa8c57fSMatthew Auld  * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
9193aa8c57fSMatthew Auld  * preparation for accessing the pages via some CPU domain.
9203aa8c57fSMatthew Auld  *
9213aa8c57fSMatthew Auld  * Specifying a new write or read domain will flush the object out of the
9223aa8c57fSMatthew Auld  * previous domain (if required), before then updating the object's domain
9233aa8c57fSMatthew Auld  * tracking with the new domain.
9243aa8c57fSMatthew Auld  *
9253aa8c57fSMatthew Auld  * Note this might involve waiting for the object first if it is still active on
9263aa8c57fSMatthew Auld  * the GPU.
9273aa8c57fSMatthew Auld  *
9283aa8c57fSMatthew Auld  * Supported values for @read_domains and @write_domain:
9293aa8c57fSMatthew Auld  *
9303aa8c57fSMatthew Auld  *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
9313aa8c57fSMatthew Auld  *	- I915_GEM_DOMAIN_CPU: CPU cache domain
9323aa8c57fSMatthew Auld  *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
9333aa8c57fSMatthew Auld  *
9343aa8c57fSMatthew Auld  * All other domains are rejected.
93581340cf3SMatthew Auld  *
93681340cf3SMatthew Auld  * Note that for discrete, starting from DG1, this is no longer supported, and
93781340cf3SMatthew Auld  * is instead rejected. On such platforms the CPU domain is effectively static,
93881340cf3SMatthew Auld  * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
93981340cf3SMatthew Auld  * which can't be set explicitly and instead depends on the object placements,
94081340cf3SMatthew Auld  * as per the below.
94181340cf3SMatthew Auld  *
94281340cf3SMatthew Auld  * Implicit caching rules, starting from DG1:
94381340cf3SMatthew Auld  *
94481340cf3SMatthew Auld  *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
94581340cf3SMatthew Auld  *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
94681340cf3SMatthew Auld  *	  mapped as write-combined only.
94781340cf3SMatthew Auld  *
94881340cf3SMatthew Auld  *	- Everything else is always allocated and mapped as write-back, with the
94981340cf3SMatthew Auld  *	  guarantee that everything is also coherent with the GPU.
95081340cf3SMatthew Auld  *
95181340cf3SMatthew Auld  * Note that this is likely to change in the future again, where we might need
95281340cf3SMatthew Auld  * more flexibility on future devices, so making this all explicit as part of a
95381340cf3SMatthew Auld  * new &drm_i915_gem_create_ext extension is probable.
9543aa8c57fSMatthew Auld  */
955718dceddSDavid Howells struct drm_i915_gem_set_domain {
9563aa8c57fSMatthew Auld 	/** @handle: Handle for the object. */
957718dceddSDavid Howells 	__u32 handle;
958718dceddSDavid Howells 
9593aa8c57fSMatthew Auld 	/** @read_domains: New read domains. */
960718dceddSDavid Howells 	__u32 read_domains;
961718dceddSDavid Howells 
9623aa8c57fSMatthew Auld 	/**
9633aa8c57fSMatthew Auld 	 * @write_domain: New write domain.
9643aa8c57fSMatthew Auld 	 *
9653aa8c57fSMatthew Auld 	 * Note that having something in the write domain implies it's in the
9663aa8c57fSMatthew Auld 	 * read domain, and only that read domain.
9673aa8c57fSMatthew Auld 	 */
968718dceddSDavid Howells 	__u32 write_domain;
969718dceddSDavid Howells };
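
/*
 * For example, preparing an object for CPU reads and writes through a WB
 * mapping might look like the following sketch (assumes a valid `fd` and
 * `handle`, on a platform that still supports this ioctl):
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
 *		return -errno;
 */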
970718dceddSDavid Howells 
971718dceddSDavid Howells struct drm_i915_gem_sw_finish {
972718dceddSDavid Howells 	/** Handle for the object */
973718dceddSDavid Howells 	__u32 handle;
974718dceddSDavid Howells };
975718dceddSDavid Howells 
976718dceddSDavid Howells struct drm_i915_gem_relocation_entry {
977718dceddSDavid Howells 	/**
978718dceddSDavid Howells 	 * Handle of the buffer being pointed to by this relocation entry.
979718dceddSDavid Howells 	 *
980718dceddSDavid Howells 	 * It's appealing to make this be an index into the mm_validate_entry
981718dceddSDavid Howells 	 * list to refer to the buffer, but this allows the driver to create
982718dceddSDavid Howells 	 * a relocation list for state buffers and not re-write it per
983718dceddSDavid Howells 	 * exec using the buffer.
984718dceddSDavid Howells 	 */
985718dceddSDavid Howells 	__u32 target_handle;
986718dceddSDavid Howells 
987718dceddSDavid Howells 	/**
988718dceddSDavid Howells 	 * Value to be added to the offset of the target buffer to make up
989718dceddSDavid Howells 	 * the relocation entry.
990718dceddSDavid Howells 	 */
991718dceddSDavid Howells 	__u32 delta;
992718dceddSDavid Howells 
993718dceddSDavid Howells 	/** Offset in the buffer the relocation entry will be written into */
994718dceddSDavid Howells 	__u64 offset;
995718dceddSDavid Howells 
996718dceddSDavid Howells 	/**
997718dceddSDavid Howells 	 * Offset value of the target buffer that the relocation entry was last
998718dceddSDavid Howells 	 * written as.
999718dceddSDavid Howells 	 *
1000718dceddSDavid Howells 	 * If the buffer has the same offset as last time, we can skip syncing
1001718dceddSDavid Howells 	 * and writing the relocation.  This value is written back out by
1002718dceddSDavid Howells 	 * the execbuffer ioctl when the relocation is written.
1003718dceddSDavid Howells 	 */
1004718dceddSDavid Howells 	__u64 presumed_offset;
1005718dceddSDavid Howells 
1006718dceddSDavid Howells 	/**
1007718dceddSDavid Howells 	 * Target memory domains read by this operation.
1008718dceddSDavid Howells 	 */
1009718dceddSDavid Howells 	__u32 read_domains;
1010718dceddSDavid Howells 
1011718dceddSDavid Howells 	/**
1012718dceddSDavid Howells 	 * Target memory domains written by this operation.
1013718dceddSDavid Howells 	 *
1014718dceddSDavid Howells 	 * Note that only one domain may be written by the whole
1015718dceddSDavid Howells 	 * execbuffer operation, so that where there are conflicts,
1016718dceddSDavid Howells 	 * the application will get -EINVAL back.
1017718dceddSDavid Howells 	 */
1018718dceddSDavid Howells 	__u32 write_domain;
1019718dceddSDavid Howells };
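
/*
 * As an illustration (the names below are caller state, not part of this
 * uAPI), a single entry that patches a dword at `reloc_offset` in the batch
 * with the address of `target` plus a 256 byte offset:
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target,
 *		.delta = 256,
 *		.offset = reloc_offset,
 *		.presumed_offset = last_known_offset,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *	};
 *
 * If `last_known_offset` still matches the object's current placement, the
 * kernel can skip rewriting the batch entirely.
 */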
1020718dceddSDavid Howells 
1021718dceddSDavid Howells /** @{
1022718dceddSDavid Howells  * Intel memory domains
1023718dceddSDavid Howells  *
1024718dceddSDavid Howells  * Most of these just align with the various caches in
1025718dceddSDavid Howells  * the system and are used to flush and invalidate as
1026718dceddSDavid Howells  * objects end up cached in different domains.
1027718dceddSDavid Howells  */
1028718dceddSDavid Howells /** CPU cache */
1029718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU		0x00000001
1030718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */
1031718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER		0x00000002
1032718dceddSDavid Howells /** Sampler cache, used by texture engine */
1033718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER		0x00000004
1034718dceddSDavid Howells /** Command queue, used to load batch buffers */
1035718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND		0x00000008
1036718dceddSDavid Howells /** Instruction cache, used by shader programs */
1037718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
1038718dceddSDavid Howells /** Vertex address cache */
1039718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX		0x00000020
1040718dceddSDavid Howells /** GTT domain - aperture and scanout */
1041718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT		0x00000040
1042e22d8e3cSChris Wilson /** WC domain - uncached access */
1043e22d8e3cSChris Wilson #define I915_GEM_DOMAIN_WC		0x00000080
1044718dceddSDavid Howells /** @} */
1045718dceddSDavid Howells 
1046718dceddSDavid Howells struct drm_i915_gem_exec_object {
1047718dceddSDavid Howells 	/**
1048718dceddSDavid Howells 	 * User's handle for a buffer to be bound into the GTT for this
1049718dceddSDavid Howells 	 * operation.
1050718dceddSDavid Howells 	 */
1051718dceddSDavid Howells 	__u32 handle;
1052718dceddSDavid Howells 
1053718dceddSDavid Howells 	/** Number of relocations to be performed on this buffer */
1054718dceddSDavid Howells 	__u32 relocation_count;
1055718dceddSDavid Howells 	/**
1056718dceddSDavid Howells 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
1057718dceddSDavid Howells 	 * the relocations to be performed in this buffer.
1058718dceddSDavid Howells 	 */
1059718dceddSDavid Howells 	__u64 relocs_ptr;
1060718dceddSDavid Howells 
1061718dceddSDavid Howells 	/** Required alignment in graphics aperture */
1062718dceddSDavid Howells 	__u64 alignment;
1063718dceddSDavid Howells 
1064718dceddSDavid Howells 	/**
1065718dceddSDavid Howells 	 * Returned value of the updated offset of the object, for future
1066718dceddSDavid Howells 	 * presumed_offset writes.
1067718dceddSDavid Howells 	 */
1068718dceddSDavid Howells 	__u64 offset;
1069718dceddSDavid Howells };
1070718dceddSDavid Howells 
1071b5b6f6a6SJason Ekstrand /* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
1072718dceddSDavid Howells struct drm_i915_gem_execbuffer {
1073718dceddSDavid Howells 	/**
1074718dceddSDavid Howells 	 * List of buffers to be validated with their relocations to be
1075718dceddSDavid Howells 	 * performed on them.
1076718dceddSDavid Howells 	 *
1077718dceddSDavid Howells 	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
1078718dceddSDavid Howells 	 *
1079718dceddSDavid Howells 	 * These buffers must be listed in an order such that all relocations
1080718dceddSDavid Howells 	 * a buffer is performing refer to buffers that have already appeared
1081718dceddSDavid Howells 	 * in the validate list.
1082718dceddSDavid Howells 	 */
1083718dceddSDavid Howells 	__u64 buffers_ptr;
1084718dceddSDavid Howells 	__u32 buffer_count;
1085718dceddSDavid Howells 
1086718dceddSDavid Howells 	/** Offset in the batchbuffer to start execution from. */
1087718dceddSDavid Howells 	__u32 batch_start_offset;
1088718dceddSDavid Howells 	/** Bytes used in batchbuffer from batch_start_offset */
1089718dceddSDavid Howells 	__u32 batch_len;
1090718dceddSDavid Howells 	__u32 DR1;
1091718dceddSDavid Howells 	__u32 DR4;
1092718dceddSDavid Howells 	__u32 num_cliprects;
1093718dceddSDavid Howells 	/** This is a struct drm_clip_rect *cliprects */
1094718dceddSDavid Howells 	__u64 cliprects_ptr;
1095718dceddSDavid Howells };
1096718dceddSDavid Howells 
1097718dceddSDavid Howells struct drm_i915_gem_exec_object2 {
1098718dceddSDavid Howells 	/**
1099718dceddSDavid Howells 	 * User's handle for a buffer to be bound into the GTT for this
1100718dceddSDavid Howells 	 * operation.
1101718dceddSDavid Howells 	 */
1102718dceddSDavid Howells 	__u32 handle;
1103718dceddSDavid Howells 
1104718dceddSDavid Howells 	/** Number of relocations to be performed on this buffer */
1105718dceddSDavid Howells 	__u32 relocation_count;
1106718dceddSDavid Howells 	/**
1107718dceddSDavid Howells 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
1108718dceddSDavid Howells 	 * the relocations to be performed in this buffer.
1109718dceddSDavid Howells 	 */
1110718dceddSDavid Howells 	__u64 relocs_ptr;
1111718dceddSDavid Howells 
1112718dceddSDavid Howells 	/** Required alignment in graphics aperture */
1113718dceddSDavid Howells 	__u64 alignment;
1114718dceddSDavid Howells 
1115718dceddSDavid Howells 	/**
1116506a8e87SChris Wilson 	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
1117506a8e87SChris Wilson 	 * the user with the GTT offset at which this object will be pinned.
1118506a8e87SChris Wilson 	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
1119506a8e87SChris Wilson 	 * presumed_offset of the object.
1120506a8e87SChris Wilson 	 * During execbuffer2 the kernel populates it with the value of the
1121506a8e87SChris Wilson 	 * current GTT offset of the object, for future presumed_offset writes.
1122718dceddSDavid Howells 	 */
1123718dceddSDavid Howells 	__u64 offset;
1124718dceddSDavid Howells 
1125718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
1126ed5982e6SDaniel Vetter #define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
1127ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE		 (1<<2)
1128101b506aSMichel Thierry #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1129506a8e87SChris Wilson #define EXEC_OBJECT_PINNED		 (1<<4)
113091b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
113177ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and
113277ae9957SChris Wilson  * synchronises operations with outstanding rendering. This includes
113377ae9957SChris Wilson  * rendering on other devices if exported via dma-buf. However, sometimes
113477ae9957SChris Wilson  * this tracking is too coarse and the user knows better. For example,
113577ae9957SChris Wilson  * if the object is split into non-overlapping ranges shared between different
113677ae9957SChris Wilson  * clients or engines (i.e. suballocating objects), the implicit tracking
113777ae9957SChris Wilson  * by kernel assumes that each operation affects the whole object rather
113877ae9957SChris Wilson  * than an individual range, causing needless synchronisation between clients.
113977ae9957SChris Wilson  * The kernel will also forgo any CPU cache flushes prior to rendering from
114077ae9957SChris Wilson  * the object as the client is expected to be also handling such domain
114177ae9957SChris Wilson  * tracking.
114277ae9957SChris Wilson  *
114377ae9957SChris Wilson  * The kernel maintains the implicit tracking in order to manage resources
114477ae9957SChris Wilson  * used by the GPU - this flag only disables the synchronisation prior to
114577ae9957SChris Wilson  * rendering with this object in this execbuf.
114677ae9957SChris Wilson  *
114777ae9957SChris Wilson  * Opting out of implicit synchronisation requires the user to do its own
114877ae9957SChris Wilson  * explicit tracking to avoid rendering corruption. See, for example,
114977ae9957SChris Wilson  * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
115077ae9957SChris Wilson  */
115177ae9957SChris Wilson #define EXEC_OBJECT_ASYNC		(1<<6)
1152b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error
1153b0fd47adSChris Wilson  * state upon a GPU hang involving this batch for post-mortem debugging.
1154b0fd47adSChris Wilson  * These buffers are recorded in no particular order as "user" in
1155b0fd47adSChris Wilson  * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1156b0fd47adSChris Wilson  * if the kernel supports this flag.
1157b0fd47adSChris Wilson  */
1158b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE		(1<<7)
11599e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1160b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1161718dceddSDavid Howells 	__u64 flags;
1162ed5982e6SDaniel Vetter 
116391b2db6fSChris Wilson 	union {
1164718dceddSDavid Howells 		__u64 rsvd1;
116591b2db6fSChris Wilson 		__u64 pad_to_size;
116691b2db6fSChris Wilson 	};
1167718dceddSDavid Howells 	__u64 rsvd2;
1168718dceddSDavid Howells };
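
/*
 * For instance (sketch; `handle` and `gtt_offset` are caller state), an
 * entry describing a softpinned buffer that the batch writes, needing no
 * relocations:
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = handle,
 *		.offset = gtt_offset,
 *		.flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE |
 *			 EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
 *	};
 */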
1169718dceddSDavid Howells 
1170cf6e7bacSJason Ekstrand struct drm_i915_gem_exec_fence {
1171cf6e7bacSJason Ekstrand 	/**
1172cf6e7bacSJason Ekstrand 	 * User's handle for a drm_syncobj to wait on or signal.
1173cf6e7bacSJason Ekstrand 	 */
1174cf6e7bacSJason Ekstrand 	__u32 handle;
1175cf6e7bacSJason Ekstrand 
1176cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_WAIT            (1<<0)
1177cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_SIGNAL          (1<<1)
1178ebcaa1ffSTvrtko Ursulin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
1179cf6e7bacSJason Ekstrand 	__u32 flags;
1180cf6e7bacSJason Ekstrand };
1181cf6e7bacSJason Ekstrand 
11822ef6a01fSMatthew Auld /*
118313149e8bSLionel Landwerlin  * See drm_i915_gem_execbuffer_ext_timeline_fences.
118413149e8bSLionel Landwerlin  */
118513149e8bSLionel Landwerlin #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
118613149e8bSLionel Landwerlin 
11872ef6a01fSMatthew Auld /*
118813149e8bSLionel Landwerlin  * This structure describes an array of drm_syncobj and associated points for
118913149e8bSLionel Landwerlin  * timeline variants of drm_syncobj. It is invalid to append this structure to
119013149e8bSLionel Landwerlin  * the execbuf if I915_EXEC_FENCE_ARRAY is set.
119113149e8bSLionel Landwerlin  */
119213149e8bSLionel Landwerlin struct drm_i915_gem_execbuffer_ext_timeline_fences {
119313149e8bSLionel Landwerlin 	struct i915_user_extension base;
119413149e8bSLionel Landwerlin 
119513149e8bSLionel Landwerlin 	/**
119613149e8bSLionel Landwerlin 	 * Number of elements in the handles_ptr & values_ptr arrays.
119713149e8bSLionel Landwerlin 	 */
119813149e8bSLionel Landwerlin 	__u64 fence_count;
119913149e8bSLionel Landwerlin 
120013149e8bSLionel Landwerlin 	/**
120113149e8bSLionel Landwerlin 	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
120213149e8bSLionel Landwerlin 	 * fence_count.
120313149e8bSLionel Landwerlin 	 */
120413149e8bSLionel Landwerlin 	__u64 handles_ptr;
120513149e8bSLionel Landwerlin 
120613149e8bSLionel Landwerlin 	/**
120713149e8bSLionel Landwerlin 	 * Pointer to an array of u64 values of length fence_count. Values
120813149e8bSLionel Landwerlin 	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
120913149e8bSLionel Landwerlin 	 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
121013149e8bSLionel Landwerlin 	 */
121113149e8bSLionel Landwerlin 	__u64 values_ptr;
1212cda9edd0SLionel Landwerlin };
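
/*
 * Attaching the extension might look like this sketch, where `fences` and
 * `points` are caller-built arrays of length `n` and `execbuf` is the
 * struct drm_i915_gem_execbuffer2 being prepared:
 *
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		.fence_count = n,
 *		.handles_ptr = (__u64)(uintptr_t)fences,
 *		.values_ptr = (__u64)(uintptr_t)points,
 *	};
 *
 *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 *	execbuf.num_cliprects = 0;
 *	execbuf.cliprects_ptr = (__u64)(uintptr_t)&ext;
 */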
1213cda9edd0SLionel Landwerlin 
1214718dceddSDavid Howells struct drm_i915_gem_execbuffer2 {
1215718dceddSDavid Howells 	/**
1216718dceddSDavid Howells 	 * List of gem_exec_object2 structs
1217718dceddSDavid Howells 	 */
1218718dceddSDavid Howells 	__u64 buffers_ptr;
1219718dceddSDavid Howells 	__u32 buffer_count;
1220718dceddSDavid Howells 
1221718dceddSDavid Howells 	/** Offset in the batchbuffer to start execution from. */
1222718dceddSDavid Howells 	__u32 batch_start_offset;
1223718dceddSDavid Howells 	/** Bytes used in batchbuffer from batch_start_offset */
1224718dceddSDavid Howells 	__u32 batch_len;
1225718dceddSDavid Howells 	__u32 DR1;
1226718dceddSDavid Howells 	__u32 DR4;
1227718dceddSDavid Howells 	__u32 num_cliprects;
1228cf6e7bacSJason Ekstrand 	/**
1229cf6e7bacSJason Ekstrand 	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
1230cda9edd0SLionel Landwerlin 	 * & I915_EXEC_USE_EXTENSIONS are not set.
1231cda9edd0SLionel Landwerlin 	 *
1232cda9edd0SLionel Landwerlin 	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
1233cda9edd0SLionel Landwerlin 	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
1234cda9edd0SLionel Landwerlin 	 * of the array.
1235cda9edd0SLionel Landwerlin 	 *
1236cda9edd0SLionel Landwerlin 	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
1237cda9edd0SLionel Landwerlin 	 * single struct i915_user_extension and num_cliprects is 0.
1238cf6e7bacSJason Ekstrand 	 */
1239718dceddSDavid Howells 	__u64 cliprects_ptr;
1240d90c06d5SChris Wilson #define I915_EXEC_RING_MASK              (0x3f)
1241718dceddSDavid Howells #define I915_EXEC_DEFAULT                (0<<0)
1242718dceddSDavid Howells #define I915_EXEC_RENDER                 (1<<0)
1243718dceddSDavid Howells #define I915_EXEC_BSD                    (2<<0)
1244718dceddSDavid Howells #define I915_EXEC_BLT                    (3<<0)
124582f91b6eSXiang, Haihao #define I915_EXEC_VEBOX                  (4<<0)
1246718dceddSDavid Howells 
1247718dceddSDavid Howells /* Used for switching the constants addressing mode on gen4+ RENDER ring.
1248718dceddSDavid Howells  * Gen6+ only supports relative addressing to dynamic state (default) and
1249718dceddSDavid Howells  * absolute addressing.
1250718dceddSDavid Howells  *
1251718dceddSDavid Howells  * These flags are ignored for the BSD and BLT rings.
1252718dceddSDavid Howells  */
1253718dceddSDavid Howells #define I915_EXEC_CONSTANTS_MASK 	(3<<6)
1254718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1255718dceddSDavid Howells #define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
1256718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1257718dceddSDavid Howells 	__u64 flags;
1258718dceddSDavid Howells 	__u64 rsvd1; /* now used for context info */
1259718dceddSDavid Howells 	__u64 rsvd2;
1260718dceddSDavid Howells };
1261718dceddSDavid Howells 
1262718dceddSDavid Howells /** Resets the SO write offset registers for transform feedback on gen7. */
1263718dceddSDavid Howells #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1264718dceddSDavid Howells 
1265c2fb7916SDaniel Vetter /** Request a privileged ("secure") batch buffer. Note only available for
1266c2fb7916SDaniel Vetter  * DRM_ROOT_ONLY | DRM_MASTER processes.
1267c2fb7916SDaniel Vetter  */
1268c2fb7916SDaniel Vetter #define I915_EXEC_SECURE		(1<<9)
1269c2fb7916SDaniel Vetter 
1270b45305fcSDaniel Vetter /** Inform the kernel that the batch is and will always be pinned. This
1271b45305fcSDaniel Vetter  * negates the requirement for a workaround to be performed to avoid
1272b45305fcSDaniel Vetter  * an incoherent CS (such as can be found on 830/845). If this flag is
1273b45305fcSDaniel Vetter  * not passed, the kernel will endeavour to make sure the batch is
1274b45305fcSDaniel Vetter  * coherent with the CS before execution. If this flag is passed,
1275b45305fcSDaniel Vetter  * userspace assumes the responsibility for ensuring the same.
1276b45305fcSDaniel Vetter  */
1277b45305fcSDaniel Vetter #define I915_EXEC_IS_PINNED		(1<<10)
1278b45305fcSDaniel Vetter 
1279c3d19d3cSGeert Uytterhoeven /** Provide a hint to the kernel that the command stream and auxiliary
1280ed5982e6SDaniel Vetter  * state buffers already hold the correct presumed addresses and so the
1281ed5982e6SDaniel Vetter  * relocation process may be skipped if no buffers need to be moved in
1282ed5982e6SDaniel Vetter  * preparation for the execbuffer.
1283ed5982e6SDaniel Vetter  */
1284ed5982e6SDaniel Vetter #define I915_EXEC_NO_RELOC		(1<<11)
1285ed5982e6SDaniel Vetter 
1286eef90ccbSChris Wilson /** Use the reloc.handle as an index into the exec object array rather
1287eef90ccbSChris Wilson  * than as the per-file handle.
1288eef90ccbSChris Wilson  */
1289eef90ccbSChris Wilson #define I915_EXEC_HANDLE_LUT		(1<<12)
1290eef90ccbSChris Wilson 
12918d360dffSZhipeng Gong /** Used for switching BSD rings on the platforms with two BSD rings */
1292d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_SHIFT	 (13)
1293d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1294d9da6aa0STvrtko Ursulin /* default ping-pong mode */
1295d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1296d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1297d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
12988d360dffSZhipeng Gong 
1299a9ed33caSAbdiel Janulgue /** Tell the kernel that the batchbuffer is processed by
1300a9ed33caSAbdiel Janulgue  *  the resource streamer.
1301a9ed33caSAbdiel Janulgue  */
1302a9ed33caSAbdiel Janulgue #define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1303a9ed33caSAbdiel Janulgue 
1304fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1305fec0445cSChris Wilson  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1306fec0445cSChris Wilson  * the batch.
1307fec0445cSChris Wilson  *
1308fec0445cSChris Wilson  * Returns -EINVAL if the sync_file fd cannot be found.
1309fec0445cSChris Wilson  */
1310fec0445cSChris Wilson #define I915_EXEC_FENCE_IN		(1<<16)
1311fec0445cSChris Wilson 
1312fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1313fec0445cSChris Wilson  * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1314fec0445cSChris Wilson  * to the caller, and it should be closed after use. (The fd is a regular
1315fec0445cSChris Wilson  * file descriptor and will be cleaned up on process termination. It holds
1316fec0445cSChris Wilson  * a reference to the request, but nothing else.)
1317fec0445cSChris Wilson  *
1318fec0445cSChris Wilson  * The sync_file fd can be combined with other sync_file and passed either
1319fec0445cSChris Wilson  * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1320fec0445cSChris Wilson  * will only occur after this request completes), or to other devices.
1321fec0445cSChris Wilson  *
1322fec0445cSChris Wilson  * Using I915_EXEC_FENCE_OUT requires use of
1323fec0445cSChris Wilson  * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1324fec0445cSChris Wilson  * back to userspace. Failure to do so will cause the out-fence to always
1325fec0445cSChris Wilson  * be reported as zero, and the real fence fd to be leaked.
1326fec0445cSChris Wilson  */
1327fec0445cSChris Wilson #define I915_EXEC_FENCE_OUT		(1<<17)
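
/*
 * A sketch of the out-fence flow (`execbuf` assumed otherwise initialised
 * as usual):
 *
 *	int out_fence = -1;
 *
 *	execbuf.flags |= I915_EXEC_FENCE_OUT;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0)
 *		out_fence = execbuf.rsvd2 >> 32;
 */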
1328fec0445cSChris Wilson 
13291a71cf2fSChris Wilson /*
13301a71cf2fSChris Wilson  * Traditionally the execbuf ioctl has only considered the final element in
13311a71cf2fSChris Wilson  * the execobject[] to be the executable batch. Often though, the client
13321a71cf2fSChris Wilson  * will know the batch object prior to construction and being able to place
13331a71cf2fSChris Wilson  * it into the execobject[] array first can simplify the relocation tracking.
13341a71cf2fSChris Wilson  * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
13351a71cf2fSChris Wilson  * execobject[] as the batch instead (the default is to use the last
13361a71cf2fSChris Wilson  * element).
13371a71cf2fSChris Wilson  */
13381a71cf2fSChris Wilson #define I915_EXEC_BATCH_FIRST		(1<<18)
1339cf6e7bacSJason Ekstrand 
1340cf6e7bacSJason Ekstrand /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1341cf6e7bacSJason Ekstrand  * define an array of i915_gem_exec_fence structures which specify a set of
1342cf6e7bacSJason Ekstrand  * dma fences to wait upon or signal.
1343cf6e7bacSJason Ekstrand  */
1344cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_ARRAY   (1<<19)
1345cf6e7bacSJason Ekstrand 
1346a88b6e4cSChris Wilson /*
1347a88b6e4cSChris Wilson  * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1348a88b6e4cSChris Wilson  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1349a88b6e4cSChris Wilson  * the batch.
1350a88b6e4cSChris Wilson  *
1351a88b6e4cSChris Wilson  * Returns -EINVAL if the sync_file fd cannot be found.
1352a88b6e4cSChris Wilson  */
1353a88b6e4cSChris Wilson #define I915_EXEC_FENCE_SUBMIT		(1 << 20)
1354a88b6e4cSChris Wilson 
1355cda9edd0SLionel Landwerlin /*
1356cda9edd0SLionel Landwerlin  * Setting I915_EXEC_USE_EXTENSIONS implies that
1357cda9edd0SLionel Landwerlin  * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
1358cda9edd0SLionel Landwerlin  * list of i915_user_extension. Each i915_user_extension node is the base of a
1359cda9edd0SLionel Landwerlin  * larger structure. The list of supported structures are listed in the
1360cda9edd0SLionel Landwerlin  * drm_i915_gem_execbuffer_ext enum.
1361cda9edd0SLionel Landwerlin  */
1362cda9edd0SLionel Landwerlin #define I915_EXEC_USE_EXTENSIONS	(1 << 21)
1363cda9edd0SLionel Landwerlin 
1364cda9edd0SLionel Landwerlin #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1365ed5982e6SDaniel Vetter 
1366718dceddSDavid Howells #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1367718dceddSDavid Howells #define i915_execbuffer2_set_context_id(eb2, context) \
1368718dceddSDavid Howells 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1369718dceddSDavid Howells #define i915_execbuffer2_get_context_id(eb2) \
1370718dceddSDavid Howells 	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
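
/*
 * Putting the pieces together, a minimal single-buffer submission could look
 * like the following sketch (error handling trimmed; `fd`, `ctx_id`,
 * `batch_handle` and `batch_len` are assumed to exist):
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (__u64)(uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
 *		return -errno;
 *
 * The batch is the last (here, only) element of the execobject array unless
 * I915_EXEC_BATCH_FIRST is set.
 */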
1371718dceddSDavid Howells 
1372718dceddSDavid Howells struct drm_i915_gem_pin {
1373718dceddSDavid Howells 	/** Handle of the buffer to be pinned. */
1374718dceddSDavid Howells 	__u32 handle;
1375718dceddSDavid Howells 	__u32 pad;
1376718dceddSDavid Howells 
1377718dceddSDavid Howells 	/** alignment required within the aperture */
1378718dceddSDavid Howells 	__u64 alignment;
1379718dceddSDavid Howells 
1380718dceddSDavid Howells 	/** Returned GTT offset of the buffer. */
1381718dceddSDavid Howells 	__u64 offset;
1382718dceddSDavid Howells };
1383718dceddSDavid Howells 
1384718dceddSDavid Howells struct drm_i915_gem_unpin {
1385718dceddSDavid Howells 	/** Handle of the buffer to be unpinned. */
1386718dceddSDavid Howells 	__u32 handle;
1387718dceddSDavid Howells 	__u32 pad;
1388718dceddSDavid Howells };
1389718dceddSDavid Howells 
1390718dceddSDavid Howells struct drm_i915_gem_busy {
1391718dceddSDavid Howells 	/** Handle of the buffer to check for busy */
1392718dceddSDavid Howells 	__u32 handle;
1393718dceddSDavid Howells 
1394426960beSChris Wilson 	/** Return busy status
1395426960beSChris Wilson 	 *
1396426960beSChris Wilson 	 * A return of 0 implies that the object is idle (after
1397426960beSChris Wilson 	 * having flushed any pending activity), and a non-zero return that
1398426960beSChris Wilson 	 * the object is still in-flight on the GPU. (The GPU has not yet
1399426960beSChris Wilson 	 * signaled completion for all pending requests that reference the
14001255501dSChris Wilson 	 * object.) An object is guaranteed to become idle eventually (so
14011255501dSChris Wilson 	 * long as no new GPU commands are executed upon it). Due to the
14021255501dSChris Wilson 	 * asynchronous nature of the hardware, an object reported
14031255501dSChris Wilson 	 * as busy may become idle before the ioctl is completed.
14041255501dSChris Wilson 	 *
14051255501dSChris Wilson 	 * Furthermore, if the object is busy, which engine is busy is only
1406c8b50242SChris Wilson 	 * provided as a guide and only indirectly by reporting its class
1407c8b50242SChris Wilson 	 * (there may be more than one engine in each class). There are race
1408c8b50242SChris Wilson 	 * conditions which prevent the report of which engines are busy from
1409c8b50242SChris Wilson 	 * being always accurate.  However, the converse is not true. If the
1410c8b50242SChris Wilson 	 * object is idle, the result of the ioctl, that all engines are idle,
1411c8b50242SChris Wilson 	 * is accurate.
1412426960beSChris Wilson 	 *
1413426960beSChris Wilson 	 * The returned dword is split into two fields to indicate both
1414c8b50242SChris Wilson 	 * the engine classes on which the object is being read, and the
1415c8b50242SChris Wilson 	 * engine class on which it is currently being written (if any).
1416426960beSChris Wilson 	 *
1417426960beSChris Wilson 	 * The low word (bits 0:15) indicate if the object is being written
1418426960beSChris Wilson 	 * to by any engine (there can only be one, as the GEM implicit
1419426960beSChris Wilson 	 * synchronisation rules force writes to be serialised). Only the
1420c8b50242SChris Wilson 	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1421c8b50242SChris Wilson 	 * 1 not 0 etc) for the last write is reported.
1422426960beSChris Wilson 	 *
1423c8b50242SChris Wilson 	 * The high word (bits 16:31) is a bitmask of which engine classes
1424c8b50242SChris Wilson 	 * are currently reading from the object. Multiple engines may be
1425426960beSChris Wilson 	 * reading from the object simultaneously.
1426426960beSChris Wilson 	 *
1427c8b50242SChris Wilson 	 * The value of each engine class is the same as specified in the
1428c649432eSTvrtko Ursulin 	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
1429c8b50242SChris Wilson 	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1430c649432eSTvrtko Ursulin 	 * Some hardware may have parallel execution engines, e.g. multiple
1431c649432eSTvrtko Ursulin 	 * media engines, which are mapped to the same class identifier and so
1432c649432eSTvrtko Ursulin 	 * are not separately reported for busyness.
14331255501dSChris Wilson 	 *
14341255501dSChris Wilson 	 * Caveat emptor:
14351255501dSChris Wilson 	 * Only the boolean result of this query is reliable; that is whether
14361255501dSChris Wilson 	 * the object is idle or busy. The report of which engines are busy
14371255501dSChris Wilson 	 * should be only used as a heuristic.
1438718dceddSDavid Howells 	 */
1439718dceddSDavid Howells 	__u32 busy;
1440718dceddSDavid Howells };
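
/*
 * Decoding @busy per the rules above might look like this sketch, where
 * `handle` refers to an existing object:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy) {
 *		__u16 write_class = (busy.busy & 0xffff) - 1;
 *		__u16 read_class_mask = busy.busy >> 16;
 *		...
 *	}
 *
 * where `write_class` is only meaningful when the low word is non-zero, and
 * a set bit N in `read_class_mask` indicates a reader of engine class N.
 */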
1441718dceddSDavid Howells 
144235c7ab42SDaniel Vetter /**
1443289f5a72SMatthew Auld  * struct drm_i915_gem_caching - Set or get the caching for given object
1444289f5a72SMatthew Auld  * handle.
144535c7ab42SDaniel Vetter  *
1446289f5a72SMatthew Auld  * Allow userspace to control the GTT caching bits for a given object when the
1447289f5a72SMatthew Auld  * object is later mapped through the ppGTT(or GGTT on older platforms lacking
1448289f5a72SMatthew Auld  * ppGTT support, or if the object is used for scanout). Note that this might
1449289f5a72SMatthew Auld  * require unbinding the object from the GTT first, if its current caching value
1450289f5a72SMatthew Auld  * doesn't match.
1451e7737b67SMatthew Auld  *
1452e7737b67SMatthew Auld  * Note that this all changes on discrete platforms, starting from DG1, the
1453e7737b67SMatthew Auld  * set/get caching is no longer supported, and is now rejected.  Instead the CPU
1454e7737b67SMatthew Auld  * caching attributes (WB vs WC) will become an immutable creation time property
1455e7737b67SMatthew Auld  * for the object, along with the GTT caching level. For now we don't expose any
1456e7737b67SMatthew Auld  * new uAPI for this, instead on DG1 this is all implicit, although this largely
1457e7737b67SMatthew Auld  * shouldn't matter since DG1 is coherent by default (without any way of
1458e7737b67SMatthew Auld  * controlling it).
1459e7737b67SMatthew Auld  *
1460e7737b67SMatthew Auld  * Implicit caching rules, starting from DG1:
1461e7737b67SMatthew Auld  *
1462e7737b67SMatthew Auld  *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1463e7737b67SMatthew Auld  *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1464e7737b67SMatthew Auld  *       mapped as write-combined only.
1465e7737b67SMatthew Auld  *
1466e7737b67SMatthew Auld  *     - Everything else is always allocated and mapped as write-back, with the
1467e7737b67SMatthew Auld  *       guarantee that everything is also coherent with the GPU.
1468e7737b67SMatthew Auld  *
1469e7737b67SMatthew Auld  * Note that this is likely to change in the future again, where we might need
1470e7737b67SMatthew Auld  * more flexibility on future devices, so making this all explicit as part of a
1471e7737b67SMatthew Auld  * new &drm_i915_gem_create_ext extension is probable.
1472e7737b67SMatthew Auld  *
1473e7737b67SMatthew Auld  * Side note: Part of the reason for this is that changing the at-allocation-time CPU
1474e7737b67SMatthew Auld  * caching attributes for the pages might be required (and is expensive) if we
1475e7737b67SMatthew Auld  * need to then CPU map the pages later with different caching attributes. This
1476e7737b67SMatthew Auld  * inconsistent caching behaviour, while supported on x86, is not universally
1477e7737b67SMatthew Auld  * supported on other architectures. So for simplicity we opt for setting
1478e7737b67SMatthew Auld  * everything at creation time, whilst also making it immutable, on discrete
1479e7737b67SMatthew Auld  * platforms.
148035c7ab42SDaniel Vetter  */
1481718dceddSDavid Howells struct drm_i915_gem_caching {
1482718dceddSDavid Howells 	/**
1483289f5a72SMatthew Auld 	 * @handle: Handle of the buffer to set/get the caching level.
1484289f5a72SMatthew Auld 	 */
1485718dceddSDavid Howells 	__u32 handle;
1486718dceddSDavid Howells 
1487718dceddSDavid Howells 	/**
1488289f5a72SMatthew Auld 	 * @caching: The GTT caching level to apply or possible return value.
1489718dceddSDavid Howells 	 *
1490289f5a72SMatthew Auld 	 * The supported @caching values:
1491289f5a72SMatthew Auld 	 *
1492289f5a72SMatthew Auld 	 * I915_CACHING_NONE:
1493289f5a72SMatthew Auld 	 *
1494289f5a72SMatthew Auld 	 * GPU access is not coherent with CPU caches.  Default for machines
1495289f5a72SMatthew Auld 	 * without an LLC. This means manual flushing might be needed, if we
1496289f5a72SMatthew Auld 	 * want GPU access to be coherent.
1497289f5a72SMatthew Auld 	 *
1498289f5a72SMatthew Auld 	 * I915_CACHING_CACHED:
1499289f5a72SMatthew Auld 	 *
1500289f5a72SMatthew Auld 	 * GPU access is coherent with CPU caches and furthermore the data is
1501289f5a72SMatthew Auld 	 * cached in last-level caches shared between CPU cores and the GPU GT.
1502289f5a72SMatthew Auld 	 *
1503289f5a72SMatthew Auld 	 * I915_CACHING_DISPLAY:
1504289f5a72SMatthew Auld 	 *
1505289f5a72SMatthew Auld 	 * Special GPU caching mode which is coherent with the scanout engines.
1506289f5a72SMatthew Auld 	 * Transparently falls back to I915_CACHING_NONE on platforms where no
1507289f5a72SMatthew Auld 	 * special cache mode (like write-through or gfdt flushing) is
1508289f5a72SMatthew Auld 	 * available. The kernel automatically sets this mode when using a
1509289f5a72SMatthew Auld 	 * buffer as a scanout target.  Userspace can manually set this mode to
1510289f5a72SMatthew Auld 	 * avoid a costly stall and clflush in the hotpath of drawing the first
1511289f5a72SMatthew Auld 	 * frame.
1512289f5a72SMatthew Auld 	 */
1513289f5a72SMatthew Auld #define I915_CACHING_NONE		0
1514289f5a72SMatthew Auld #define I915_CACHING_CACHED		1
1515289f5a72SMatthew Auld #define I915_CACHING_DISPLAY		2
1516718dceddSDavid Howells 	__u32 caching;
1517718dceddSDavid Howells };
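
/*
 * E.g. requesting LLC caching for an object, as a sketch (assumes an
 * integrated platform where this ioctl is still supported):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		return -errno;
 */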
1518718dceddSDavid Howells 
1519718dceddSDavid Howells #define I915_TILING_NONE	0
1520718dceddSDavid Howells #define I915_TILING_X		1
1521718dceddSDavid Howells #define I915_TILING_Y		2
1522deeb1519SChris Wilson #define I915_TILING_LAST	I915_TILING_Y
1523718dceddSDavid Howells 
1524718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_NONE		0
1525718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9		1
1526718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10		2
1527718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_11		3
1528718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_11	4
1529718dceddSDavid Howells /* Not seen by userland */
1530718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_UNKNOWN	5
1531718dceddSDavid Howells /* Seen by userland. */
1532718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_17		6
1533718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_17	7
1534718dceddSDavid Howells 
1535718dceddSDavid Howells struct drm_i915_gem_set_tiling {
1536718dceddSDavid Howells 	/** Handle of the buffer to have its tiling state updated */
1537718dceddSDavid Howells 	__u32 handle;
1538718dceddSDavid Howells 
1539718dceddSDavid Howells 	/**
1540718dceddSDavid Howells 	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1541718dceddSDavid Howells 	 * I915_TILING_Y).
1542718dceddSDavid Howells 	 *
1543718dceddSDavid Howells 	 * This value is to be set on request, and will be updated by the
1544718dceddSDavid Howells 	 * kernel on successful return with the actual chosen tiling layout.
1545718dceddSDavid Howells 	 *
1546718dceddSDavid Howells 	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1547718dceddSDavid Howells 	 * has bit 6 swizzling that can't be managed correctly by GEM.
1548718dceddSDavid Howells 	 *
1549718dceddSDavid Howells 	 * Buffer contents become undefined when changing tiling_mode.
1550718dceddSDavid Howells 	 */
1551718dceddSDavid Howells 	__u32 tiling_mode;
1552718dceddSDavid Howells 
1553718dceddSDavid Howells 	/**
1554718dceddSDavid Howells 	 * Stride in bytes for the object when in I915_TILING_X or
1555718dceddSDavid Howells 	 * I915_TILING_Y.
1556718dceddSDavid Howells 	 */
1557718dceddSDavid Howells 	__u32 stride;
1558718dceddSDavid Howells 
1559718dceddSDavid Howells 	/**
1560718dceddSDavid Howells 	 * Returned address bit 6 swizzling required for CPU access through
1561718dceddSDavid Howells 	 * mmap mapping.
1562718dceddSDavid Howells 	 */
1563718dceddSDavid Howells 	__u32 swizzle_mode;
1564718dceddSDavid Howells };
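
/*
 * Sketch of requesting X-tiling with a caller-computed `stride`; since the
 * kernel may demote the request, tiling_mode must be re-read on return:
 *
 *	struct drm_i915_gem_set_tiling st = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st))
 *		return -errno;
 *	actual_mode = st.tiling_mode;
 */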
1565718dceddSDavid Howells 
1566718dceddSDavid Howells struct drm_i915_gem_get_tiling {
1567718dceddSDavid Howells 	/** Handle of the buffer to get tiling state for. */
1568718dceddSDavid Howells 	__u32 handle;
1569718dceddSDavid Howells 
1570718dceddSDavid Howells 	/**
1571718dceddSDavid Howells 	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1572718dceddSDavid Howells 	 * I915_TILING_Y).
1573718dceddSDavid Howells 	 */
1574718dceddSDavid Howells 	__u32 tiling_mode;
1575718dceddSDavid Howells 
1576718dceddSDavid Howells 	/**
1577718dceddSDavid Howells 	 * Returned address bit 6 swizzling required for CPU access through
1578718dceddSDavid Howells 	 * mmap mapping.
1579718dceddSDavid Howells 	 */
1580718dceddSDavid Howells 	__u32 swizzle_mode;
158170f2f5c7SChris Wilson 
158270f2f5c7SChris Wilson 	/**
158370f2f5c7SChris Wilson 	 * Returned address bit 6 swizzling required for CPU access through
158470f2f5c7SChris Wilson 	 * mmap mapping whilst bound.
158570f2f5c7SChris Wilson 	 */
158670f2f5c7SChris Wilson 	__u32 phys_swizzle_mode;
1587718dceddSDavid Howells };
1588718dceddSDavid Howells 
1589718dceddSDavid Howells struct drm_i915_gem_get_aperture {
1590718dceddSDavid Howells 	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1591718dceddSDavid Howells 	__u64 aper_size;
1592718dceddSDavid Howells 
1593718dceddSDavid Howells 	/**
1594718dceddSDavid Howells 	 * Available space in the aperture used by i915_gem_execbuffer, in
1595718dceddSDavid Howells 	 * bytes
1596718dceddSDavid Howells 	 */
1597718dceddSDavid Howells 	__u64 aper_available_size;
1598718dceddSDavid Howells };
1599718dceddSDavid Howells 
1600718dceddSDavid Howells struct drm_i915_get_pipe_from_crtc_id {
1601718dceddSDavid Howells 	/** ID of CRTC being requested **/
1602718dceddSDavid Howells 	__u32 crtc_id;
1603718dceddSDavid Howells 
1604718dceddSDavid Howells 	/** pipe of requested CRTC **/
1605718dceddSDavid Howells 	__u32 pipe;
1606718dceddSDavid Howells };
1607718dceddSDavid Howells 
1608718dceddSDavid Howells #define I915_MADV_WILLNEED 0
1609718dceddSDavid Howells #define I915_MADV_DONTNEED 1
1610718dceddSDavid Howells #define __I915_MADV_PURGED 2 /* internal state */
1611718dceddSDavid Howells 
1612718dceddSDavid Howells struct drm_i915_gem_madvise {
1613718dceddSDavid Howells 	/** Handle of the buffer to change the backing store advice */
1614718dceddSDavid Howells 	__u32 handle;
1615718dceddSDavid Howells 
1616718dceddSDavid Howells 	/* Advice: either the buffer will be needed again in the near future,
1617718dceddSDavid Howells 	 *         or won't be and could be discarded under memory pressure.
1618718dceddSDavid Howells 	 */
1619718dceddSDavid Howells 	__u32 madv;
1620718dceddSDavid Howells 
1621718dceddSDavid Howells 	/** Whether the backing store still exists. */
1622718dceddSDavid Howells 	__u32 retained;
1623718dceddSDavid Howells };
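
/*
 * A typical buffer-cache pattern, as a sketch: mark an idle buffer
 * purgeable when caching it, then check @retained when taking it back out:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reinitialise_contents(handle);
 *
 * where `reinitialise_contents()` stands in for whatever the caller must do
 * once the backing store has been discarded.
 */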
1624718dceddSDavid Howells 
1625718dceddSDavid Howells /* flags */
1626718dceddSDavid Howells #define I915_OVERLAY_TYPE_MASK 		0xff
1627718dceddSDavid Howells #define I915_OVERLAY_YUV_PLANAR 	0x01
1628718dceddSDavid Howells #define I915_OVERLAY_YUV_PACKED 	0x02
1629718dceddSDavid Howells #define I915_OVERLAY_RGB		0x03
1630718dceddSDavid Howells 
1631718dceddSDavid Howells #define I915_OVERLAY_DEPTH_MASK		0xff00
1632718dceddSDavid Howells #define I915_OVERLAY_RGB24		0x1000
1633718dceddSDavid Howells #define I915_OVERLAY_RGB16		0x2000
1634718dceddSDavid Howells #define I915_OVERLAY_RGB15		0x3000
1635718dceddSDavid Howells #define I915_OVERLAY_YUV422		0x0100
1636718dceddSDavid Howells #define I915_OVERLAY_YUV411		0x0200
1637718dceddSDavid Howells #define I915_OVERLAY_YUV420		0x0300
1638718dceddSDavid Howells #define I915_OVERLAY_YUV410		0x0400
1639718dceddSDavid Howells 
1640718dceddSDavid Howells #define I915_OVERLAY_SWAP_MASK		0xff0000
1641718dceddSDavid Howells #define I915_OVERLAY_NO_SWAP		0x000000
1642718dceddSDavid Howells #define I915_OVERLAY_UV_SWAP		0x010000
1643718dceddSDavid Howells #define I915_OVERLAY_Y_SWAP		0x020000
1644718dceddSDavid Howells #define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1645718dceddSDavid Howells 
1646718dceddSDavid Howells #define I915_OVERLAY_FLAGS_MASK		0xff000000
1647718dceddSDavid Howells #define I915_OVERLAY_ENABLE		0x01000000
1648718dceddSDavid Howells 
1649718dceddSDavid Howells struct drm_intel_overlay_put_image {
1650718dceddSDavid Howells 	/* various flags and src format description */
1651718dceddSDavid Howells 	__u32 flags;
1652718dceddSDavid Howells 	/* source picture description */
1653718dceddSDavid Howells 	__u32 bo_handle;
1654718dceddSDavid Howells 	/* stride values and offsets are in bytes, buffer relative */
1655718dceddSDavid Howells 	__u16 stride_Y; /* stride for packed formats */
1656718dceddSDavid Howells 	__u16 stride_UV;
1657718dceddSDavid Howells 	__u32 offset_Y; /* offset for packed formats */
1658718dceddSDavid Howells 	__u32 offset_U;
1659718dceddSDavid Howells 	__u32 offset_V;
1660718dceddSDavid Howells 	/* in pixels */
1661718dceddSDavid Howells 	__u16 src_width;
1662718dceddSDavid Howells 	__u16 src_height;
1663718dceddSDavid Howells 	/* to compensate the scaling factors for partially covered surfaces */
1664718dceddSDavid Howells 	__u16 src_scan_width;
1665718dceddSDavid Howells 	__u16 src_scan_height;
1666718dceddSDavid Howells 	/* output crtc description */
1667718dceddSDavid Howells 	__u32 crtc_id;
1668718dceddSDavid Howells 	__u16 dst_x;
1669718dceddSDavid Howells 	__u16 dst_y;
1670718dceddSDavid Howells 	__u16 dst_width;
1671718dceddSDavid Howells 	__u16 dst_height;
1672718dceddSDavid Howells };
1673718dceddSDavid Howells 
1674718dceddSDavid Howells /* flags */
1675718dceddSDavid Howells #define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1676718dceddSDavid Howells #define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1677ea9da4e4SChris Wilson #define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1678718dceddSDavid Howells struct drm_intel_overlay_attrs {
1679718dceddSDavid Howells 	__u32 flags;
1680718dceddSDavid Howells 	__u32 color_key;
1681718dceddSDavid Howells 	__s32 brightness;
1682718dceddSDavid Howells 	__u32 contrast;
1683718dceddSDavid Howells 	__u32 saturation;
1684718dceddSDavid Howells 	__u32 gamma0;
1685718dceddSDavid Howells 	__u32 gamma1;
1686718dceddSDavid Howells 	__u32 gamma2;
1687718dceddSDavid Howells 	__u32 gamma3;
1688718dceddSDavid Howells 	__u32 gamma4;
1689718dceddSDavid Howells 	__u32 gamma5;
1690718dceddSDavid Howells };
1691718dceddSDavid Howells 
1692718dceddSDavid Howells /*
1693718dceddSDavid Howells  * Intel sprite handling
1694718dceddSDavid Howells  *
1695718dceddSDavid Howells  * Color keying works with a min/mask/max tuple.  Both source and destination
1696718dceddSDavid Howells  * color keying is allowed.
1697718dceddSDavid Howells  *
1698718dceddSDavid Howells  * Source keying:
1699718dceddSDavid Howells  * Sprite pixels within the min & max values, masked against the color channels
1700718dceddSDavid Howells  * specified in the mask field, will be transparent.  All other pixels will
1701718dceddSDavid Howells  * be displayed on top of the primary plane.  For RGB surfaces, only the min
1702718dceddSDavid Howells  * and mask fields will be used; ranged compares are not allowed.
1703718dceddSDavid Howells  *
1704718dceddSDavid Howells  * Destination keying:
1705718dceddSDavid Howells  * Primary plane pixels that match the min value, masked against the color
1706718dceddSDavid Howells  * channels specified in the mask field, will be replaced by corresponding
1707718dceddSDavid Howells  * pixels from the sprite plane.
1708718dceddSDavid Howells  *
1709718dceddSDavid Howells  * Note that source & destination keying are exclusive; only one can be
1710718dceddSDavid Howells  * active on a given plane.
1711718dceddSDavid Howells  */
1712718dceddSDavid Howells 
17136ec5bd34SVille Syrjälä #define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
17146ec5bd34SVille Syrjälä 						* flags==0 to disable colorkeying.
17156ec5bd34SVille Syrjälä 						*/
1716718dceddSDavid Howells #define I915_SET_COLORKEY_DESTINATION	(1<<1)
1717718dceddSDavid Howells #define I915_SET_COLORKEY_SOURCE	(1<<2)
1718718dceddSDavid Howells struct drm_intel_sprite_colorkey {
1719718dceddSDavid Howells 	__u32 plane_id;
1720718dceddSDavid Howells 	__u32 min_value;
1721718dceddSDavid Howells 	__u32 channel_mask;
1722718dceddSDavid Howells 	__u32 max_value;
1723718dceddSDavid Howells 	__u32 flags;
1724718dceddSDavid Howells };
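
/*
 * E.g. (sketch) making pure-black sprite pixels transparent via source
 * keying on an RGB surface, comparing all three colour channels:
 *
 *	struct drm_intel_sprite_colorkey key = {
 *		.plane_id = plane_id,
 *		.min_value = 0x000000,
 *		.channel_mask = 0xffffff,
 *		.flags = I915_SET_COLORKEY_SOURCE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &key);
 */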
1725718dceddSDavid Howells 
1726718dceddSDavid Howells struct drm_i915_gem_wait {
1727718dceddSDavid Howells 	/** Handle of BO we shall wait on */
1728718dceddSDavid Howells 	__u32 bo_handle;
1729718dceddSDavid Howells 	__u32 flags;
1730718dceddSDavid Howells 	/** Number of nanoseconds to wait, Returns time remaining. */
1731718dceddSDavid Howells 	__s64 timeout_ns;
1732718dceddSDavid Howells };
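
/*
 * For example, a bounded 1ms wait for the GPU to finish with a buffer
 * (sketch; a negative timeout_ns waits indefinitely):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * An errno of ETIME indicates the object was still busy when the timeout
 * expired; on success, timeout_ns holds the time remaining.
 */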
1733718dceddSDavid Howells 
1734718dceddSDavid Howells struct drm_i915_gem_context_create {
1735b9171541SChris Wilson 	__u32 ctx_id; /* output: id of new context*/
1736718dceddSDavid Howells 	__u32 pad;
1737718dceddSDavid Howells };
1738718dceddSDavid Howells 
1739b9171541SChris Wilson struct drm_i915_gem_context_create_ext {
1740b9171541SChris Wilson 	__u32 ctx_id; /* output: id of new context*/
1741b9171541SChris Wilson 	__u32 flags;
1742b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
17438319f44cSChris Wilson #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
1744b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
17458319f44cSChris Wilson 	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1746e0695db7SChris Wilson 	__u64 extensions;
17475cc9ed4bSChris Wilson };
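
/*
 * Creating a context with an extension chain might look like this sketch
 * (`ext_chain` is assumed to point at a valid i915_user_extension list):
 *
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)ext_chain,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
 *		return -errno;
 *	ctx_id = create.ctx_id;
 */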
17485cc9ed4bSChris Wilson 
1749c9dc0f35SChris Wilson struct drm_i915_gem_context_param {
1750c9dc0f35SChris Wilson 	__u32 ctx_id;
1751c9dc0f35SChris Wilson 	__u32 size;
1752c9dc0f35SChris Wilson 	__u64 param;
1753c9dc0f35SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
17546ff6d61dSJason Ekstrand /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
17556ff6d61dSJason Ekstrand  * someone somewhere has attempted to use it, never re-use this context
17566ff6d61dSJason Ekstrand  * param number.
17576ff6d61dSJason Ekstrand  */
1758b1b38278SDavid Weinehall #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1759fa8848f2SChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1760bc3d6744SChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
176184102171SMika Kuoppala #define I915_CONTEXT_PARAM_BANNABLE	0x5
1762ac14fbd4SChris Wilson #define I915_CONTEXT_PARAM_PRIORITY	0x6
1763ac14fbd4SChris Wilson #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1764ac14fbd4SChris Wilson #define   I915_CONTEXT_DEFAULT_PRIORITY		0
1765ac14fbd4SChris Wilson #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1766e46c2e99STvrtko Ursulin 	/*
1767e46c2e99STvrtko Ursulin 	 * When using the following param, value should be a pointer to
1768e46c2e99STvrtko Ursulin 	 * drm_i915_gem_context_param_sseu.
1769e46c2e99STvrtko Ursulin 	 */
1770e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_SSEU		0x7
1771ba4fda62SChris Wilson 
1772ba4fda62SChris Wilson /*
1773ba4fda62SChris Wilson  * Not all clients may want to attempt automatic recover of a context after
1774ba4fda62SChris Wilson  * a hang (for example, some clients may only submit very small incremental
1775ba4fda62SChris Wilson  * batches relying on known logical state of previous batches which will never
1776ba4fda62SChris Wilson  * recover correctly and each attempt will hang), and so would prefer that
1777ba4fda62SChris Wilson  * the context is forever banned instead.
1778ba4fda62SChris Wilson  *
1779ba4fda62SChris Wilson  * If set to false (0), after a reset, subsequent (and in flight) rendering
1780ba4fda62SChris Wilson  * from this context is discarded, and the client will need to create a new
1781ba4fda62SChris Wilson  * context to use instead.
1782ba4fda62SChris Wilson  *
1783ba4fda62SChris Wilson  * If set to true (1), the kernel will automatically attempt to recover the
1784ba4fda62SChris Wilson  * context by skipping the hanging batch and executing the next batch starting
1785ba4fda62SChris Wilson  * from the default context state (discarding the incomplete logical context
1786ba4fda62SChris Wilson  * state lost due to the reset).
1787ba4fda62SChris Wilson  *
1788ba4fda62SChris Wilson  * On creation, all new contexts are marked as recoverable.
1789ba4fda62SChris Wilson  */
1790ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
17917f3f317aSChris Wilson 
17927f3f317aSChris Wilson 	/*
17937f3f317aSChris Wilson 	 * The id of the associated virtual memory address space (ppGTT) of
17947f3f317aSChris Wilson 	 * this context. Can be retrieved and passed to another context
17957f3f317aSChris Wilson 	 * (on the same fd) for both to use the same ppGTT and so share
17967f3f317aSChris Wilson 	 * address layouts, and avoid reloading the page tables on context
17977f3f317aSChris Wilson 	 * switches between themselves.
17987f3f317aSChris Wilson 	 *
17997f3f317aSChris Wilson 	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
18007f3f317aSChris Wilson 	 */
18017f3f317aSChris Wilson #define I915_CONTEXT_PARAM_VM		0x9
1802976b55f0SChris Wilson 
1803976b55f0SChris Wilson /*
1804976b55f0SChris Wilson  * I915_CONTEXT_PARAM_ENGINES:
1805976b55f0SChris Wilson  *
1806976b55f0SChris Wilson  * Bind this context to operate on this subset of available engines. Henceforth,
1807976b55f0SChris Wilson  * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1808976b55f0SChris Wilson  * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1809976b55f0SChris Wilson  * and upwards. Slots 0...N are filled in using the specified (class, instance).
1810976b55f0SChris Wilson  * Use
1811976b55f0SChris Wilson  *	engine_class: I915_ENGINE_CLASS_INVALID,
1812976b55f0SChris Wilson  *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1813976b55f0SChris Wilson  * to specify a gap in the array that can be filled in later, e.g. by a
1814976b55f0SChris Wilson  * virtual engine used for load balancing.
1815976b55f0SChris Wilson  *
1816976b55f0SChris Wilson  * Setting the number of engines bound to the context to 0, by passing a zero
1817976b55f0SChris Wilson  * sized argument, will revert back to default settings.
1818976b55f0SChris Wilson  *
1819976b55f0SChris Wilson  * See struct i915_context_param_engines.
1820ee113690SChris Wilson  *
1821ee113690SChris Wilson  * Extensions:
1822ee113690SChris Wilson  *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1823ee113690SChris Wilson  *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1824976b55f0SChris Wilson  */
1825976b55f0SChris Wilson #define I915_CONTEXT_PARAM_ENGINES	0xa
1826a0e04715SChris Wilson 
1827a0e04715SChris Wilson /*
1828a0e04715SChris Wilson  * I915_CONTEXT_PARAM_PERSISTENCE:
1829a0e04715SChris Wilson  *
1830a0e04715SChris Wilson  * Allow the context and active rendering to survive the process until
1831a0e04715SChris Wilson  * completion. Persistence allows fire-and-forget clients to queue up a
1832a0e04715SChris Wilson  * bunch of work, hand the output over to a display server and then quit.
1833a0e04715SChris Wilson  * If the context is marked as not persistent, upon closing (either via
1834a0e04715SChris Wilson  * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
1835a0e04715SChris Wilson  * or process termination), the context and any outstanding requests will be
1836a0e04715SChris Wilson  * cancelled (and exported fences for cancelled requests marked as -EIO).
1837a0e04715SChris Wilson  *
1838a0e04715SChris Wilson  * By default, new contexts allow persistence.
1839a0e04715SChris Wilson  */
1840a0e04715SChris Wilson #define I915_CONTEXT_PARAM_PERSISTENCE	0xb
184188be76cdSChris Wilson 
1842fe4751c3SJason Ekstrand /* This API has been removed.  On the off chance someone somewhere has
1843fe4751c3SJason Ekstrand  * attempted to use it, never re-use this context param number.
184488be76cdSChris Wilson  */
184588be76cdSChris Wilson #define I915_CONTEXT_PARAM_RINGSIZE	0xc
1846be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
1847e0695db7SChris Wilson 
1848c9dc0f35SChris Wilson 	__u64 value;
1849c9dc0f35SChris Wilson };
1850c9dc0f35SChris Wilson 
18512ef6a01fSMatthew Auld /*
1852e46c2e99STvrtko Ursulin  * Context SSEU programming
1853e46c2e99STvrtko Ursulin  *
1854e46c2e99STvrtko Ursulin  * It may be necessary, for either functional or performance reasons, to
1855e46c2e99STvrtko Ursulin  * configure a context to run with a reduced number of SSEU (where SSEU stands
1856e46c2e99STvrtko Ursulin  * for Slice/Sub-slice/EU).
1857e46c2e99STvrtko Ursulin  *
1858e46c2e99STvrtko Ursulin  * This is done by specifying an SSEU configuration, using the below
1859e46c2e99STvrtko Ursulin  * @struct drm_i915_gem_context_param_sseu, for every supported engine which
1860e46c2e99STvrtko Ursulin  * userspace intends to use.
1861e46c2e99STvrtko Ursulin  *
1862e46c2e99STvrtko Ursulin  * Not all GPUs or engines support this functionality, in which case an error
1863e46c2e99STvrtko Ursulin  * code of -ENODEV will be returned.
1864e46c2e99STvrtko Ursulin  *
1865e46c2e99STvrtko Ursulin  * Also, the flexibility of possible SSEU configuration permutations varies
1866e46c2e99STvrtko Ursulin  * between GPU generations and software-imposed limitations. Requesting an
1867e46c2e99STvrtko Ursulin  * unsupported combination will return an error code of -EINVAL.
1868e46c2e99STvrtko Ursulin  *
1869e46c2e99STvrtko Ursulin  * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1870e46c2e99STvrtko Ursulin  * favour of a single global setting.
1871e46c2e99STvrtko Ursulin  */
1872e46c2e99STvrtko Ursulin struct drm_i915_gem_context_param_sseu {
1873e46c2e99STvrtko Ursulin 	/*
1874e46c2e99STvrtko Ursulin 	 * Engine class & instance to be configured or queried.
1875e46c2e99STvrtko Ursulin 	 */
1876d1172ab3SChris Wilson 	struct i915_engine_class_instance engine;
1877e46c2e99STvrtko Ursulin 
1878e46c2e99STvrtko Ursulin 	/*
1879e620f7b3SChris Wilson 	 * Unknown flags must be cleared to zero.
1880e46c2e99STvrtko Ursulin 	 */
1881e46c2e99STvrtko Ursulin 	__u32 flags;
1882e620f7b3SChris Wilson #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1883e46c2e99STvrtko Ursulin 
1884e46c2e99STvrtko Ursulin 	/*
1885e46c2e99STvrtko Ursulin 	 * Mask of slices to enable for the context. Valid values are a subset
1886e46c2e99STvrtko Ursulin 	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1887e46c2e99STvrtko Ursulin 	 */
1888e46c2e99STvrtko Ursulin 	__u64 slice_mask;
1889e46c2e99STvrtko Ursulin 
1890e46c2e99STvrtko Ursulin 	/*
1891e46c2e99STvrtko Ursulin 	 * Mask of subslices to enable for the context. Valid values are a
1892e46c2e99STvrtko Ursulin 	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
1893e46c2e99STvrtko Ursulin 	 */
1894e46c2e99STvrtko Ursulin 	__u64 subslice_mask;
1895e46c2e99STvrtko Ursulin 
1896e46c2e99STvrtko Ursulin 	/*
1897e46c2e99STvrtko Ursulin 	 * Minimum/Maximum number of EUs to enable per subslice for the
1898e46c2e99STvrtko Ursulin 	 * context. min_eus_per_subslice must be less than or equal to
1899e46c2e99STvrtko Ursulin 	 * max_eus_per_subslice.
1900e46c2e99STvrtko Ursulin 	 */
1901e46c2e99STvrtko Ursulin 	__u16 min_eus_per_subslice;
1902e46c2e99STvrtko Ursulin 	__u16 max_eus_per_subslice;
1903e46c2e99STvrtko Ursulin 
1904e46c2e99STvrtko Ursulin 	/*
1905e46c2e99STvrtko Ursulin 	 * Unused for now. Must be cleared to zero.
1906e46c2e99STvrtko Ursulin 	 */
1907e46c2e99STvrtko Ursulin 	__u32 rsvd;
1908e46c2e99STvrtko Ursulin };
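
/*
 * For illustration, a minimal sketch of reading and then reprogramming the
 * render engine's SSEU configuration; this assumes the I915_CONTEXT_PARAM_SSEU
 * context param defined earlier in this header, a pre-created ctx_id, and
 * omits error handling:
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_context_param_sseu sseu = {
 * 		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 * 	};
 * 	struct drm_i915_gem_context_param p = {
 * 		.ctx_id = ctx_id,
 * 		.param = I915_CONTEXT_PARAM_SSEU,
 * 		.size = sizeof(sseu),
 * 		.value = (uintptr_t)&sseu,
 * 	};
 *
 * 	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p); // read current masks
 *
 * 	sseu.slice_mask &= 1; // restrict to slice 0, where such a config is supported
 * 	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p); // apply the new config
 */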
1909e46c2e99STvrtko Ursulin 
191057772953STvrtko Ursulin /**
191157772953STvrtko Ursulin  * DOC: Virtual Engine uAPI
191257772953STvrtko Ursulin  *
191357772953STvrtko Ursulin  * Virtual engine is a concept where userspace is able to configure a set of
191457772953STvrtko Ursulin  * physical engines, submit a batch buffer, and let the driver execute it on any
191557772953STvrtko Ursulin  * engine from the set as it sees fit.
191657772953STvrtko Ursulin  *
191757772953STvrtko Ursulin  * This is primarily useful on parts which have multiple instances of the same
191857772953STvrtko Ursulin  * engine class, for example GT3+ Skylake parts with their two VCS engines.
191957772953STvrtko Ursulin  *
192057772953STvrtko Ursulin  * For instance userspace can enumerate all engines of a certain class using the
192157772953STvrtko Ursulin  * previously described `Engine Discovery uAPI`_. After that userspace can
192257772953STvrtko Ursulin  * create a GEM context with a placeholder slot for the virtual engine (using
192357772953STvrtko Ursulin  * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
192457772953STvrtko Ursulin  * and instance respectively) and finally using the
192557772953STvrtko Ursulin  * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
192657772953STvrtko Ursulin  * the same reserved slot.
192757772953STvrtko Ursulin  *
192857772953STvrtko Ursulin  * Example of creating a virtual engine and submitting a batch buffer to it:
192957772953STvrtko Ursulin  *
193057772953STvrtko Ursulin  * .. code-block:: C
193157772953STvrtko Ursulin  *
193257772953STvrtko Ursulin  * 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
193357772953STvrtko Ursulin  * 		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
193457772953STvrtko Ursulin  * 		.engine_index = 0, // Place this virtual engine into engine map slot 0
193557772953STvrtko Ursulin  * 		.num_siblings = 2,
193657772953STvrtko Ursulin  * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
193757772953STvrtko Ursulin  * 			     { I915_ENGINE_CLASS_VIDEO, 1 }, },
193857772953STvrtko Ursulin  * 	};
193957772953STvrtko Ursulin  * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
194057772953STvrtko Ursulin  * 		.engines = { { I915_ENGINE_CLASS_INVALID,
194157772953STvrtko Ursulin  * 			       I915_ENGINE_CLASS_INVALID_NONE } },
194257772953STvrtko Ursulin  * 		.extensions = to_user_pointer(&virtual), // Chains after load_balance extension
194357772953STvrtko Ursulin  * 	};
194457772953STvrtko Ursulin  * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
194557772953STvrtko Ursulin  * 		.base = {
194657772953STvrtko Ursulin  * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
194757772953STvrtko Ursulin  * 		},
194857772953STvrtko Ursulin  * 		.param = {
194957772953STvrtko Ursulin  * 			.param = I915_CONTEXT_PARAM_ENGINES,
195057772953STvrtko Ursulin  * 			.value = to_user_pointer(&engines),
195157772953STvrtko Ursulin  * 			.size = sizeof(engines),
195257772953STvrtko Ursulin  * 		},
195357772953STvrtko Ursulin  * 	};
195457772953STvrtko Ursulin  * 	struct drm_i915_gem_context_create_ext create = {
195557772953STvrtko Ursulin  * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
195657772953STvrtko Ursulin  * 		.extensions = to_user_pointer(&p_engines),
195757772953STvrtko Ursulin  * 	};
195857772953STvrtko Ursulin  *
195957772953STvrtko Ursulin  * 	ctx_id = gem_context_create_ext(drm_fd, &create);
196057772953STvrtko Ursulin  *
196157772953STvrtko Ursulin  * 	// Now we have created a GEM context with its engine map containing a
196257772953STvrtko Ursulin  * 	// single virtual engine. Submissions to this slot can go either to
196357772953STvrtko Ursulin  * 	// vcs0 or vcs1, depending on the load balancing algorithm used inside
196457772953STvrtko Ursulin  * 	// the driver. The load balancing is dynamic from one batch buffer to
196557772953STvrtko Ursulin  * 	// another and transparent to userspace.
196657772953STvrtko Ursulin  *
196757772953STvrtko Ursulin  * 	...
196857772953STvrtko Ursulin  * 	execbuf.rsvd1 = ctx_id;
196957772953STvrtko Ursulin  * 	execbuf.flags = 0; // Submits to index 0 which is the virtual engine
197057772953STvrtko Ursulin  * 	gem_execbuf(drm_fd, &execbuf);
197157772953STvrtko Ursulin  */
197257772953STvrtko Ursulin 
19736d06779eSChris Wilson /*
19746d06779eSChris Wilson  * i915_context_engines_load_balance:
19756d06779eSChris Wilson  *
19766d06779eSChris Wilson  * Enable load balancing across this set of engines.
19776d06779eSChris Wilson  *
19786d06779eSChris Wilson  * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
19796d06779eSChris Wilson  * used will proxy the execbuffer request onto one of the set of engines
19806d06779eSChris Wilson  * in such a way as to distribute the load evenly across the set.
19816d06779eSChris Wilson  *
19826d06779eSChris Wilson  * The set of engines must be compatible (e.g. the same HW class) as they
19836d06779eSChris Wilson  * will share the same logical GPU context and ring.
19846d06779eSChris Wilson  *
19856d06779eSChris Wilson  * To intermix rendering with the virtual engine and direct rendering onto
19866d06779eSChris Wilson  * the backing engines (bypassing the load balancing proxy), the context must
19876d06779eSChris Wilson  * be defined to use a single timeline for all engines.
19886d06779eSChris Wilson  */
19896d06779eSChris Wilson struct i915_context_engines_load_balance {
19906d06779eSChris Wilson 	struct i915_user_extension base;
19916d06779eSChris Wilson 
19926d06779eSChris Wilson 	__u16 engine_index;
19936d06779eSChris Wilson 	__u16 num_siblings;
19946d06779eSChris Wilson 	__u32 flags; /* all undefined flags must be zero */
19956d06779eSChris Wilson 
19966d06779eSChris Wilson 	__u64 mbz64; /* reserved for future use; must be zero */
19976d06779eSChris Wilson 
19986d06779eSChris Wilson 	struct i915_engine_class_instance engines[0];
19996d06779eSChris Wilson } __attribute__((packed));
20006d06779eSChris Wilson 
20016d06779eSChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
20026d06779eSChris Wilson 	struct i915_user_extension base; \
20036d06779eSChris Wilson 	__u16 engine_index; \
20046d06779eSChris Wilson 	__u16 num_siblings; \
20056d06779eSChris Wilson 	__u32 flags; \
20066d06779eSChris Wilson 	__u64 mbz64; \
20076d06779eSChris Wilson 	struct i915_engine_class_instance engines[N__]; \
20086d06779eSChris Wilson } __attribute__((packed)) name__
20096d06779eSChris Wilson 
2010ee113690SChris Wilson /*
2011ee113690SChris Wilson  * i915_context_engines_bond:
2012ee113690SChris Wilson  *
2013ee113690SChris Wilson  * Construct bonded pairs for execution within a virtual engine.
2014ee113690SChris Wilson  *
2015ee113690SChris Wilson  * All engines are equal, but some are more equal than others. Given
2016ee113690SChris Wilson  * the distribution of resources in the HW, it may be preferable to run
2017ee113690SChris Wilson  * a request on a given subset of engines in parallel to a request on a
2018ee113690SChris Wilson  * specific engine. We enable this selection of engines within a virtual
2019ee113690SChris Wilson  * engine by specifying bonding pairs: for any given master engine, we will
2020ee113690SChris Wilson  * only execute on one of the corresponding siblings within the virtual engine.
2021ee113690SChris Wilson  *
2022ee113690SChris Wilson  * To execute a request in parallel on the master engine and a sibling requires
2023ee113690SChris Wilson  * coordination with an I915_EXEC_FENCE_SUBMIT.
2024ee113690SChris Wilson  */
2025ee113690SChris Wilson struct i915_context_engines_bond {
2026ee113690SChris Wilson 	struct i915_user_extension base;
2027ee113690SChris Wilson 
2028ee113690SChris Wilson 	struct i915_engine_class_instance master;
2029ee113690SChris Wilson 
2030ee113690SChris Wilson 	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
2031ee113690SChris Wilson 	__u16 num_bonds;
2032ee113690SChris Wilson 
2033ee113690SChris Wilson 	__u64 flags; /* all undefined flags must be zero */
2034ee113690SChris Wilson 	__u64 mbz64[4]; /* reserved for future use; must be zero */
2035ee113690SChris Wilson 
2036ee113690SChris Wilson 	struct i915_engine_class_instance engines[0];
2037ee113690SChris Wilson } __attribute__((packed));
2038ee113690SChris Wilson 
2039ee113690SChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
2040ee113690SChris Wilson 	struct i915_user_extension base; \
2041ee113690SChris Wilson 	struct i915_engine_class_instance master; \
2042ee113690SChris Wilson 	__u16 virtual_index; \
2043ee113690SChris Wilson 	__u16 num_bonds; \
2044ee113690SChris Wilson 	__u64 flags; \
2045ee113690SChris Wilson 	__u64 mbz64[4]; \
2046ee113690SChris Wilson 	struct i915_engine_class_instance engines[N__]; \
2047ee113690SChris Wilson } __attribute__((packed)) name__
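
/*
 * For illustration, a minimal sketch declaring one bond so that, when the
 * master request runs on vcs0, the virtual engine in slot 0 may only pick vcs1
 * for the bonded request; submission of the pair is still coordinated with
 * I915_EXEC_FENCE_SUBMIT as noted above:
 *
 * .. code-block:: C
 *
 * 	I915_DEFINE_CONTEXT_ENGINES_BOND(bond, 1) = {
 * 		.base.name = I915_CONTEXT_ENGINES_EXT_BOND,
 * 		.master = { I915_ENGINE_CLASS_VIDEO, 0 },
 * 		.virtual_index = 0, // slot of the virtual engine in ctx->engines[]
 * 		.num_bonds = 1,
 * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 1 } },
 * 	};
 */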
2048ee113690SChris Wilson 
204957772953STvrtko Ursulin /**
205057772953STvrtko Ursulin  * DOC: Context Engine Map uAPI
205157772953STvrtko Ursulin  *
205257772953STvrtko Ursulin  * Context engine map is a new way of addressing engines when submitting batch-
205357772953STvrtko Ursulin  * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
205457772953STvrtko Ursulin  * inside the flags field of `struct drm_i915_gem_execbuffer2`.
205557772953STvrtko Ursulin  *
205657772953STvrtko Ursulin  * To use it, created GEM contexts need to be configured with a list of engines
205757772953STvrtko Ursulin  * the user intends to submit to. This is accomplished using the
205857772953STvrtko Ursulin  * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
205957772953STvrtko Ursulin  * i915_context_param_engines`.
206057772953STvrtko Ursulin  *
206157772953STvrtko Ursulin  * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
206257772953STvrtko Ursulin  * configured map.
206357772953STvrtko Ursulin  *
206457772953STvrtko Ursulin  * Example of creating such context and submitting against it:
206557772953STvrtko Ursulin  *
206657772953STvrtko Ursulin  * .. code-block:: C
206757772953STvrtko Ursulin  *
206857772953STvrtko Ursulin  * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
206957772953STvrtko Ursulin  * 		.engines = { { I915_ENGINE_CLASS_RENDER, 0 },
207057772953STvrtko Ursulin  * 			     { I915_ENGINE_CLASS_COPY, 0 } }
207157772953STvrtko Ursulin  * 	};
207257772953STvrtko Ursulin  * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
207357772953STvrtko Ursulin  * 		.base = {
207457772953STvrtko Ursulin  * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
207557772953STvrtko Ursulin  * 		},
207657772953STvrtko Ursulin  * 		.param = {
207757772953STvrtko Ursulin  * 			.param = I915_CONTEXT_PARAM_ENGINES,
207857772953STvrtko Ursulin  * 			.value = to_user_pointer(&engines),
207957772953STvrtko Ursulin  * 			.size = sizeof(engines),
208057772953STvrtko Ursulin  * 		},
208157772953STvrtko Ursulin  * 	};
208257772953STvrtko Ursulin  * 	struct drm_i915_gem_context_create_ext create = {
208357772953STvrtko Ursulin  * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
208457772953STvrtko Ursulin  * 		.extensions = to_user_pointer(&p_engines),
208557772953STvrtko Ursulin  * 	};
208657772953STvrtko Ursulin  *
208757772953STvrtko Ursulin  * 	ctx_id = gem_context_create_ext(drm_fd, &create);
208857772953STvrtko Ursulin  *
208957772953STvrtko Ursulin  * 	// We have now created a GEM context with two engines in the map:
209057772953STvrtko Ursulin  * 	// Index 0 points to rcs0 while index 1 points to bcs0. Other engines
209157772953STvrtko Ursulin  * 	// will not be accessible from this context.
209257772953STvrtko Ursulin  *
209357772953STvrtko Ursulin  * 	...
209457772953STvrtko Ursulin  * 	execbuf.rsvd1 = ctx_id;
209557772953STvrtko Ursulin  * 	execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
209657772953STvrtko Ursulin  * 	gem_execbuf(drm_fd, &execbuf);
209757772953STvrtko Ursulin  *
209857772953STvrtko Ursulin  * 	...
209957772953STvrtko Ursulin  * 	execbuf.rsvd1 = ctx_id;
210057772953STvrtko Ursulin  * 	execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
210157772953STvrtko Ursulin  * 	gem_execbuf(drm_fd, &execbuf);
210257772953STvrtko Ursulin  */
210357772953STvrtko Ursulin 
2104976b55f0SChris Wilson struct i915_context_param_engines {
2105976b55f0SChris Wilson 	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
21066d06779eSChris Wilson #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
2107ee113690SChris Wilson #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
2108976b55f0SChris Wilson 	struct i915_engine_class_instance engines[0];
2109976b55f0SChris Wilson } __attribute__((packed));
2110976b55f0SChris Wilson 
2111976b55f0SChris Wilson #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
2112976b55f0SChris Wilson 	__u64 extensions; \
2113976b55f0SChris Wilson 	struct i915_engine_class_instance engines[N__]; \
2114976b55f0SChris Wilson } __attribute__((packed)) name__
2115976b55f0SChris Wilson 
2116b9171541SChris Wilson struct drm_i915_gem_context_create_ext_setparam {
2117b9171541SChris Wilson #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
2118b9171541SChris Wilson 	struct i915_user_extension base;
2119b9171541SChris Wilson 	struct drm_i915_gem_context_param param;
2120b9171541SChris Wilson };
2121b9171541SChris Wilson 
21224a766ae4SJason Ekstrand /* This API has been removed.  On the off chance someone somewhere has
21234a766ae4SJason Ekstrand  * attempted to use it, never re-use this extension number.
21244a766ae4SJason Ekstrand  */
2125b81dde71SChris Wilson #define I915_CONTEXT_CREATE_EXT_CLONE 1
2126b81dde71SChris Wilson 
2127b9171541SChris Wilson struct drm_i915_gem_context_destroy {
2128b9171541SChris Wilson 	__u32 ctx_id;
2129b9171541SChris Wilson 	__u32 pad;
2130b9171541SChris Wilson };
2131b9171541SChris Wilson 
2132b9171541SChris Wilson /*
2133b9171541SChris Wilson  * DRM_I915_GEM_VM_CREATE -
2134b9171541SChris Wilson  *
2135b9171541SChris Wilson  * Create a new virtual memory address space (ppGTT) for use within a context
2136b9171541SChris Wilson  * on the same file. Extensions can be provided to configure exactly how the
2137b9171541SChris Wilson  * address space is set up upon creation.
2138b9171541SChris Wilson  *
2139b9171541SChris Wilson  * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2140b9171541SChris Wilson  * returned in the outparam @vm_id.
2141b9171541SChris Wilson  *
2142b9171541SChris Wilson  * No flags are defined, with all bits reserved and must be zero.
2143b9171541SChris Wilson  * No flags are currently defined; all bits are reserved and must be zero.
2144b9171541SChris Wilson  * An extension chain maybe provided, starting with @extensions, and terminated
2145b9171541SChris Wilson  * An extension chain may be provided, starting with @extensions, and terminated
2146b9171541SChris Wilson  *
2147b9171541SChris Wilson  * DRM_I915_GEM_VM_DESTROY -
2148b9171541SChris Wilson  *
2149b9171541SChris Wilson  * Destroys a previously created VM id, specified in @vm_id.
2150b9171541SChris Wilson  *
2151b9171541SChris Wilson  * No extensions or flags are allowed currently, and so must be zero.
2152b9171541SChris Wilson  */
2153b9171541SChris Wilson struct drm_i915_gem_vm_control {
2154b9171541SChris Wilson 	__u64 extensions;
2155b9171541SChris Wilson 	__u32 flags;
2156b9171541SChris Wilson 	__u32 vm_id;
2157b9171541SChris Wilson };
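
/*
 * For illustration, a minimal sketch of creating and later destroying a ppGTT
 * with the ioctls described above (error handling omitted):
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_vm_control vm = {}; // no flags, no extensions
 *
 * 	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm); // vm.vm_id now holds the new id
 * 	... // e.g. assign vm.vm_id to a context via I915_CONTEXT_PARAM_VM
 * 	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 */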
2158b9171541SChris Wilson 
2159b9171541SChris Wilson struct drm_i915_reg_read {
2160b9171541SChris Wilson 	/*
2161b9171541SChris Wilson 	 * Register offset.
2162b9171541SChris Wilson 	 * For 64bit wide registers where the upper 32bits don't immediately
2163b9171541SChris Wilson 	 * follow the lower 32bits, the offset of the lower 32bits must
2164b9171541SChris Wilson 	 * be specified
2165b9171541SChris Wilson 	 * be specified.
2166b9171541SChris Wilson 	__u64 offset;
2167b9171541SChris Wilson #define I915_REG_READ_8B_WA (1ul << 0)
2168b9171541SChris Wilson 
2169b9171541SChris Wilson 	__u64 val; /* Return value */
2170b9171541SChris Wilson };
2171b9171541SChris Wilson 
2172b9171541SChris Wilson /* Known registers:
2173b9171541SChris Wilson  *
2174b9171541SChris Wilson  * Render engine timestamp - 0x2358 + 64bit - gen7+
2175b9171541SChris Wilson  * - Note this register returns an invalid value if using the default
2176b9171541SChris Wilson  *   single instruction 8-byte read; to work around that, pass the
2177b9171541SChris Wilson  *   flag I915_REG_READ_8B_WA in the offset field.
2178b9171541SChris Wilson  *
2179b9171541SChris Wilson  */
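
/*
 * For illustration, a minimal sketch of reading the render engine timestamp
 * listed above with the 8-byte read workaround (error handling omitted):
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_reg_read reg = {
 * 		.offset = 0x2358 | I915_REG_READ_8B_WA,
 * 	};
 *
 * 	ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg); // reg.val holds the timestamp
 */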
2180b9171541SChris Wilson 
2181b9171541SChris Wilson struct drm_i915_reset_stats {
2182b9171541SChris Wilson 	__u32 ctx_id;
2183b9171541SChris Wilson 	__u32 flags;
2184b9171541SChris Wilson 
2185b9171541SChris Wilson 	/* All resets since boot/module reload, for all contexts */
2186b9171541SChris Wilson 	__u32 reset_count;
2187b9171541SChris Wilson 
2188b9171541SChris Wilson 	/* Number of batches lost when active in GPU, for this context */
2189b9171541SChris Wilson 	__u32 batch_active;
2190b9171541SChris Wilson 
2191b9171541SChris Wilson 	/* Number of batches lost pending for execution, for this context */
2192b9171541SChris Wilson 	__u32 batch_pending;
2193b9171541SChris Wilson 
2194b9171541SChris Wilson 	__u32 pad;
2195b9171541SChris Wilson };
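
/*
 * For illustration, a minimal sketch of sampling reset statistics for a
 * context (ctx_id is an assumed pre-created context; error handling omitted):
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 * 	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 * 	if (stats.batch_active) // a hang occurred while this context was running
 * 		...
 */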
2196b9171541SChris Wilson 
2197aef7b67aSMatthew Auld /**
2198aef7b67aSMatthew Auld  * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
2199aef7b67aSMatthew Auld  *
2200aef7b67aSMatthew Auld  * Userptr objects have several restrictions on what ioctls can be used with the
2201aef7b67aSMatthew Auld  * object handle.
2202aef7b67aSMatthew Auld  */
2203b9171541SChris Wilson struct drm_i915_gem_userptr {
2204aef7b67aSMatthew Auld 	/**
2205aef7b67aSMatthew Auld 	 * @user_ptr: The pointer to the allocated memory.
2206aef7b67aSMatthew Auld 	 *
2207aef7b67aSMatthew Auld 	 * Needs to be aligned to PAGE_SIZE.
2208aef7b67aSMatthew Auld 	 */
2209b9171541SChris Wilson 	__u64 user_ptr;
2210aef7b67aSMatthew Auld 
2211aef7b67aSMatthew Auld 	/**
2212aef7b67aSMatthew Auld 	 * @user_size:
2213aef7b67aSMatthew Auld 	 *
2214aef7b67aSMatthew Auld 	 * The size in bytes for the allocated memory. This will also become the
2215aef7b67aSMatthew Auld 	 * object size.
2216aef7b67aSMatthew Auld 	 *
2217aef7b67aSMatthew Auld 	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE.
2219aef7b67aSMatthew Auld 	 */
2220b9171541SChris Wilson 	__u64 user_size;
2221aef7b67aSMatthew Auld 
2222aef7b67aSMatthew Auld 	/**
2223aef7b67aSMatthew Auld 	 * @flags:
2224aef7b67aSMatthew Auld 	 *
2225aef7b67aSMatthew Auld 	 * Supported flags:
2226aef7b67aSMatthew Auld 	 *
2227aef7b67aSMatthew Auld 	 * I915_USERPTR_READ_ONLY:
2228aef7b67aSMatthew Auld 	 *
2229aef7b67aSMatthew Auld 	 * Mark the object as readonly; this also means GPU access can only be
2230aef7b67aSMatthew Auld 	 * readonly. This is only supported on HW which supports readonly access
2231aef7b67aSMatthew Auld 	 * through the GTT. If the HW can't support readonly access, an error is
2232aef7b67aSMatthew Auld 	 * returned.
2233aef7b67aSMatthew Auld 	 *
2234aef7b67aSMatthew Auld 	 * I915_USERPTR_UNSYNCHRONIZED:
2235aef7b67aSMatthew Auld 	 *
2236aef7b67aSMatthew Auld 	 * NOT USED. Setting this flag will result in an error.
2237aef7b67aSMatthew Auld 	 */
2238b9171541SChris Wilson 	__u32 flags;
2239b9171541SChris Wilson #define I915_USERPTR_READ_ONLY 0x1
2240b9171541SChris Wilson #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2241b9171541SChris Wilson 	/**
2242aef7b67aSMatthew Auld 	 * @handle: Returned handle for the object.
2243b9171541SChris Wilson 	 *
2244b9171541SChris Wilson 	 * Object handles are nonzero.
2245b9171541SChris Wilson 	 */
2246b9171541SChris Wilson 	__u32 handle;
2247b9171541SChris Wilson };
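
/*
 * For illustration, a minimal sketch of wrapping an existing page-aligned
 * allocation in a GEM object; SIZE is assumed to be a multiple of PAGE_SIZE
 * and error handling is omitted:
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_userptr userptr = { .user_size = SIZE };
 * 	void *ptr;
 *
 * 	posix_memalign(&ptr, PAGE_SIZE, SIZE); // satisfies the alignment rules above
 * 	userptr.user_ptr = (uintptr_t)ptr;
 *
 * 	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr); // userptr.handle is the object
 */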
2248b9171541SChris Wilson 
2249d7965152SRobert Bragg enum drm_i915_oa_format {
225019f81df2SRobert Bragg 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
225119f81df2SRobert Bragg 	I915_OA_FORMAT_A29,	    /* HSW only */
225219f81df2SRobert Bragg 	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
225319f81df2SRobert Bragg 	I915_OA_FORMAT_B4_C8,	    /* HSW only */
225419f81df2SRobert Bragg 	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
225519f81df2SRobert Bragg 	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
225619f81df2SRobert Bragg 	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
225719f81df2SRobert Bragg 
225819f81df2SRobert Bragg 	/* Gen8+ */
225919f81df2SRobert Bragg 	I915_OA_FORMAT_A12,
226019f81df2SRobert Bragg 	I915_OA_FORMAT_A12_B8_C8,
226119f81df2SRobert Bragg 	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
2262d7965152SRobert Bragg 
2263d7965152SRobert Bragg 	I915_OA_FORMAT_MAX	    /* non-ABI */
2264d7965152SRobert Bragg };
2265d7965152SRobert Bragg 
2266eec688e1SRobert Bragg enum drm_i915_perf_property_id {
2267eec688e1SRobert Bragg 	/**
2268eec688e1SRobert Bragg 	 * Open the stream for a specific context handle (as used with
2269eec688e1SRobert Bragg 	 * execbuffer2). A stream opened for a specific context this way
2270eec688e1SRobert Bragg 	 * won't typically require root privileges.
2271b8d49f28SLionel Landwerlin 	 *
2272b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2273eec688e1SRobert Bragg 	 */
2274eec688e1SRobert Bragg 	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
2275eec688e1SRobert Bragg 
2276d7965152SRobert Bragg 	/**
2277d7965152SRobert Bragg 	 * A value of 1 requests the inclusion of raw OA unit reports as
2278d7965152SRobert Bragg 	 * part of stream samples.
2279b8d49f28SLionel Landwerlin 	 *
2280b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2281d7965152SRobert Bragg 	 */
2282d7965152SRobert Bragg 	DRM_I915_PERF_PROP_SAMPLE_OA,
2283d7965152SRobert Bragg 
2284d7965152SRobert Bragg 	/**
2285d7965152SRobert Bragg 	 * The value specifies which set of OA unit metrics should be
228666137f54SRandy Dunlap 	 * configured, defining the contents of any OA unit reports.
2287b8d49f28SLionel Landwerlin 	 *
2288b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2289d7965152SRobert Bragg 	 */
2290d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_METRICS_SET,
2291d7965152SRobert Bragg 
2292d7965152SRobert Bragg 	/**
2293d7965152SRobert Bragg 	 * The value specifies the size and layout of OA unit reports.
2294b8d49f28SLionel Landwerlin 	 *
2295b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2296d7965152SRobert Bragg 	 */
2297d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_FORMAT,
2298d7965152SRobert Bragg 
2299d7965152SRobert Bragg 	/**
2300d7965152SRobert Bragg 	 * Specifying this property implicitly requests periodic OA unit
2301d7965152SRobert Bragg 	 * sampling and (at least on Haswell) the sampling frequency is derived
2302d7965152SRobert Bragg 	 * from this exponent as follows:
2303d7965152SRobert Bragg 	 *
2304d7965152SRobert Bragg 	 *   80ns * 2^(period_exponent + 1)
2305b8d49f28SLionel Landwerlin 	 *
2306b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2307d7965152SRobert Bragg 	 */
2308d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_EXPONENT,
2309d7965152SRobert Bragg 
23109cd20ef7SLionel Landwerlin 	/**
23119cd20ef7SLionel Landwerlin 	 * Specifying this property is only valid when specifying a context to
23129cd20ef7SLionel Landwerlin 	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
23139cd20ef7SLionel Landwerlin 	 * will hold preemption of the particular context we want to gather
23149cd20ef7SLionel Landwerlin 	 * performance data about. The execbuf2 submissions must include a
23159cd20ef7SLionel Landwerlin 	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
23169cd20ef7SLionel Landwerlin 	 *
23179cd20ef7SLionel Landwerlin 	 * This property is available in perf revision 3.
23189cd20ef7SLionel Landwerlin 	 */
23199cd20ef7SLionel Landwerlin 	DRM_I915_PERF_PROP_HOLD_PREEMPTION,
23209cd20ef7SLionel Landwerlin 
232111ecbdddSLionel Landwerlin 	/**
232211ecbdddSLionel Landwerlin 	 * Specifying this pins all contexts to the specified SSEU power
232311ecbdddSLionel Landwerlin 	 * configuration for the duration of the recording.
232411ecbdddSLionel Landwerlin 	 *
232511ecbdddSLionel Landwerlin 	 * This parameter's value is a pointer to a struct
232611ecbdddSLionel Landwerlin 	 * drm_i915_gem_context_param_sseu.
232711ecbdddSLionel Landwerlin 	 *
232811ecbdddSLionel Landwerlin 	 * This property is available in perf revision 4.
232911ecbdddSLionel Landwerlin 	 */
233011ecbdddSLionel Landwerlin 	DRM_I915_PERF_PROP_GLOBAL_SSEU,
233111ecbdddSLionel Landwerlin 
23324ef10fe0SLionel Landwerlin 	/**
23334ef10fe0SLionel Landwerlin 	 * This optional parameter specifies the timer interval in nanoseconds
23344ef10fe0SLionel Landwerlin 	 * at which the i915 driver will check the OA buffer for available data.
23354ef10fe0SLionel Landwerlin 	 * Minimum allowed value is 100 microseconds. A default value is used by
23364ef10fe0SLionel Landwerlin 	 * the driver if this parameter is not specified. Note that larger timer
23374ef10fe0SLionel Landwerlin 	 * values will reduce CPU consumption during OA perf captures. However,
23384ef10fe0SLionel Landwerlin 	 * excessively large values would potentially result in OA buffer
23394ef10fe0SLionel Landwerlin 	 * overwrites as captures reach the end of the OA buffer.
23404ef10fe0SLionel Landwerlin 	 *
23414ef10fe0SLionel Landwerlin 	 * This property is available in perf revision 5.
23424ef10fe0SLionel Landwerlin 	 */
23434ef10fe0SLionel Landwerlin 	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
23444ef10fe0SLionel Landwerlin 
2345eec688e1SRobert Bragg 	DRM_I915_PERF_PROP_MAX /* non-ABI */
2346eec688e1SRobert Bragg };
2347eec688e1SRobert Bragg 
2348eec688e1SRobert Bragg struct drm_i915_perf_open_param {
2349eec688e1SRobert Bragg 	__u32 flags;
2350eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
2351eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
2352eec688e1SRobert Bragg #define I915_PERF_FLAG_DISABLED		(1<<2)
2353eec688e1SRobert Bragg 
2354eec688e1SRobert Bragg 	/** The number of u64 (id, value) pairs */
2355eec688e1SRobert Bragg 	__u32 num_properties;
2356eec688e1SRobert Bragg 
2357eec688e1SRobert Bragg 	/**
2358eec688e1SRobert Bragg 	 * Pointer to array of u64 (id, value) pairs configuring the stream
2359eec688e1SRobert Bragg 	 * to open.
2360eec688e1SRobert Bragg 	 */
2361cd8bddc4SChris Wilson 	__u64 properties_ptr;
2362eec688e1SRobert Bragg };
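
/*
 * For illustration, a minimal sketch of opening an OA stream filtered to one
 * context; ctx_id and metrics_set_id are assumed to exist already, the format
 * choice is only an example, and error handling is omitted:
 *
 * .. code-block:: C
 *
 * 	__u64 properties[] = {
 * 		DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
 * 		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 * 		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 * 		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 * 		DRM_I915_PERF_PROP_OA_EXPONENT, 16, // ~10.5ms period per the formula above
 * 	};
 * 	struct drm_i915_perf_open_param param = {
 * 		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 * 		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 * 		.properties_ptr = (uintptr_t)properties,
 * 	};
 *
 * 	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */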
2363eec688e1SRobert Bragg 
23642ef6a01fSMatthew Auld /*
2365d7965152SRobert Bragg  * Enable data capture for a stream that was either opened in a disabled state
2366d7965152SRobert Bragg  * via I915_PERF_FLAG_DISABLED or was later disabled via
2367d7965152SRobert Bragg  * I915_PERF_IOCTL_DISABLE.
2368d7965152SRobert Bragg  *
2369d7965152SRobert Bragg  * It is intended to be cheaper to disable and enable a stream than it may be
2370d7965152SRobert Bragg  * to close and re-open a stream with the same configuration.
2371d7965152SRobert Bragg  *
2372d7965152SRobert Bragg  * It's undefined whether any pending data for the stream will be lost.
2373b8d49f28SLionel Landwerlin  *
2374b8d49f28SLionel Landwerlin  * This ioctl is available in perf revision 1.
2375d7965152SRobert Bragg  */
2376eec688e1SRobert Bragg #define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
2377d7965152SRobert Bragg 
23782ef6a01fSMatthew Auld /*
2379d7965152SRobert Bragg  * Disable data capture for a stream.
2380d7965152SRobert Bragg  *
2381d7965152SRobert Bragg  * It is an error to try to read a stream that is disabled.
2382b8d49f28SLionel Landwerlin  *
2383b8d49f28SLionel Landwerlin  * This ioctl is available in perf revision 1.
2384d7965152SRobert Bragg  */
2385eec688e1SRobert Bragg #define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
2386eec688e1SRobert Bragg 
23872ef6a01fSMatthew Auld /*
23887831e9a9SChris Wilson  * Change metrics_set captured by a stream.
23897831e9a9SChris Wilson  *
23907831e9a9SChris Wilson  * If the stream is bound to a specific context, the configuration change
23917831e9a9SChris Wilson  * will be performed inline with that context such that it takes effect before
23927831e9a9SChris Wilson  * the next execbuf submission.
23937831e9a9SChris Wilson  *
23947831e9a9SChris Wilson  * Returns the previously bound metrics set id, or a negative error code.
23957831e9a9SChris Wilson  *
23967831e9a9SChris Wilson  * This ioctl is available in perf revision 2.
23977831e9a9SChris Wilson  */
23987831e9a9SChris Wilson #define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
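
/*
 * For illustration, a minimal sketch of the enable/read/disable cycle on a
 * stream fd returned by DRM_IOCTL_I915_PERF_OPEN; buf sizing and error
 * handling are omitted:
 *
 * .. code-block:: C
 *
 * 	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0); // start data capture
 * 	read(stream_fd, buf, sizeof(buf)); // returns whole records, see below
 * 	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0); // stop capture, fd stays open
 */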
23997831e9a9SChris Wilson 
24002ef6a01fSMatthew Auld /*
2401eec688e1SRobert Bragg  * Common to all i915 perf records
2402eec688e1SRobert Bragg  */
2403eec688e1SRobert Bragg struct drm_i915_perf_record_header {
2404eec688e1SRobert Bragg 	__u32 type;
2405eec688e1SRobert Bragg 	__u16 pad;
2406eec688e1SRobert Bragg 	__u16 size;
2407eec688e1SRobert Bragg };
2408eec688e1SRobert Bragg 
2409eec688e1SRobert Bragg enum drm_i915_perf_record_type {
2410eec688e1SRobert Bragg 
2411eec688e1SRobert Bragg 	/**
2412eec688e1SRobert Bragg 	 * Samples are the workhorse record type whose contents are extensible
2413eec688e1SRobert Bragg 	 * and defined when opening an i915 perf stream based on the given
2414eec688e1SRobert Bragg 	 * properties.
2415eec688e1SRobert Bragg 	 *
2416eec688e1SRobert Bragg 	 * Boolean properties following the naming convention
2417eec688e1SRobert Bragg 	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2418eec688e1SRobert Bragg 	 * every sample.
2419eec688e1SRobert Bragg 	 *
2420eec688e1SRobert Bragg 	 * The order of these sample properties given by userspace has no
2421d7965152SRobert Bragg 	 * effect on the ordering of data within a sample. The order is
2422eec688e1SRobert Bragg 	 * documented here.
2423eec688e1SRobert Bragg 	 *
2424eec688e1SRobert Bragg 	 * struct {
2425eec688e1SRobert Bragg 	 *     struct drm_i915_perf_record_header header;
2426eec688e1SRobert Bragg 	 *
2427d7965152SRobert Bragg 	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2428eec688e1SRobert Bragg 	 * };
2429eec688e1SRobert Bragg 	 */
2430eec688e1SRobert Bragg 	DRM_I915_PERF_RECORD_SAMPLE = 1,
2431eec688e1SRobert Bragg 
2432d7965152SRobert Bragg 	/*
2433d7965152SRobert Bragg 	 * Indicates that one or more OA reports were not written by the
2434d7965152SRobert Bragg 	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2435d7965152SRobert Bragg 	 * command collides with periodic sampling - which would be more likely
2436d7965152SRobert Bragg 	 * at higher sampling frequencies.
2437d7965152SRobert Bragg 	 */
2438d7965152SRobert Bragg 	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2439d7965152SRobert Bragg 
2440d7965152SRobert Bragg 	/**
2441d7965152SRobert Bragg 	 * An error occurred that resulted in all pending OA reports being lost.
2442d7965152SRobert Bragg 	 */
2443d7965152SRobert Bragg 	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2444d7965152SRobert Bragg 
2445eec688e1SRobert Bragg 	DRM_I915_PERF_RECORD_MAX /* non-ABI */
2446eec688e1SRobert Bragg };
2447eec688e1SRobert Bragg 
24482ef6a01fSMatthew Auld /*
2449f89823c2SLionel Landwerlin  * Structure to upload perf dynamic configuration into the kernel.
2450f89823c2SLionel Landwerlin  */
2451f89823c2SLionel Landwerlin struct drm_i915_perf_oa_config {
2452f89823c2SLionel Landwerlin 	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
2453f89823c2SLionel Landwerlin 	char uuid[36];
2454f89823c2SLionel Landwerlin 
2455f89823c2SLionel Landwerlin 	__u32 n_mux_regs;
2456f89823c2SLionel Landwerlin 	__u32 n_boolean_regs;
2457f89823c2SLionel Landwerlin 	__u32 n_flex_regs;
2458f89823c2SLionel Landwerlin 
2459ee427e25SLionel Landwerlin 	/*
2460a446ae2cSLionel Landwerlin 	 * These fields are pointers to tuples of u32 values (register address,
2461a446ae2cSLionel Landwerlin 	 * value). For example, the expected length of the buffer pointed to by
2462a446ae2cSLionel Landwerlin 	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
2463ee427e25SLionel Landwerlin 	 */
246417ad4fddSChris Wilson 	__u64 mux_regs_ptr;
246517ad4fddSChris Wilson 	__u64 boolean_regs_ptr;
246617ad4fddSChris Wilson 	__u64 flex_regs_ptr;
2467f89823c2SLionel Landwerlin };
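
/*
 * For illustration, a minimal sketch of uploading such a configuration with
 * DRM_IOCTL_I915_PERF_ADD_CONFIG; the uuid value and the mux_regs array of
 * (address, value) pairs are placeholders, and error handling is omitted:
 *
 * .. code-block:: C
 *
 * 	__u32 mux_regs[] = { 0x9888, 0x12345678 }; // one (address, value) pair
 * 	struct drm_i915_perf_oa_config config = {
 * 		.n_mux_regs = 1,
 * 		.mux_regs_ptr = (uintptr_t)mux_regs,
 * 	};
 *
 * 	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab", sizeof(config.uuid));
 *
 * 	// Returns an id usable with DRM_I915_PERF_PROP_OA_METRICS_SET
 * 	int metrics_set_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */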
2468f89823c2SLionel Landwerlin 
2469e3bdccafSMatthew Auld /**
2470e3bdccafSMatthew Auld  * struct drm_i915_query_item - An individual query for the kernel to process.
2471e3bdccafSMatthew Auld  *
2472e3bdccafSMatthew Auld  * The behaviour is determined by the @query_id. Note that exactly what
2473e3bdccafSMatthew Auld  * is written at @data_ptr also depends on the specific @query_id.
2474e3bdccafSMatthew Auld  */
2475a446ae2cSLionel Landwerlin struct drm_i915_query_item {
2476e3bdccafSMatthew Auld 	/** @query_id: The id for this query */
2477a446ae2cSLionel Landwerlin 	__u64 query_id;
2478c822e059SLionel Landwerlin #define DRM_I915_QUERY_TOPOLOGY_INFO    1
2479c5d3e39cSTvrtko Ursulin #define DRM_I915_QUERY_ENGINE_INFO	2
24804f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG      3
248171021729SAbdiel Janulgue #define DRM_I915_QUERY_MEMORY_REGIONS   4
2482be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
2483a446ae2cSLionel Landwerlin 
2484e3bdccafSMatthew Auld 	/**
2485e3bdccafSMatthew Auld 	 * @length:
2486e3bdccafSMatthew Auld 	 *
2487a446ae2cSLionel Landwerlin 	 * When set to zero by userspace, this is filled with the size of the
2488e3bdccafSMatthew Auld 	 * data to be written at the @data_ptr pointer. The kernel sets this
2489a446ae2cSLionel Landwerlin 	 * value to a negative value to signal an error on a particular query
2490a446ae2cSLionel Landwerlin 	 * item.
2491a446ae2cSLionel Landwerlin 	 */
2492a446ae2cSLionel Landwerlin 	__s32 length;
2493a446ae2cSLionel Landwerlin 
2494e3bdccafSMatthew Auld 	/**
2495e3bdccafSMatthew Auld 	 * @flags:
2496e3bdccafSMatthew Auld 	 *
24974f6ccc74SLionel Landwerlin 	 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
24984f6ccc74SLionel Landwerlin 	 *
24994f6ccc74SLionel Landwerlin 	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
25004f6ccc74SLionel Landwerlin 	 * following:
2501e3bdccafSMatthew Auld 	 *
25024f6ccc74SLionel Landwerlin 	 *	- DRM_I915_QUERY_PERF_CONFIG_LIST
25034f6ccc74SLionel Landwerlin 	 *      - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
25044f6ccc74SLionel Landwerlin 	 *      - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
2505a446ae2cSLionel Landwerlin 	 */
2506a446ae2cSLionel Landwerlin 	__u32 flags;
25074f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_LIST          1
25084f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
25094f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3
2510a446ae2cSLionel Landwerlin 
2511e3bdccafSMatthew Auld 	/**
2512e3bdccafSMatthew Auld 	 * @data_ptr:
2513e3bdccafSMatthew Auld 	 *
2514e3bdccafSMatthew Auld 	 * Data will be written at the location pointed by @data_ptr when the
2515e3bdccafSMatthew Auld 	 * value of @length matches the length of the data to be written by the
2516a446ae2cSLionel Landwerlin 	 * kernel.
2517a446ae2cSLionel Landwerlin 	 */
2518a446ae2cSLionel Landwerlin 	__u64 data_ptr;
2519a446ae2cSLionel Landwerlin };
2520a446ae2cSLionel Landwerlin 
2521e3bdccafSMatthew Auld /**
2522e3bdccafSMatthew Auld  * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
2523e3bdccafSMatthew Auld  * kernel to fill out.
2524e3bdccafSMatthew Auld  *
2525e3bdccafSMatthew Auld  * Note that this is generally a two step process for each struct
2526e3bdccafSMatthew Auld  * drm_i915_query_item in the array:
2527e3bdccafSMatthew Auld  *
2528e3bdccafSMatthew Auld  * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
2529e3bdccafSMatthew Auld  *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
2530e3bdccafSMatthew Auld  *    kernel will then fill in the size, in bytes, which tells userspace how
2531e3bdccafSMatthew Auld  *    kernel will then fill in the size, in bytes, which tells userspace how much
2532e3bdccafSMatthew Auld  *    memory it needs to allocate for the blob (say for an array of properties).
2533e3bdccafSMatthew Auld  * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
2534e3bdccafSMatthew Auld  *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
2535e3bdccafSMatthew Auld  *    the &drm_i915_query_item.length should still be the same as what the
2536e3bdccafSMatthew Auld  *    kernel previously set. At this point the kernel can fill in the blob.
2537e3bdccafSMatthew Auld  *
2538e3bdccafSMatthew Auld  * Note that for some query items it can make sense for userspace to just pass
2539e3bdccafSMatthew Auld  * in a buffer/blob equal to or larger than the required size. In this case only
2540e3bdccafSMatthew Auld  * a single ioctl call is needed. For some smaller query items this can work
2541e3bdccafSMatthew Auld  * quite well.
2542e3bdccafSMatthew Auld  *
2543e3bdccafSMatthew Auld  */
2544a446ae2cSLionel Landwerlin struct drm_i915_query {
2545e3bdccafSMatthew Auld 	/** @num_items: The number of elements in the @items_ptr array */
2546a446ae2cSLionel Landwerlin 	__u32 num_items;
2547a446ae2cSLionel Landwerlin 
2548e3bdccafSMatthew Auld 	/**
2549e3bdccafSMatthew Auld 	 * @flags: Unused for now. Must be cleared to zero.
2550a446ae2cSLionel Landwerlin 	 */
2551a446ae2cSLionel Landwerlin 	__u32 flags;
2552a446ae2cSLionel Landwerlin 
2553e3bdccafSMatthew Auld 	/**
2554e3bdccafSMatthew Auld 	 * @items_ptr:
2555e3bdccafSMatthew Auld 	 *
2556e3bdccafSMatthew Auld 	 * Pointer to an array of struct drm_i915_query_item. The number of
2557e3bdccafSMatthew Auld 	 * array elements is @num_items.
2558a446ae2cSLionel Landwerlin 	 */
2559a446ae2cSLionel Landwerlin 	__u64 items_ptr;
2560a446ae2cSLionel Landwerlin };
2561a446ae2cSLionel Landwerlin 
2562c822e059SLionel Landwerlin /*
2563c822e059SLionel Landwerlin  * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
2564c822e059SLionel Landwerlin  *
2565c822e059SLionel Landwerlin  * data: contains the 3 pieces of information:
2566c822e059SLionel Landwerlin  *
2567c822e059SLionel Landwerlin  * - the slice mask with one bit per slice telling whether a slice is
2568c822e059SLionel Landwerlin  *   available. The availability of slice X can be queried with the following
2569c822e059SLionel Landwerlin  *   formula:
2570c822e059SLionel Landwerlin  *
2571c822e059SLionel Landwerlin  *           (data[X / 8] >> (X % 8)) & 1
2572c822e059SLionel Landwerlin  *
2573c822e059SLionel Landwerlin  * - the subslice mask for each slice with one bit per subslice telling
2574601734f7SDaniele Ceraolo Spurio  *   whether a subslice is available. Gen12 has dual-subslices, which are
2575601734f7SDaniele Ceraolo Spurio  *   similar to two gen11 subslices. For gen12, this array represents dual-
2576601734f7SDaniele Ceraolo Spurio  *   subslices. The availability of subslice Y in slice X can be queried
2577601734f7SDaniele Ceraolo Spurio  *   with the following formula:
2578c822e059SLionel Landwerlin  *
2579c822e059SLionel Landwerlin  *           (data[subslice_offset +
2580c822e059SLionel Landwerlin  *                 X * subslice_stride +
2581c822e059SLionel Landwerlin  *                 Y / 8] >> (Y % 8)) & 1
2582c822e059SLionel Landwerlin  *
2583c822e059SLionel Landwerlin  * - the EU mask for each subslice in each slice with one bit per EU telling
2584c822e059SLionel Landwerlin  *   whether an EU is available. The availability of EU Z in subslice Y in
2585c822e059SLionel Landwerlin  *   slice X can be queried with the following formula:
2586c822e059SLionel Landwerlin  *
2587c822e059SLionel Landwerlin  *           (data[eu_offset +
2588c822e059SLionel Landwerlin  *                 (X * max_subslices + Y) * eu_stride +
2589c822e059SLionel Landwerlin  *                 Z / 8] >> (Z % 8)) & 1
2590c822e059SLionel Landwerlin  */
2591c822e059SLionel Landwerlin struct drm_i915_query_topology_info {
2592c822e059SLionel Landwerlin 	/*
2593c822e059SLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
2594c822e059SLionel Landwerlin 	 */
2595c822e059SLionel Landwerlin 	__u16 flags;
2596c822e059SLionel Landwerlin 
2597c822e059SLionel Landwerlin 	__u16 max_slices;
2598c822e059SLionel Landwerlin 	__u16 max_subslices;
2599c822e059SLionel Landwerlin 	__u16 max_eus_per_subslice;
2600c822e059SLionel Landwerlin 
2601c822e059SLionel Landwerlin 	/*
2602c822e059SLionel Landwerlin 	 * Offset in data[] at which the subslice masks are stored.
2603c822e059SLionel Landwerlin 	 */
2604c822e059SLionel Landwerlin 	__u16 subslice_offset;
2605c822e059SLionel Landwerlin 
2606c822e059SLionel Landwerlin 	/*
2607c822e059SLionel Landwerlin 	 * Stride at which each of the subslice masks for each slice are
2608c822e059SLionel Landwerlin 	 * stored.
2609c822e059SLionel Landwerlin 	 */
2610c822e059SLionel Landwerlin 	__u16 subslice_stride;
2611c822e059SLionel Landwerlin 
2612c822e059SLionel Landwerlin 	/*
2613c822e059SLionel Landwerlin 	 * Offset in data[] at which the EU masks are stored.
2614c822e059SLionel Landwerlin 	 */
2615c822e059SLionel Landwerlin 	__u16 eu_offset;
2616c822e059SLionel Landwerlin 
2617c822e059SLionel Landwerlin 	/*
2618c822e059SLionel Landwerlin 	 * Stride at which each of the EU masks for each subslice are stored.
2619c822e059SLionel Landwerlin 	 */
2620c822e059SLionel Landwerlin 	__u16 eu_stride;
2621c822e059SLionel Landwerlin 
2622c822e059SLionel Landwerlin 	__u8 data[];
2623c822e059SLionel Landwerlin };
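
/*
 * For illustration, minimal helpers decoding the masks above, following the
 * formulas in the comment; info is assumed to point at a kernel-filled blob:
 *
 * .. code-block:: C
 *
 * 	static bool slice_available(const struct drm_i915_query_topology_info *info,
 * 				    int s)
 * 	{
 * 		return (info->data[s / 8] >> (s % 8)) & 1;
 * 	}
 *
 * 	static bool subslice_available(const struct drm_i915_query_topology_info *info,
 * 				       int s, int ss)
 * 	{
 * 		return (info->data[info->subslice_offset + s * info->subslice_stride +
 * 				   ss / 8] >> (ss % 8)) & 1;
 * 	}
 */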
2624c822e059SLionel Landwerlin 
2625c5d3e39cSTvrtko Ursulin /**
262657772953STvrtko Ursulin  * DOC: Engine Discovery uAPI
262757772953STvrtko Ursulin  *
262857772953STvrtko Ursulin  * Engine discovery uAPI is a way of enumerating physical engines present in a
262957772953STvrtko Ursulin  * GPU associated with an open i915 DRM file descriptor. This supersedes the old
263057772953STvrtko Ursulin  * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
263157772953STvrtko Ursulin  * `I915_PARAM_HAS_BLT`.
263257772953STvrtko Ursulin  *
263357772953STvrtko Ursulin  * The need for this interface arose with Icelake and newer GPUs, which
263457772953STvrtko Ursulin  * started to establish a pattern of having multiple engines of a same class,
263557772953STvrtko Ursulin  * where not all instances were always completely functionally equivalent.
263657772953STvrtko Ursulin  *
263757772953STvrtko Ursulin  * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
263857772953STvrtko Ursulin  * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
263957772953STvrtko Ursulin  *
264057772953STvrtko Ursulin  * Example for getting the list of engines:
264157772953STvrtko Ursulin  *
264257772953STvrtko Ursulin  * .. code-block:: C
264357772953STvrtko Ursulin  *
264457772953STvrtko Ursulin  * 	struct drm_i915_query_engine_info *info;
264557772953STvrtko Ursulin  * 	struct drm_i915_query_item item = {
264657772953STvrtko Ursulin  * 		.query_id = DRM_I915_QUERY_ENGINE_INFO,
264757772953STvrtko Ursulin  * 	};
264857772953STvrtko Ursulin  * 	struct drm_i915_query query = {
264957772953STvrtko Ursulin  * 		.num_items = 1,
265057772953STvrtko Ursulin  * 		.items_ptr = (uintptr_t)&item,
265157772953STvrtko Ursulin  * 	};
265257772953STvrtko Ursulin  * 	int err, i;
265357772953STvrtko Ursulin  *
265457772953STvrtko Ursulin  * 	// First query the size of the blob we need, this needs to be large
265557772953STvrtko Ursulin  * 	// enough to hold our array of engines. The kernel will fill out the
265657772953STvrtko Ursulin  * 	// item.length for us, which is the number of bytes we need.
265757772953STvrtko Ursulin  * 	//
265857772953STvrtko Ursulin  * 	// Alternatively a large buffer can be allocated straight away enabling
265957772953STvrtko Ursulin  * 	// querying in one pass, in which case item.length should contain the
266057772953STvrtko Ursulin  * 	// length of the provided buffer.
266157772953STvrtko Ursulin  * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
266257772953STvrtko Ursulin  * 	if (err) ...
266357772953STvrtko Ursulin  *
266457772953STvrtko Ursulin  * 	info = calloc(1, item.length);
266557772953STvrtko Ursulin  * 	// Now that we allocated the required number of bytes, we call the ioctl
266657772953STvrtko Ursulin  * 	// again, this time with the data_ptr pointing to our newly allocated
266757772953STvrtko Ursulin  * 	// blob, which the kernel can then populate with info on all engines.
266857772953STvrtko Ursulin  * 	item.data_ptr = (uintptr_t)info;
266957772953STvrtko Ursulin  *
267057772953STvrtko Ursulin  * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
267157772953STvrtko Ursulin  * 	if (err) ...
267257772953STvrtko Ursulin  *
267357772953STvrtko Ursulin  * 	// We can now access each engine in the array
267457772953STvrtko Ursulin  * 	for (i = 0; i < info->num_engines; i++) {
267557772953STvrtko Ursulin  * 		struct drm_i915_engine_info einfo = info->engines[i];
267657772953STvrtko Ursulin  * 		u16 class = einfo.engine.class;
267757772953STvrtko Ursulin  * 		u16 instance = einfo.engine.instance;
267857772953STvrtko Ursulin  * 		....
267957772953STvrtko Ursulin  * 	}
268057772953STvrtko Ursulin  *
268157772953STvrtko Ursulin  * 	free(info);
268257772953STvrtko Ursulin  *
268357772953STvrtko Ursulin  * Each of the enumerated engines, apart from being defined by its class and
268457772953STvrtko Ursulin  * instance (see `struct i915_engine_class_instance`), can also have flags and
268557772953STvrtko Ursulin  * capabilities defined as documented in i915_drm.h.
268657772953STvrtko Ursulin  *
268757772953STvrtko Ursulin  * For instance video engines which support HEVC encoding will have the
268857772953STvrtko Ursulin  * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
268957772953STvrtko Ursulin  *
269057772953STvrtko Ursulin  * Engine discovery only fully comes into its own when combined with the new way
269157772953STvrtko Ursulin  * of addressing engines when submitting batch buffers using contexts with
269257772953STvrtko Ursulin  * engine maps configured.
269357772953STvrtko Ursulin  */
269457772953STvrtko Ursulin 
269557772953STvrtko Ursulin /**
2696c5d3e39cSTvrtko Ursulin  * struct drm_i915_engine_info
2697c5d3e39cSTvrtko Ursulin  *
2698c5d3e39cSTvrtko Ursulin  * Describes one engine and its capabilities as known to the driver.
2699c5d3e39cSTvrtko Ursulin  */
2700c5d3e39cSTvrtko Ursulin struct drm_i915_engine_info {
27012ef6a01fSMatthew Auld 	/** @engine: Engine class and instance. */
2702c5d3e39cSTvrtko Ursulin 	struct i915_engine_class_instance engine;
2703c5d3e39cSTvrtko Ursulin 
27042ef6a01fSMatthew Auld 	/** @rsvd0: Reserved field. */
2705c5d3e39cSTvrtko Ursulin 	__u32 rsvd0;
2706c5d3e39cSTvrtko Ursulin 
27072ef6a01fSMatthew Auld 	/** @flags: Engine flags. */
2708c5d3e39cSTvrtko Ursulin 	__u64 flags;
2709c5d3e39cSTvrtko Ursulin 
27102ef6a01fSMatthew Auld 	/** @capabilities: Capabilities of this engine. */
2711c5d3e39cSTvrtko Ursulin 	__u64 capabilities;
2712c5d3e39cSTvrtko Ursulin #define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
2713c5d3e39cSTvrtko Ursulin #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)
2714c5d3e39cSTvrtko Ursulin 
27152ef6a01fSMatthew Auld 	/** @rsvd1: Reserved fields. */
2716c5d3e39cSTvrtko Ursulin 	__u64 rsvd1[4];
2717c5d3e39cSTvrtko Ursulin };
2718c5d3e39cSTvrtko Ursulin 
2719c5d3e39cSTvrtko Ursulin /**
2720c5d3e39cSTvrtko Ursulin  * struct drm_i915_query_engine_info
2721c5d3e39cSTvrtko Ursulin  *
2722c5d3e39cSTvrtko Ursulin  * Engine info query enumerates all engines known to the driver by filling in
2723c5d3e39cSTvrtko Ursulin  * an array of struct drm_i915_engine_info structures.
2724c5d3e39cSTvrtko Ursulin  */
2725c5d3e39cSTvrtko Ursulin struct drm_i915_query_engine_info {
27262ef6a01fSMatthew Auld 	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
2727c5d3e39cSTvrtko Ursulin 	__u32 num_engines;
2728c5d3e39cSTvrtko Ursulin 
27292ef6a01fSMatthew Auld 	/** @rsvd: MBZ */
2730c5d3e39cSTvrtko Ursulin 	__u32 rsvd[3];
2731c5d3e39cSTvrtko Ursulin 
27322ef6a01fSMatthew Auld 	/** @engines: Marker for drm_i915_engine_info structures. */
2733c5d3e39cSTvrtko Ursulin 	struct drm_i915_engine_info engines[];
2734c5d3e39cSTvrtko Ursulin };
2735c5d3e39cSTvrtko Ursulin 
27364f6ccc74SLionel Landwerlin /*
27374f6ccc74SLionel Landwerlin  * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
27384f6ccc74SLionel Landwerlin  */
27394f6ccc74SLionel Landwerlin struct drm_i915_query_perf_config {
27404f6ccc74SLionel Landwerlin 	union {
27414f6ccc74SLionel Landwerlin 		/*
27424f6ccc74SLionel Landwerlin 		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
27434f6ccc74SLionel Landwerlin 		 * this field to the number of configurations available.
27444f6ccc74SLionel Landwerlin 		 */
27454f6ccc74SLionel Landwerlin 		__u64 n_configs;
27464f6ccc74SLionel Landwerlin 
27474f6ccc74SLionel Landwerlin 		/*
27484f6ccc74SLionel Landwerlin 		 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
27494f6ccc74SLionel Landwerlin 		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
27504f6ccc74SLionel Landwerlin 		 * i915 will use the value in this field as a configuration
27514f6ccc74SLionel Landwerlin 		 */
27524f6ccc74SLionel Landwerlin 		__u64 config;
27534f6ccc74SLionel Landwerlin 
27544f6ccc74SLionel Landwerlin 		/*
27554f6ccc74SLionel Landwerlin 		 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
27564f6ccc74SLionel Landwerlin 		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
27574f6ccc74SLionel Landwerlin 		 * i915 will use the value in this field as a configuration
27584f6ccc74SLionel Landwerlin 		 *
27594f6ccc74SLionel Landwerlin 		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
27604f6ccc74SLionel Landwerlin 		 */
27614f6ccc74SLionel Landwerlin 		char uuid[36];
27624f6ccc74SLionel Landwerlin 	};
27634f6ccc74SLionel Landwerlin 
27644f6ccc74SLionel Landwerlin 	/*
27654f6ccc74SLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
27664f6ccc74SLionel Landwerlin 	 */
27674f6ccc74SLionel Landwerlin 	__u32 flags;
27684f6ccc74SLionel Landwerlin 
27694f6ccc74SLionel Landwerlin 	/*
27704f6ccc74SLionel Landwerlin 	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
27714f6ccc74SLionel Landwerlin 	 * write an array of __u64 of configuration identifiers.
27724f6ccc74SLionel Landwerlin 	 *
27734f6ccc74SLionel Landwerlin 	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
27744f6ccc74SLionel Landwerlin 	 * write a struct drm_i915_perf_oa_config. If the following fields of
27754f6ccc74SLionel Landwerlin 	 * drm_i915_perf_oa_config are not set to 0, i915 will write into
27764f6ccc74SLionel Landwerlin 	 * the associated pointers the values submitted when the
27774f6ccc74SLionel Landwerlin 	 * configuration was created:
27784f6ccc74SLionel Landwerlin 	 *
27794f6ccc74SLionel Landwerlin 	 *         - n_mux_regs
27804f6ccc74SLionel Landwerlin 	 *         - n_boolean_regs
27814f6ccc74SLionel Landwerlin 	 *         - n_flex_regs
27824f6ccc74SLionel Landwerlin 	 */
27834f6ccc74SLionel Landwerlin 	__u8 data[];
27844f6ccc74SLionel Landwerlin };
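
/*
 * For illustration, a minimal sketch of listing the available configuration
 * identifiers; a first DRM_IOCTL_I915_QUERY pass with item.length == 0 is
 * assumed to have sized the blob, and error handling is omitted:
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_query_perf_config *list = calloc(1, item.length);
 *
 * 	item.query_id = DRM_I915_QUERY_PERF_CONFIG;
 * 	item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
 * 	item.data_ptr = (uintptr_t)list;
 * 	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // query wraps item as shown earlier
 *
 * 	__u64 *ids = (__u64 *)list->data;
 * 	for (__u64 i = 0; i < list->n_configs; i++)
 * 		printf("config id %llu\n", (unsigned long long)ids[i]);
 */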
27854f6ccc74SLionel Landwerlin 
278671021729SAbdiel Janulgue /**
278771021729SAbdiel Janulgue  * enum drm_i915_gem_memory_class - Supported memory classes
278871021729SAbdiel Janulgue  */
278971021729SAbdiel Janulgue enum drm_i915_gem_memory_class {
279071021729SAbdiel Janulgue 	/** @I915_MEMORY_CLASS_SYSTEM: System memory */
279171021729SAbdiel Janulgue 	I915_MEMORY_CLASS_SYSTEM = 0,
279271021729SAbdiel Janulgue 	/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
279371021729SAbdiel Janulgue 	I915_MEMORY_CLASS_DEVICE,
279471021729SAbdiel Janulgue };
279571021729SAbdiel Janulgue 
279671021729SAbdiel Janulgue /**
279771021729SAbdiel Janulgue  * struct drm_i915_gem_memory_class_instance - Identify particular memory region
279871021729SAbdiel Janulgue  */
279971021729SAbdiel Janulgue struct drm_i915_gem_memory_class_instance {
280071021729SAbdiel Janulgue 	/** @memory_class: See enum drm_i915_gem_memory_class */
280171021729SAbdiel Janulgue 	__u16 memory_class;
280271021729SAbdiel Janulgue 
280371021729SAbdiel Janulgue 	/** @memory_instance: Which instance */
280471021729SAbdiel Janulgue 	__u16 memory_instance;
280571021729SAbdiel Janulgue };
280671021729SAbdiel Janulgue 
280771021729SAbdiel Janulgue /**
280871021729SAbdiel Janulgue  * struct drm_i915_memory_region_info - Describes one region as known to the
280971021729SAbdiel Janulgue  * driver.
281071021729SAbdiel Janulgue  *
281171021729SAbdiel Janulgue  * Note that we reserve some stuff here for potential future work. As an example
281271021729SAbdiel Janulgue  * we might want to expose the capabilities for a given region, which could
281371021729SAbdiel Janulgue  * include things like whether the region is CPU mappable/accessible and what
281471021729SAbdiel Janulgue  * the supported mapping types are.
281571021729SAbdiel Janulgue  *
281671021729SAbdiel Janulgue  * Note that to extend struct drm_i915_memory_region_info and struct
281771021729SAbdiel Janulgue  * drm_i915_query_memory_regions in the future the plan is to do the following:
281871021729SAbdiel Janulgue  *
281971021729SAbdiel Janulgue  * .. code-block:: C
282071021729SAbdiel Janulgue  *
282171021729SAbdiel Janulgue  *	struct drm_i915_memory_region_info {
282271021729SAbdiel Janulgue  *		struct drm_i915_gem_memory_class_instance region;
282371021729SAbdiel Janulgue  *		union {
282471021729SAbdiel Janulgue  *			__u32 rsvd0;
282571021729SAbdiel Janulgue  *			__u32 new_thing1;
282671021729SAbdiel Janulgue  *		};
282771021729SAbdiel Janulgue  *		...
282871021729SAbdiel Janulgue  *		union {
282971021729SAbdiel Janulgue  *			__u64 rsvd1[8];
283071021729SAbdiel Janulgue  *			struct {
283171021729SAbdiel Janulgue  *				__u64 new_thing2;
283271021729SAbdiel Janulgue  *				__u64 new_thing3;
283371021729SAbdiel Janulgue  *				...
283471021729SAbdiel Janulgue  *			};
283571021729SAbdiel Janulgue  *		};
283671021729SAbdiel Janulgue  *	};
283771021729SAbdiel Janulgue  *
283871021729SAbdiel Janulgue  * With this, things should remain source compatible between versions for
283971021729SAbdiel Janulgue  * userspace, even as we add new fields.
284071021729SAbdiel Janulgue  *
284171021729SAbdiel Janulgue  * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
284271021729SAbdiel Janulgue  * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
284371021729SAbdiel Janulgue  * at &drm_i915_query_item.query_id.
284471021729SAbdiel Janulgue  */
284571021729SAbdiel Janulgue struct drm_i915_memory_region_info {
284671021729SAbdiel Janulgue 	/** @region: The class:instance pair encoding */
284771021729SAbdiel Janulgue 	struct drm_i915_gem_memory_class_instance region;
284871021729SAbdiel Janulgue 
284971021729SAbdiel Janulgue 	/** @rsvd0: MBZ */
285071021729SAbdiel Janulgue 	__u32 rsvd0;
285171021729SAbdiel Janulgue 
285271021729SAbdiel Janulgue 	/** @probed_size: Memory probed by the driver (-1 = unknown) */
285371021729SAbdiel Janulgue 	__u64 probed_size;
285471021729SAbdiel Janulgue 
285571021729SAbdiel Janulgue 	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
285671021729SAbdiel Janulgue 	__u64 unallocated_size;
285771021729SAbdiel Janulgue 
285871021729SAbdiel Janulgue 	/** @rsvd1: MBZ */
285971021729SAbdiel Janulgue 	__u64 rsvd1[8];
286071021729SAbdiel Janulgue };
286171021729SAbdiel Janulgue 
286271021729SAbdiel Janulgue /**
286371021729SAbdiel Janulgue  * struct drm_i915_query_memory_regions
286471021729SAbdiel Janulgue  *
286571021729SAbdiel Janulgue  * The region info query enumerates all regions known to the driver by filling
286671021729SAbdiel Janulgue  * in an array of struct drm_i915_memory_region_info structures.
286771021729SAbdiel Janulgue  *
286871021729SAbdiel Janulgue  * Example for getting the list of supported regions:
286971021729SAbdiel Janulgue  *
287071021729SAbdiel Janulgue  * .. code-block:: C
287171021729SAbdiel Janulgue  *
287271021729SAbdiel Janulgue  *	struct drm_i915_query_memory_regions *info;
287371021729SAbdiel Janulgue  *	struct drm_i915_query_item item = {
287471021729SAbdiel Janulgue  *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
287571021729SAbdiel Janulgue  *	};
287671021729SAbdiel Janulgue  *	struct drm_i915_query query = {
287771021729SAbdiel Janulgue  *		.num_items = 1,
287871021729SAbdiel Janulgue  *		.items_ptr = (uintptr_t)&item,
287971021729SAbdiel Janulgue  *	};
288071021729SAbdiel Janulgue  *	int err, i;
288171021729SAbdiel Janulgue  *
288271021729SAbdiel Janulgue  *	// First query the size of the blob we need, this needs to be large
288371021729SAbdiel Janulgue  *	// enough to hold our array of regions. The kernel will fill out the
288471021729SAbdiel Janulgue  *	// item.length for us, which is the number of bytes we need.
288571021729SAbdiel Janulgue  *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
288671021729SAbdiel Janulgue  *	if (err) ...
288771021729SAbdiel Janulgue  *
288871021729SAbdiel Janulgue  *	info = calloc(1, item.length);
288971021729SAbdiel Janulgue  *	// Now that we allocated the required number of bytes, we call the ioctl
289071021729SAbdiel Janulgue  *	// again, this time with the data_ptr pointing to our newly allocated
289171021729SAbdiel Janulgue  *	// blob, which the kernel can then populate with all the region info.
289271021729SAbdiel Janulgue  *	item.data_ptr = (uintptr_t)info;
289371021729SAbdiel Janulgue  *
289471021729SAbdiel Janulgue  *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
289571021729SAbdiel Janulgue  *	if (err) ...
289671021729SAbdiel Janulgue  *
289771021729SAbdiel Janulgue  *	// We can now access each region in the array
289871021729SAbdiel Janulgue  *	for (i = 0; i < info->num_regions; i++) {
289971021729SAbdiel Janulgue  *		struct drm_i915_memory_region_info mr = info->regions[i];
290071021729SAbdiel Janulgue  *		__u16 class = mr.region.memory_class;
290171021729SAbdiel Janulgue  *		__u16 instance = mr.region.memory_instance;
290271021729SAbdiel Janulgue  *
290371021729SAbdiel Janulgue  *		....
290471021729SAbdiel Janulgue  *	}
290571021729SAbdiel Janulgue  *
290671021729SAbdiel Janulgue  *	free(info);
290771021729SAbdiel Janulgue  */
290871021729SAbdiel Janulgue struct drm_i915_query_memory_regions {
290971021729SAbdiel Janulgue 	/** @num_regions: Number of supported regions */
291071021729SAbdiel Janulgue 	__u32 num_regions;
291171021729SAbdiel Janulgue 
291271021729SAbdiel Janulgue 	/** @rsvd: MBZ */
291371021729SAbdiel Janulgue 	__u32 rsvd[3];
291471021729SAbdiel Janulgue 
291571021729SAbdiel Janulgue 	/** @regions: Info about each supported region */
291671021729SAbdiel Janulgue 	struct drm_i915_memory_region_info regions[];
291771021729SAbdiel Janulgue };
291871021729SAbdiel Janulgue 
2919ebcb4029SMatthew Auld /**
2920ebcb4029SMatthew Auld  * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
2921ebcb4029SMatthew Auld  * extension support using struct i915_user_extension.
2922ebcb4029SMatthew Auld  *
2923ebcb4029SMatthew Auld  * Note that in the future we want to have our buffer flags here, at least for
2924ebcb4029SMatthew Auld  * the stuff that is immutable. Previously we would have two ioctls, one to
2925ebcb4029SMatthew Auld  * create the object with gem_create, and another to apply various parameters;
2926ebcb4029SMatthew Auld  * however, this creates some ambiguity for the params which are considered
2927ebcb4029SMatthew Auld  * immutable. Also, in general we're phasing out the various SET/GET ioctls.
2928ebcb4029SMatthew Auld  */
2929ebcb4029SMatthew Auld struct drm_i915_gem_create_ext {
2930ebcb4029SMatthew Auld 	/**
2931ebcb4029SMatthew Auld 	 * @size: Requested size for the object.
2932ebcb4029SMatthew Auld 	 *
2933ebcb4029SMatthew Auld 	 * The (page-aligned) allocated size for the object will be returned.
2934ebcb4029SMatthew Auld 	 *
29352459e56fSMatthew Auld 	 * Note that for some devices we might have further minimum
29362459e56fSMatthew Auld 	 * page-size restrictions (larger than 4K), like for device local-memory.
29372459e56fSMatthew Auld 	 * However in general the final size here should always reflect any
29382459e56fSMatthew Auld 	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
29392459e56fSMatthew Auld 	 * extension to place the object in device local-memory.
2940ebcb4029SMatthew Auld 	 */
2941ebcb4029SMatthew Auld 	__u64 size;
2942ebcb4029SMatthew Auld 	/**
2943ebcb4029SMatthew Auld 	 * @handle: Returned handle for the object.
2944ebcb4029SMatthew Auld 	 *
2945ebcb4029SMatthew Auld 	 * Object handles are nonzero.
2946ebcb4029SMatthew Auld 	 */
2947ebcb4029SMatthew Auld 	__u32 handle;
2948ebcb4029SMatthew Auld 	/** @flags: MBZ */
2949ebcb4029SMatthew Auld 	__u32 flags;
2950ebcb4029SMatthew Auld 	/**
2951ebcb4029SMatthew Auld 	 * @extensions: The chain of extensions to apply to this object.
2952ebcb4029SMatthew Auld 	 *
2953ebcb4029SMatthew Auld 	 * This will be useful in the future when we need to support several
2954ebcb4029SMatthew Auld 	 * different extensions, and we need to apply more than one when
2955ebcb4029SMatthew Auld 	 * creating the object. See struct i915_user_extension.
2956ebcb4029SMatthew Auld 	 *
2957ebcb4029SMatthew Auld 	 * If we don't supply any extensions then we get the same old gem_create
2958ebcb4029SMatthew Auld 	 * behaviour.
2959ebcb4029SMatthew Auld 	 *
29602459e56fSMatthew Auld 	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
29612459e56fSMatthew Auld 	 * struct drm_i915_gem_create_ext_memory_regions.
2962ebcb4029SMatthew Auld 	 */
29632459e56fSMatthew Auld #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
2964ebcb4029SMatthew Auld 	__u64 extensions;
2965ebcb4029SMatthew Auld };
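
/*
 * Example of the extension-less case, i.e. the old gem_create behaviour. An
 * illustrative sketch, not part of the original header:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 4096,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 *
 *	// create_ext.handle now holds the (nonzero) object handle, and
 *	// create_ext.size the final page-aligned object size.
 */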
2966ebcb4029SMatthew Auld 
29672459e56fSMatthew Auld /**
29682459e56fSMatthew Auld  * struct drm_i915_gem_create_ext_memory_regions - The
29692459e56fSMatthew Auld  * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
29702459e56fSMatthew Auld  *
29712459e56fSMatthew Auld  * Set the object with the desired set of placements/regions in priority
29722459e56fSMatthew Auld  * order. Each entry must be unique and supported by the device.
29732459e56fSMatthew Auld  *
29742459e56fSMatthew Auld  * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
29752459e56fSMatthew Auld  * an equivalent layout of class:instance pair encodings. See struct
29762459e56fSMatthew Auld  * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
29772459e56fSMatthew Auld  * query the supported regions for a device.
29782459e56fSMatthew Auld  *
29792459e56fSMatthew Auld  * As an example, on discrete devices, if we wish to set the placement as
29802459e56fSMatthew Auld  * device local-memory we can do something like:
29812459e56fSMatthew Auld  *
29822459e56fSMatthew Auld  * .. code-block:: C
29832459e56fSMatthew Auld  *
29842459e56fSMatthew Auld  *	struct drm_i915_gem_memory_class_instance region_lmem = {
29852459e56fSMatthew Auld  *		.memory_class = I915_MEMORY_CLASS_DEVICE,
29862459e56fSMatthew Auld  *		.memory_instance = 0,
29872459e56fSMatthew Auld  *	};
29882459e56fSMatthew Auld  *	struct drm_i915_gem_create_ext_memory_regions regions = {
29892459e56fSMatthew Auld  *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
29902459e56fSMatthew Auld  *		.regions = (uintptr_t)&region_lmem,
29912459e56fSMatthew Auld  *		.num_regions = 1,
29922459e56fSMatthew Auld  *	};
29932459e56fSMatthew Auld  *	struct drm_i915_gem_create_ext create_ext = {
29942459e56fSMatthew Auld  *		.size = 16 * PAGE_SIZE,
29952459e56fSMatthew Auld  *		.extensions = (uintptr_t)&regions,
29962459e56fSMatthew Auld  *	};
29972459e56fSMatthew Auld  *
29982459e56fSMatthew Auld  *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
29992459e56fSMatthew Auld  *	if (err) ...
30002459e56fSMatthew Auld  *
30012459e56fSMatthew Auld  * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
30022459e56fSMatthew Auld  * along with the final object size in &drm_i915_gem_create_ext.size, which
30032459e56fSMatthew Auld  * should account for any rounding up, if required.
30042459e56fSMatthew Auld  */
30052459e56fSMatthew Auld struct drm_i915_gem_create_ext_memory_regions {
30062459e56fSMatthew Auld 	/** @base: Extension link. See struct i915_user_extension. */
30072459e56fSMatthew Auld 	struct i915_user_extension base;
30082459e56fSMatthew Auld 
30092459e56fSMatthew Auld 	/** @pad: MBZ */
30102459e56fSMatthew Auld 	__u32 pad;
30112459e56fSMatthew Auld 	/** @num_regions: Number of elements in the @regions array. */
30122459e56fSMatthew Auld 	__u32 num_regions;
30132459e56fSMatthew Auld 	/**
30142459e56fSMatthew Auld 	 * @regions: The regions/placements array.
30152459e56fSMatthew Auld 	 *
30162459e56fSMatthew Auld 	 * An array of struct drm_i915_gem_memory_class_instance.
30172459e56fSMatthew Auld 	 */
30182459e56fSMatthew Auld 	__u64 regions;
30192459e56fSMatthew Auld };
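
/*
 * Since the regions array is in priority order, more than one placement can
 * be supplied. As an illustrative sketch, not part of the original header,
 * preferring device local-memory but letting the kernel fall back to system
 * memory could look like:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance placements[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.regions = (uintptr_t)placements,
 *		.num_regions = 2,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 */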
30202459e56fSMatthew Auld 
3021b1c1f5c4SEmil Velikov #if defined(__cplusplus)
3022b1c1f5c4SEmil Velikov }
3023b1c1f5c4SEmil Velikov #endif
3024b1c1f5c4SEmil Velikov 
3025718dceddSDavid Howells #endif /* _UAPI_I915_DRM_H_ */
3026