xref: /openbmc/linux/include/uapi/drm/i915_drm.h (revision e3bdccaf)
1718dceddSDavid Howells /*
2718dceddSDavid Howells  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3718dceddSDavid Howells  * All Rights Reserved.
4718dceddSDavid Howells  *
5718dceddSDavid Howells  * Permission is hereby granted, free of charge, to any person obtaining a
6718dceddSDavid Howells  * copy of this software and associated documentation files (the
7718dceddSDavid Howells  * "Software"), to deal in the Software without restriction, including
8718dceddSDavid Howells  * without limitation the rights to use, copy, modify, merge, publish,
9718dceddSDavid Howells  * distribute, sub license, and/or sell copies of the Software, and to
10718dceddSDavid Howells  * permit persons to whom the Software is furnished to do so, subject to
11718dceddSDavid Howells  * the following conditions:
12718dceddSDavid Howells  *
13718dceddSDavid Howells  * The above copyright notice and this permission notice (including the
14718dceddSDavid Howells  * next paragraph) shall be included in all copies or substantial portions
15718dceddSDavid Howells  * of the Software.
16718dceddSDavid Howells  *
17718dceddSDavid Howells  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18718dceddSDavid Howells  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19718dceddSDavid Howells  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20718dceddSDavid Howells  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21718dceddSDavid Howells  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22718dceddSDavid Howells  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23718dceddSDavid Howells  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24718dceddSDavid Howells  *
25718dceddSDavid Howells  */
26718dceddSDavid Howells 
27718dceddSDavid Howells #ifndef _UAPI_I915_DRM_H_
28718dceddSDavid Howells #define _UAPI_I915_DRM_H_
29718dceddSDavid Howells 
301049102fSGabriel Laskar #include "drm.h"
31718dceddSDavid Howells 
32b1c1f5c4SEmil Velikov #if defined(__cplusplus)
33b1c1f5c4SEmil Velikov extern "C" {
34b1c1f5c4SEmil Velikov #endif
35b1c1f5c4SEmil Velikov 
36718dceddSDavid Howells /* Please note that modifications to all structs defined here are
37718dceddSDavid Howells  * subject to backwards-compatibility constraints.
38718dceddSDavid Howells  */
39718dceddSDavid Howells 
40cce723edSBen Widawsky /**
41cce723edSBen Widawsky  * DOC: uevents generated by i915 on it's device node
42cce723edSBen Widawsky  *
43cce723edSBen Widawsky  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44cce723edSBen Widawsky  *	event from the gpu l3 cache. Additional information supplied is ROW,
4535a85ac6SBen Widawsky  *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
4635a85ac6SBen Widawsky  *	track of these events and if a specific cache-line seems to have a
4735a85ac6SBen Widawsky  *	persistent error remap it with the l3 remapping tool supplied in
4835a85ac6SBen Widawsky  *	intel-gpu-tools.  The value supplied with the event is always 1.
49cce723edSBen Widawsky  *
50cce723edSBen Widawsky  * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51cce723edSBen Widawsky  *	hangcheck. The error detection event is a good indicator of when things
52cce723edSBen Widawsky  *	began to go badly. The value supplied with the event is a 1 upon error
53cce723edSBen Widawsky  *	detection, and a 0 upon reset completion, signifying no more error
54cce723edSBen Widawsky  *	exists. NOTE: Disabling hangcheck or reset via module parameter will
55cce723edSBen Widawsky  *	cause the related events to not be seen.
56cce723edSBen Widawsky  *
57cce723edSBen Widawsky  * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
5866137f54SRandy Dunlap  *	GPU. The value supplied with the event is always 1. NOTE: Disable
59cce723edSBen Widawsky  *	reset via module parameter will cause this event to not be seen.
60cce723edSBen Widawsky  */
61cce723edSBen Widawsky #define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
62cce723edSBen Widawsky #define I915_ERROR_UEVENT		"ERROR"
63cce723edSBen Widawsky #define I915_RESET_UEVENT		"RESET"
64718dceddSDavid Howells 
6519d053d4SMatthew Auld /**
6619d053d4SMatthew Auld  * struct i915_user_extension - Base class for defining a chain of extensions
679d1305efSChris Wilson  *
689d1305efSChris Wilson  * Many interfaces need to grow over time. In most cases we can simply
699d1305efSChris Wilson  * extend the struct and have userspace pass in more data. Another option,
709d1305efSChris Wilson  * as demonstrated by Vulkan's approach to providing extensions for forward
719d1305efSChris Wilson  * and backward compatibility, is to use a list of optional structs to
729d1305efSChris Wilson  * provide those extra details.
739d1305efSChris Wilson  *
749d1305efSChris Wilson  * The key advantage to using an extension chain is that it allows us to
759d1305efSChris Wilson  * redefine the interface more easily than an ever growing struct of
769d1305efSChris Wilson  * increasing complexity, and for large parts of that interface to be
779d1305efSChris Wilson  * entirely optional. The downside is more pointer chasing; chasing across
789d1305efSChris Wilson  * the __user boundary with pointers encapsulated inside u64.
7919d053d4SMatthew Auld  *
8019d053d4SMatthew Auld  * Example chaining:
8119d053d4SMatthew Auld  *
8219d053d4SMatthew Auld  * .. code-block:: C
8319d053d4SMatthew Auld  *
8419d053d4SMatthew Auld  *	struct i915_user_extension ext3 {
8519d053d4SMatthew Auld  *		.next_extension = 0, // end
8619d053d4SMatthew Auld  *		.name = ...,
8719d053d4SMatthew Auld  *	};
8819d053d4SMatthew Auld  *	struct i915_user_extension ext2 {
8919d053d4SMatthew Auld  *		.next_extension = (uintptr_t)&ext3,
9019d053d4SMatthew Auld  *		.name = ...,
9119d053d4SMatthew Auld  *	};
9219d053d4SMatthew Auld  *	struct i915_user_extension ext1 {
9319d053d4SMatthew Auld  *		.next_extension = (uintptr_t)&ext2,
9419d053d4SMatthew Auld  *		.name = ...,
9519d053d4SMatthew Auld  *	};
9619d053d4SMatthew Auld  *
9719d053d4SMatthew Auld  * Typically the struct i915_user_extension would be embedded in some uAPI
9819d053d4SMatthew Auld  * struct, and in this case we would feed it the head of the chain(i.e ext1),
9919d053d4SMatthew Auld  * which would then apply all of the above extensions.
10019d053d4SMatthew Auld  *
1019d1305efSChris Wilson  */
1029d1305efSChris Wilson struct i915_user_extension {
10319d053d4SMatthew Auld 	/**
10419d053d4SMatthew Auld 	 * @next_extension:
10519d053d4SMatthew Auld 	 *
10619d053d4SMatthew Auld 	 * Pointer to the next struct i915_user_extension, or zero if the end.
10719d053d4SMatthew Auld 	 */
1089d1305efSChris Wilson 	__u64 next_extension;
10919d053d4SMatthew Auld 	/**
11019d053d4SMatthew Auld 	 * @name: Name of the extension.
11119d053d4SMatthew Auld 	 *
11219d053d4SMatthew Auld 	 * Note that the name here is just some integer.
11319d053d4SMatthew Auld 	 *
11419d053d4SMatthew Auld 	 * Also note that the name space for this is not global for the whole
11519d053d4SMatthew Auld 	 * driver, but rather its scope/meaning is limited to the specific piece
11619d053d4SMatthew Auld 	 * of uAPI which has embedded the struct i915_user_extension.
11719d053d4SMatthew Auld 	 */
1189d1305efSChris Wilson 	__u32 name;
11919d053d4SMatthew Auld 	/**
12019d053d4SMatthew Auld 	 * @flags: MBZ
12119d053d4SMatthew Auld 	 *
12219d053d4SMatthew Auld 	 * All undefined bits must be zero.
12319d053d4SMatthew Auld 	 */
12419d053d4SMatthew Auld 	__u32 flags;
12519d053d4SMatthew Auld 	/**
12619d053d4SMatthew Auld 	 * @rsvd: MBZ
12719d053d4SMatthew Auld 	 *
12819d053d4SMatthew Auld 	 * Reserved for future use; must be zero.
12919d053d4SMatthew Auld 	 */
13019d053d4SMatthew Auld 	__u32 rsvd[4];
1319d1305efSChris Wilson };
1329d1305efSChris Wilson 
1339d1305efSChris Wilson /*
1343373ce2eSImre Deak  * MOCS indexes used for GPU surfaces, defining the cacheability of the
1353373ce2eSImre Deak  * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
1363373ce2eSImre Deak  */
1373373ce2eSImre Deak enum i915_mocs_table_index {
1383373ce2eSImre Deak 	/*
1393373ce2eSImre Deak 	 * Not cached anywhere, coherency between CPU and GPU accesses is
1403373ce2eSImre Deak 	 * guaranteed.
1413373ce2eSImre Deak 	 */
1423373ce2eSImre Deak 	I915_MOCS_UNCACHED,
1433373ce2eSImre Deak 	/*
1443373ce2eSImre Deak 	 * Cacheability and coherency controlled by the kernel automatically
1453373ce2eSImre Deak 	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
1463373ce2eSImre Deak 	 * usage of the surface (used for display scanout or not).
1473373ce2eSImre Deak 	 */
1483373ce2eSImre Deak 	I915_MOCS_PTE,
1493373ce2eSImre Deak 	/*
1503373ce2eSImre Deak 	 * Cached in all GPU caches available on the platform.
1513373ce2eSImre Deak 	 * Coherency between CPU and GPU accesses to the surface is not
1523373ce2eSImre Deak 	 * guaranteed without extra synchronization.
1533373ce2eSImre Deak 	 */
1543373ce2eSImre Deak 	I915_MOCS_CACHED,
1553373ce2eSImre Deak };
1563373ce2eSImre Deak 
1571803fcbcSTvrtko Ursulin /*
1581803fcbcSTvrtko Ursulin  * Different engines serve different roles, and there may be more than one
1591803fcbcSTvrtko Ursulin  * engine serving each role. enum drm_i915_gem_engine_class provides a
1601803fcbcSTvrtko Ursulin  * classification of the role of the engine, which may be used when requesting
1611803fcbcSTvrtko Ursulin  * operations to be performed on a certain subset of engines, or for providing
1621803fcbcSTvrtko Ursulin  * information about that group.
1631803fcbcSTvrtko Ursulin  */
1641803fcbcSTvrtko Ursulin enum drm_i915_gem_engine_class {
1651803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_RENDER	= 0,
1661803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_COPY		= 1,
1671803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_VIDEO		= 2,
1681803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
1691803fcbcSTvrtko Ursulin 
170be03564bSChris Wilson 	/* should be kept compact */
171be03564bSChris Wilson 
1721803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_INVALID	= -1
1731803fcbcSTvrtko Ursulin };
1741803fcbcSTvrtko Ursulin 
175d1172ab3SChris Wilson /*
176d1172ab3SChris Wilson  * There may be more than one engine fulfilling any role within the system.
177d1172ab3SChris Wilson  * Each engine of a class is given a unique instance number and therefore
178d1172ab3SChris Wilson  * any engine can be specified by its class:instance tuplet. APIs that allow
179d1172ab3SChris Wilson  * access to any engine in the system will use struct i915_engine_class_instance
180d1172ab3SChris Wilson  * for this identification.
181d1172ab3SChris Wilson  */
182d1172ab3SChris Wilson struct i915_engine_class_instance {
183d1172ab3SChris Wilson 	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
184d1172ab3SChris Wilson 	__u16 engine_instance;
185976b55f0SChris Wilson #define I915_ENGINE_CLASS_INVALID_NONE -1
1866d06779eSChris Wilson #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
187d1172ab3SChris Wilson };
188d1172ab3SChris Wilson 
189b46a33e2STvrtko Ursulin /**
190b46a33e2STvrtko Ursulin  * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
191b46a33e2STvrtko Ursulin  *
192b46a33e2STvrtko Ursulin  */
193b46a33e2STvrtko Ursulin 
194b46a33e2STvrtko Ursulin enum drm_i915_pmu_engine_sample {
195b46a33e2STvrtko Ursulin 	I915_SAMPLE_BUSY = 0,
196b46a33e2STvrtko Ursulin 	I915_SAMPLE_WAIT = 1,
197b552ae44STvrtko Ursulin 	I915_SAMPLE_SEMA = 2
198b46a33e2STvrtko Ursulin };
199b46a33e2STvrtko Ursulin 
200b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_BITS (4)
201b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_MASK (0xf)
202b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_INSTANCE_BITS (8)
203b46a33e2STvrtko Ursulin #define I915_PMU_CLASS_SHIFT \
204b46a33e2STvrtko Ursulin 	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
205b46a33e2STvrtko Ursulin 
206b46a33e2STvrtko Ursulin #define __I915_PMU_ENGINE(class, instance, sample) \
207b46a33e2STvrtko Ursulin 	((class) << I915_PMU_CLASS_SHIFT | \
208b46a33e2STvrtko Ursulin 	(instance) << I915_PMU_SAMPLE_BITS | \
209b46a33e2STvrtko Ursulin 	(sample))
210b46a33e2STvrtko Ursulin 
211b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_BUSY(class, instance) \
212b46a33e2STvrtko Ursulin 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
213b46a33e2STvrtko Ursulin 
214b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_WAIT(class, instance) \
215b46a33e2STvrtko Ursulin 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
216b46a33e2STvrtko Ursulin 
217b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_SEMA(class, instance) \
218b46a33e2STvrtko Ursulin 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
219b46a33e2STvrtko Ursulin 
220b46a33e2STvrtko Ursulin #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
221b46a33e2STvrtko Ursulin 
222b46a33e2STvrtko Ursulin #define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
223b46a33e2STvrtko Ursulin #define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
2240cd4684dSTvrtko Ursulin #define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
2256060b6aeSTvrtko Ursulin #define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
2268c3b1ba0SChris Wilson #define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)
2276060b6aeSTvrtko Ursulin 
228348fb0cbSTvrtko Ursulin #define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
229b46a33e2STvrtko Ursulin 
230718dceddSDavid Howells /* Each region is a minimum of 16k, and there are at most 255 of them.
231718dceddSDavid Howells  */
232718dceddSDavid Howells #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
233718dceddSDavid Howells 				 * of chars for next/prev indices */
234718dceddSDavid Howells #define I915_LOG_MIN_TEX_REGION_SIZE 14
235718dceddSDavid Howells 
236718dceddSDavid Howells typedef struct _drm_i915_init {
237718dceddSDavid Howells 	enum {
238718dceddSDavid Howells 		I915_INIT_DMA = 0x01,
239718dceddSDavid Howells 		I915_CLEANUP_DMA = 0x02,
240718dceddSDavid Howells 		I915_RESUME_DMA = 0x03
241718dceddSDavid Howells 	} func;
242718dceddSDavid Howells 	unsigned int mmio_offset;
243718dceddSDavid Howells 	int sarea_priv_offset;
244718dceddSDavid Howells 	unsigned int ring_start;
245718dceddSDavid Howells 	unsigned int ring_end;
246718dceddSDavid Howells 	unsigned int ring_size;
247718dceddSDavid Howells 	unsigned int front_offset;
248718dceddSDavid Howells 	unsigned int back_offset;
249718dceddSDavid Howells 	unsigned int depth_offset;
250718dceddSDavid Howells 	unsigned int w;
251718dceddSDavid Howells 	unsigned int h;
252718dceddSDavid Howells 	unsigned int pitch;
253718dceddSDavid Howells 	unsigned int pitch_bits;
254718dceddSDavid Howells 	unsigned int back_pitch;
255718dceddSDavid Howells 	unsigned int depth_pitch;
256718dceddSDavid Howells 	unsigned int cpp;
257718dceddSDavid Howells 	unsigned int chipset;
258718dceddSDavid Howells } drm_i915_init_t;
259718dceddSDavid Howells 
260718dceddSDavid Howells typedef struct _drm_i915_sarea {
261718dceddSDavid Howells 	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
262718dceddSDavid Howells 	int last_upload;	/* last time texture was uploaded */
263718dceddSDavid Howells 	int last_enqueue;	/* last time a buffer was enqueued */
264718dceddSDavid Howells 	int last_dispatch;	/* age of the most recently dispatched buffer */
265718dceddSDavid Howells 	int ctxOwner;		/* last context to upload state */
266718dceddSDavid Howells 	int texAge;
267718dceddSDavid Howells 	int pf_enabled;		/* is pageflipping allowed? */
268718dceddSDavid Howells 	int pf_active;
269718dceddSDavid Howells 	int pf_current_page;	/* which buffer is being displayed? */
270718dceddSDavid Howells 	int perf_boxes;		/* performance boxes to be displayed */
271718dceddSDavid Howells 	int width, height;      /* screen size in pixels */
272718dceddSDavid Howells 
273718dceddSDavid Howells 	drm_handle_t front_handle;
274718dceddSDavid Howells 	int front_offset;
275718dceddSDavid Howells 	int front_size;
276718dceddSDavid Howells 
277718dceddSDavid Howells 	drm_handle_t back_handle;
278718dceddSDavid Howells 	int back_offset;
279718dceddSDavid Howells 	int back_size;
280718dceddSDavid Howells 
281718dceddSDavid Howells 	drm_handle_t depth_handle;
282718dceddSDavid Howells 	int depth_offset;
283718dceddSDavid Howells 	int depth_size;
284718dceddSDavid Howells 
285718dceddSDavid Howells 	drm_handle_t tex_handle;
286718dceddSDavid Howells 	int tex_offset;
287718dceddSDavid Howells 	int tex_size;
288718dceddSDavid Howells 	int log_tex_granularity;
289718dceddSDavid Howells 	int pitch;
290718dceddSDavid Howells 	int rotation;           /* 0, 90, 180 or 270 */
291718dceddSDavid Howells 	int rotated_offset;
292718dceddSDavid Howells 	int rotated_size;
293718dceddSDavid Howells 	int rotated_pitch;
294718dceddSDavid Howells 	int virtualX, virtualY;
295718dceddSDavid Howells 
296718dceddSDavid Howells 	unsigned int front_tiled;
297718dceddSDavid Howells 	unsigned int back_tiled;
298718dceddSDavid Howells 	unsigned int depth_tiled;
299718dceddSDavid Howells 	unsigned int rotated_tiled;
300718dceddSDavid Howells 	unsigned int rotated2_tiled;
301718dceddSDavid Howells 
302718dceddSDavid Howells 	int pipeA_x;
303718dceddSDavid Howells 	int pipeA_y;
304718dceddSDavid Howells 	int pipeA_w;
305718dceddSDavid Howells 	int pipeA_h;
306718dceddSDavid Howells 	int pipeB_x;
307718dceddSDavid Howells 	int pipeB_y;
308718dceddSDavid Howells 	int pipeB_w;
309718dceddSDavid Howells 	int pipeB_h;
310718dceddSDavid Howells 
311718dceddSDavid Howells 	/* fill out some space for old userspace triple buffer */
312718dceddSDavid Howells 	drm_handle_t unused_handle;
313718dceddSDavid Howells 	__u32 unused1, unused2, unused3;
314718dceddSDavid Howells 
315718dceddSDavid Howells 	/* buffer object handles for static buffers. May change
316718dceddSDavid Howells 	 * over the lifetime of the client.
317718dceddSDavid Howells 	 */
318718dceddSDavid Howells 	__u32 front_bo_handle;
319718dceddSDavid Howells 	__u32 back_bo_handle;
320718dceddSDavid Howells 	__u32 unused_bo_handle;
321718dceddSDavid Howells 	__u32 depth_bo_handle;
322718dceddSDavid Howells 
323718dceddSDavid Howells } drm_i915_sarea_t;
324718dceddSDavid Howells 
325718dceddSDavid Howells /* due to userspace building against these headers we need some compat here */
326718dceddSDavid Howells #define planeA_x pipeA_x
327718dceddSDavid Howells #define planeA_y pipeA_y
328718dceddSDavid Howells #define planeA_w pipeA_w
329718dceddSDavid Howells #define planeA_h pipeA_h
330718dceddSDavid Howells #define planeB_x pipeB_x
331718dceddSDavid Howells #define planeB_y pipeB_y
332718dceddSDavid Howells #define planeB_w pipeB_w
333718dceddSDavid Howells #define planeB_h pipeB_h
334718dceddSDavid Howells 
335718dceddSDavid Howells /* Flags for perf_boxes
336718dceddSDavid Howells  */
337718dceddSDavid Howells #define I915_BOX_RING_EMPTY    0x1
338718dceddSDavid Howells #define I915_BOX_FLIP          0x2
339718dceddSDavid Howells #define I915_BOX_WAIT          0x4
340718dceddSDavid Howells #define I915_BOX_TEXTURE_LOAD  0x8
341718dceddSDavid Howells #define I915_BOX_LOST_CONTEXT  0x10
342718dceddSDavid Howells 
34321631f10SDamien Lespiau /*
34421631f10SDamien Lespiau  * i915 specific ioctls.
34521631f10SDamien Lespiau  *
34621631f10SDamien Lespiau  * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
34721631f10SDamien Lespiau  * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
34821631f10SDamien Lespiau  * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
349718dceddSDavid Howells  */
350718dceddSDavid Howells #define DRM_I915_INIT		0x00
351718dceddSDavid Howells #define DRM_I915_FLUSH		0x01
352718dceddSDavid Howells #define DRM_I915_FLIP		0x02
353718dceddSDavid Howells #define DRM_I915_BATCHBUFFER	0x03
354718dceddSDavid Howells #define DRM_I915_IRQ_EMIT	0x04
355718dceddSDavid Howells #define DRM_I915_IRQ_WAIT	0x05
356718dceddSDavid Howells #define DRM_I915_GETPARAM	0x06
357718dceddSDavid Howells #define DRM_I915_SETPARAM	0x07
358718dceddSDavid Howells #define DRM_I915_ALLOC		0x08
359718dceddSDavid Howells #define DRM_I915_FREE		0x09
360718dceddSDavid Howells #define DRM_I915_INIT_HEAP	0x0a
361718dceddSDavid Howells #define DRM_I915_CMDBUFFER	0x0b
362718dceddSDavid Howells #define DRM_I915_DESTROY_HEAP	0x0c
363718dceddSDavid Howells #define DRM_I915_SET_VBLANK_PIPE	0x0d
364718dceddSDavid Howells #define DRM_I915_GET_VBLANK_PIPE	0x0e
365718dceddSDavid Howells #define DRM_I915_VBLANK_SWAP	0x0f
366718dceddSDavid Howells #define DRM_I915_HWS_ADDR	0x11
367718dceddSDavid Howells #define DRM_I915_GEM_INIT	0x13
368718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER	0x14
369718dceddSDavid Howells #define DRM_I915_GEM_PIN	0x15
370718dceddSDavid Howells #define DRM_I915_GEM_UNPIN	0x16
371718dceddSDavid Howells #define DRM_I915_GEM_BUSY	0x17
372718dceddSDavid Howells #define DRM_I915_GEM_THROTTLE	0x18
373718dceddSDavid Howells #define DRM_I915_GEM_ENTERVT	0x19
374718dceddSDavid Howells #define DRM_I915_GEM_LEAVEVT	0x1a
375718dceddSDavid Howells #define DRM_I915_GEM_CREATE	0x1b
376718dceddSDavid Howells #define DRM_I915_GEM_PREAD	0x1c
377718dceddSDavid Howells #define DRM_I915_GEM_PWRITE	0x1d
378718dceddSDavid Howells #define DRM_I915_GEM_MMAP	0x1e
379718dceddSDavid Howells #define DRM_I915_GEM_SET_DOMAIN	0x1f
380718dceddSDavid Howells #define DRM_I915_GEM_SW_FINISH	0x20
381718dceddSDavid Howells #define DRM_I915_GEM_SET_TILING	0x21
382718dceddSDavid Howells #define DRM_I915_GEM_GET_TILING	0x22
383718dceddSDavid Howells #define DRM_I915_GEM_GET_APERTURE 0x23
384718dceddSDavid Howells #define DRM_I915_GEM_MMAP_GTT	0x24
385718dceddSDavid Howells #define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
386718dceddSDavid Howells #define DRM_I915_GEM_MADVISE	0x26
387718dceddSDavid Howells #define DRM_I915_OVERLAY_PUT_IMAGE	0x27
388718dceddSDavid Howells #define DRM_I915_OVERLAY_ATTRS	0x28
389718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER2	0x29
390fec0445cSChris Wilson #define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
391718dceddSDavid Howells #define DRM_I915_GET_SPRITE_COLORKEY	0x2a
392718dceddSDavid Howells #define DRM_I915_SET_SPRITE_COLORKEY	0x2b
393718dceddSDavid Howells #define DRM_I915_GEM_WAIT	0x2c
394718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
395718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
396718dceddSDavid Howells #define DRM_I915_GEM_SET_CACHING	0x2f
397718dceddSDavid Howells #define DRM_I915_GEM_GET_CACHING	0x30
398718dceddSDavid Howells #define DRM_I915_REG_READ		0x31
399b6359918SMika Kuoppala #define DRM_I915_GET_RESET_STATS	0x32
4005cc9ed4bSChris Wilson #define DRM_I915_GEM_USERPTR		0x33
401c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
402c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
403eec688e1SRobert Bragg #define DRM_I915_PERF_OPEN		0x36
404f89823c2SLionel Landwerlin #define DRM_I915_PERF_ADD_CONFIG	0x37
405f89823c2SLionel Landwerlin #define DRM_I915_PERF_REMOVE_CONFIG	0x38
406a446ae2cSLionel Landwerlin #define DRM_I915_QUERY			0x39
4077f3f317aSChris Wilson #define DRM_I915_GEM_VM_CREATE		0x3a
4087f3f317aSChris Wilson #define DRM_I915_GEM_VM_DESTROY		0x3b
409be03564bSChris Wilson /* Must be kept compact -- no holes */
410718dceddSDavid Howells 
411718dceddSDavid Howells #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
412718dceddSDavid Howells #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
413718dceddSDavid Howells #define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
414718dceddSDavid Howells #define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
415718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
416718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
417718dceddSDavid Howells #define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
418718dceddSDavid Howells #define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
419718dceddSDavid Howells #define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
420718dceddSDavid Howells #define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
421718dceddSDavid Howells #define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
422718dceddSDavid Howells #define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
423718dceddSDavid Howells #define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
424718dceddSDavid Howells #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
425718dceddSDavid Howells #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
426718dceddSDavid Howells #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
427718dceddSDavid Howells #define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
428718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
429718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
430718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
431fec0445cSChris Wilson #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
432718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
433718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
434718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
435718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
436718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
437718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
438718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
439718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
440718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
441718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
442718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
443718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
444718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
445cc662126SAbdiel Janulgue #define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
446718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
447718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
448718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
449718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
450718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
451718dceddSDavid Howells #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
452718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
453718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
454718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
455718dceddSDavid Howells #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
4562c60fae1STommi Rantala #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
457718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
458718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
459b9171541SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
460718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
461718dceddSDavid Howells #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
462b6359918SMika Kuoppala #define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
4635cc9ed4bSChris Wilson #define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
464c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
465c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
466eec688e1SRobert Bragg #define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
467f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
468f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
469a446ae2cSLionel Landwerlin #define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
4707f3f317aSChris Wilson #define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
4717f3f317aSChris Wilson #define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
472718dceddSDavid Howells 
473718dceddSDavid Howells /* Allow drivers to submit batchbuffers directly to hardware, relying
474718dceddSDavid Howells  * on the security mechanisms provided by hardware.
475718dceddSDavid Howells  */
476718dceddSDavid Howells typedef struct drm_i915_batchbuffer {
477718dceddSDavid Howells 	int start;		/* agp offset */
478718dceddSDavid Howells 	int used;		/* nr bytes in use */
479718dceddSDavid Howells 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
480718dceddSDavid Howells 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
481718dceddSDavid Howells 	int num_cliprects;	/* mulitpass with multiple cliprects? */
482718dceddSDavid Howells 	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
483718dceddSDavid Howells } drm_i915_batchbuffer_t;
484718dceddSDavid Howells 
485718dceddSDavid Howells /* As above, but pass a pointer to userspace buffer which can be
486718dceddSDavid Howells  * validated by the kernel prior to sending to hardware.
487718dceddSDavid Howells  */
488718dceddSDavid Howells typedef struct _drm_i915_cmdbuffer {
489718dceddSDavid Howells 	char __user *buf;	/* pointer to userspace command buffer */
490718dceddSDavid Howells 	int sz;			/* nr bytes in buf */
491718dceddSDavid Howells 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
492718dceddSDavid Howells 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
493718dceddSDavid Howells 	int num_cliprects;	/* mulitpass with multiple cliprects? */
494718dceddSDavid Howells 	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
495718dceddSDavid Howells } drm_i915_cmdbuffer_t;
496718dceddSDavid Howells 
497718dceddSDavid Howells /* Userspace can request & wait on irq's:
498718dceddSDavid Howells  */
499718dceddSDavid Howells typedef struct drm_i915_irq_emit {
500718dceddSDavid Howells 	int __user *irq_seq;
501718dceddSDavid Howells } drm_i915_irq_emit_t;
502718dceddSDavid Howells 
503718dceddSDavid Howells typedef struct drm_i915_irq_wait {
504718dceddSDavid Howells 	int irq_seq;
505718dceddSDavid Howells } drm_i915_irq_wait_t;
506718dceddSDavid Howells 
5074bdafb9dSChris Wilson /*
5084bdafb9dSChris Wilson  * Different modes of per-process Graphics Translation Table,
5094bdafb9dSChris Wilson  * see I915_PARAM_HAS_ALIASING_PPGTT
5104bdafb9dSChris Wilson  */
5114bdafb9dSChris Wilson #define I915_GEM_PPGTT_NONE	0
5124bdafb9dSChris Wilson #define I915_GEM_PPGTT_ALIASING	1
5134bdafb9dSChris Wilson #define I915_GEM_PPGTT_FULL	2
5144bdafb9dSChris Wilson 
515718dceddSDavid Howells /* Ioctl to query kernel params:
516718dceddSDavid Howells  */
517718dceddSDavid Howells #define I915_PARAM_IRQ_ACTIVE            1
518718dceddSDavid Howells #define I915_PARAM_ALLOW_BATCHBUFFER     2
519718dceddSDavid Howells #define I915_PARAM_LAST_DISPATCH         3
520718dceddSDavid Howells #define I915_PARAM_CHIPSET_ID            4
521718dceddSDavid Howells #define I915_PARAM_HAS_GEM               5
522718dceddSDavid Howells #define I915_PARAM_NUM_FENCES_AVAIL      6
523718dceddSDavid Howells #define I915_PARAM_HAS_OVERLAY           7
524718dceddSDavid Howells #define I915_PARAM_HAS_PAGEFLIPPING	 8
525718dceddSDavid Howells #define I915_PARAM_HAS_EXECBUF2          9
526718dceddSDavid Howells #define I915_PARAM_HAS_BSD		 10
527718dceddSDavid Howells #define I915_PARAM_HAS_BLT		 11
528718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_FENCING	 12
529718dceddSDavid Howells #define I915_PARAM_HAS_COHERENT_RINGS	 13
530718dceddSDavid Howells #define I915_PARAM_HAS_EXEC_CONSTANTS	 14
531718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_DELTA	 15
532718dceddSDavid Howells #define I915_PARAM_HAS_GEN7_SOL_RESET	 16
533718dceddSDavid Howells #define I915_PARAM_HAS_LLC     	 	 17
534718dceddSDavid Howells #define I915_PARAM_HAS_ALIASING_PPGTT	 18
535718dceddSDavid Howells #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
536718dceddSDavid Howells #define I915_PARAM_HAS_SEMAPHORES	 20
537718dceddSDavid Howells #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
538a1f2cc73SXiang, Haihao #define I915_PARAM_HAS_VEBOX		 22
539c2fb7916SDaniel Vetter #define I915_PARAM_HAS_SECURE_BATCHES	 23
540b45305fcSDaniel Vetter #define I915_PARAM_HAS_PINNED_BATCHES	 24
541ed5982e6SDaniel Vetter #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
542eef90ccbSChris Wilson #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
543651d794fSChris Wilson #define I915_PARAM_HAS_WT     	 	 27
544d728c8efSBrad Volkin #define I915_PARAM_CMD_PARSER_VERSION	 28
5456a2c4232SChris Wilson #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
5461816f923SAkash Goel #define I915_PARAM_MMAP_VERSION          30
54708e16dc8SZhipeng Gong #define I915_PARAM_HAS_BSD2		 31
54827cd4461SNeil Roberts #define I915_PARAM_REVISION              32
549a1559ffeSJeff McGee #define I915_PARAM_SUBSLICE_TOTAL	 33
550a1559ffeSJeff McGee #define I915_PARAM_EU_TOTAL		 34
55149e4d842SChris Wilson #define I915_PARAM_HAS_GPU_RESET	 35
552a9ed33caSAbdiel Janulgue #define I915_PARAM_HAS_RESOURCE_STREAMER 36
553506a8e87SChris Wilson #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
55437f501afSarun.siluvery@linux.intel.com #define I915_PARAM_HAS_POOLED_EU	 38
55537f501afSarun.siluvery@linux.intel.com #define I915_PARAM_MIN_EU_IN_POOL	 39
5564cc69075SChris Wilson #define I915_PARAM_MMAP_GTT_VERSION	 40
557718dceddSDavid Howells 
558bf64e0b0SChris Wilson /*
559bf64e0b0SChris Wilson  * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
5600de9136dSChris Wilson  * priorities and the driver will attempt to execute batches in priority order.
561bf64e0b0SChris Wilson  * The param returns a capability bitmask, nonzero implies that the scheduler
562bf64e0b0SChris Wilson  * is enabled, with different features present according to the mask.
563ac14fbd4SChris Wilson  *
564ac14fbd4SChris Wilson  * The initial priority for each batch is supplied by the context and is
565ac14fbd4SChris Wilson  * controlled via I915_CONTEXT_PARAM_PRIORITY.
5660de9136dSChris Wilson  */
5670de9136dSChris Wilson #define I915_PARAM_HAS_SCHEDULER	 41
568bf64e0b0SChris Wilson #define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
569bf64e0b0SChris Wilson #define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
570bf64e0b0SChris Wilson #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
571e8861964SChris Wilson #define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
572bf73fc0fSChris Wilson #define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
573bf64e0b0SChris Wilson 
5745464cd65SAnusha Srivatsa #define I915_PARAM_HUC_STATUS		 42
5750de9136dSChris Wilson 
57677ae9957SChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
57777ae9957SChris Wilson  * synchronisation with implicit fencing on individual objects.
57877ae9957SChris Wilson  * See EXEC_OBJECT_ASYNC.
57977ae9957SChris Wilson  */
58077ae9957SChris Wilson #define I915_PARAM_HAS_EXEC_ASYNC	 43
58177ae9957SChris Wilson 
582fec0445cSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
583fec0445cSChris Wilson  * both being able to pass in a sync_file fd to wait upon before executing,
584fec0445cSChris Wilson  * and being able to return a new sync_file fd that is signaled when the
585fec0445cSChris Wilson  * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
586fec0445cSChris Wilson  */
587fec0445cSChris Wilson #define I915_PARAM_HAS_EXEC_FENCE	 44
588fec0445cSChris Wilson 
589b0fd47adSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
590b0fd47adSChris Wilson  * user specified bufffers for post-mortem debugging of GPU hangs. See
591b0fd47adSChris Wilson  * EXEC_OBJECT_CAPTURE.
592b0fd47adSChris Wilson  */
593b0fd47adSChris Wilson #define I915_PARAM_HAS_EXEC_CAPTURE	 45
594b0fd47adSChris Wilson 
5957fed555cSRobert Bragg #define I915_PARAM_SLICE_MASK		 46
5967fed555cSRobert Bragg 
597f5320233SRobert Bragg /* Assuming it's uniform for each slice, this queries the mask of subslices
598f5320233SRobert Bragg  * per-slice for this system.
599f5320233SRobert Bragg  */
600f5320233SRobert Bragg #define I915_PARAM_SUBSLICE_MASK	 47
601f5320233SRobert Bragg 
6021a71cf2fSChris Wilson /*
6031a71cf2fSChris Wilson  * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
6041a71cf2fSChris Wilson  * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
6051a71cf2fSChris Wilson  */
6061a71cf2fSChris Wilson #define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
6071a71cf2fSChris Wilson 
608cf6e7bacSJason Ekstrand /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
609cf6e7bacSJason Ekstrand  * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
610cf6e7bacSJason Ekstrand  */
611cf6e7bacSJason Ekstrand #define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
612cf6e7bacSJason Ekstrand 
613d2b4b979SChris Wilson /*
614d2b4b979SChris Wilson  * Query whether every context (both per-file default and user created) is
615d2b4b979SChris Wilson  * isolated (insofar as HW supports). If this parameter is not true, then
616d2b4b979SChris Wilson  * freshly created contexts may inherit values from an existing context,
617d2b4b979SChris Wilson  * rather than default HW values. If true, it also ensures (insofar as HW
618d2b4b979SChris Wilson  * supports) that all state set by this context will not leak to any other
619d2b4b979SChris Wilson  * context.
620d2b4b979SChris Wilson  *
621d2b4b979SChris Wilson  * As not every engine across every gen support contexts, the returned
622d2b4b979SChris Wilson  * value reports the support of context isolation for individual engines by
623d2b4b979SChris Wilson  * returning a bitmask of each engine class set to true if that class supports
624d2b4b979SChris Wilson  * isolation.
625d2b4b979SChris Wilson  */
626d2b4b979SChris Wilson #define I915_PARAM_HAS_CONTEXT_ISOLATION 50
627d2b4b979SChris Wilson 
628dab91783SLionel Landwerlin /* Frequency of the command streamer timestamps given by the *_TIMESTAMP
629dab91783SLionel Landwerlin  * registers. This used to be fixed per platform but from CNL onwards, this
630dab91783SLionel Landwerlin  * might vary depending on the parts.
631dab91783SLionel Landwerlin  */
632dab91783SLionel Landwerlin #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
633dab91783SLionel Landwerlin 
634900ccf30SChris Wilson /*
635900ccf30SChris Wilson  * Once upon a time we supposed that writes through the GGTT would be
636900ccf30SChris Wilson  * immediately in physical memory (once flushed out of the CPU path). However,
637900ccf30SChris Wilson  * on a few different processors and chipsets, this is not necessarily the case
638900ccf30SChris Wilson  * as the writes appear to be buffered internally. Thus a read of the backing
639900ccf30SChris Wilson  * storage (physical memory) via a different path (with different physical tags
640900ccf30SChris Wilson  * to the indirect write via the GGTT) will see stale values from before
641900ccf30SChris Wilson  * the GGTT write. Inside the kernel, we can for the most part keep track of
642900ccf30SChris Wilson  * the different read/write domains in use (e.g. set-domain), but the assumption
643900ccf30SChris Wilson  * of coherency is baked into the ABI, hence reporting its true state in this
644900ccf30SChris Wilson  * parameter.
645900ccf30SChris Wilson  *
646900ccf30SChris Wilson  * Reports true when writes via mmap_gtt are immediately visible following an
647900ccf30SChris Wilson  * lfence to flush the WCB.
648900ccf30SChris Wilson  *
649900ccf30SChris Wilson  * Reports false when writes via mmap_gtt are indeterminately delayed in an in
650900ccf30SChris Wilson  * internal buffer and are _not_ immediately visible to third parties accessing
651900ccf30SChris Wilson  * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
652900ccf30SChris Wilson  * communications channel when reporting false is strongly disadvised.
653900ccf30SChris Wilson  */
654900ccf30SChris Wilson #define I915_PARAM_MMAP_GTT_COHERENT	52
655900ccf30SChris Wilson 
656a88b6e4cSChris Wilson /*
657a88b6e4cSChris Wilson  * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
658a88b6e4cSChris Wilson  * execution through use of explicit fence support.
659a88b6e4cSChris Wilson  * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
660a88b6e4cSChris Wilson  */
661a88b6e4cSChris Wilson #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
662b8d49f28SLionel Landwerlin 
663b8d49f28SLionel Landwerlin /*
664b8d49f28SLionel Landwerlin  * Revision of the i915-perf uAPI. The value returned helps determine what
665b8d49f28SLionel Landwerlin  * i915-perf features are available. See drm_i915_perf_property_id.
666b8d49f28SLionel Landwerlin  */
667b8d49f28SLionel Landwerlin #define I915_PARAM_PERF_REVISION	54
668b8d49f28SLionel Landwerlin 
66913149e8bSLionel Landwerlin /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
67013149e8bSLionel Landwerlin  * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
67113149e8bSLionel Landwerlin  * I915_EXEC_USE_EXTENSIONS.
67213149e8bSLionel Landwerlin  */
67313149e8bSLionel Landwerlin #define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
67413149e8bSLionel Landwerlin 
675be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
676be03564bSChris Wilson 
677718dceddSDavid Howells typedef struct drm_i915_getparam {
67816f7249dSArtem Savkov 	__s32 param;
679346add78SDaniel Vetter 	/*
680346add78SDaniel Vetter 	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
681346add78SDaniel Vetter 	 * compat32 code. Don't repeat this mistake.
682346add78SDaniel Vetter 	 */
683718dceddSDavid Howells 	int __user *value;
684718dceddSDavid Howells } drm_i915_getparam_t;
685718dceddSDavid Howells 
686718dceddSDavid Howells /* Ioctl to set kernel params:
687718dceddSDavid Howells  */
688718dceddSDavid Howells #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
689718dceddSDavid Howells #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
690718dceddSDavid Howells #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
691718dceddSDavid Howells #define I915_SETPARAM_NUM_USED_FENCES                     4
692be03564bSChris Wilson /* Must be kept compact -- no holes */
693718dceddSDavid Howells 
694718dceddSDavid Howells typedef struct drm_i915_setparam {
695718dceddSDavid Howells 	int param;
696718dceddSDavid Howells 	int value;
697718dceddSDavid Howells } drm_i915_setparam_t;
698718dceddSDavid Howells 
699718dceddSDavid Howells /* A memory manager for regions of shared memory:
700718dceddSDavid Howells  */
701718dceddSDavid Howells #define I915_MEM_REGION_AGP 1
702718dceddSDavid Howells 
703718dceddSDavid Howells typedef struct drm_i915_mem_alloc {
704718dceddSDavid Howells 	int region;
705718dceddSDavid Howells 	int alignment;
706718dceddSDavid Howells 	int size;
707718dceddSDavid Howells 	int __user *region_offset;	/* offset from start of fb or agp */
708718dceddSDavid Howells } drm_i915_mem_alloc_t;
709718dceddSDavid Howells 
710718dceddSDavid Howells typedef struct drm_i915_mem_free {
711718dceddSDavid Howells 	int region;
712718dceddSDavid Howells 	int region_offset;
713718dceddSDavid Howells } drm_i915_mem_free_t;
714718dceddSDavid Howells 
715718dceddSDavid Howells typedef struct drm_i915_mem_init_heap {
716718dceddSDavid Howells 	int region;
717718dceddSDavid Howells 	int size;
718718dceddSDavid Howells 	int start;
719718dceddSDavid Howells } drm_i915_mem_init_heap_t;
720718dceddSDavid Howells 
721718dceddSDavid Howells /* Allow memory manager to be torn down and re-initialized (eg on
722718dceddSDavid Howells  * rotate):
723718dceddSDavid Howells  */
724718dceddSDavid Howells typedef struct drm_i915_mem_destroy_heap {
725718dceddSDavid Howells 	int region;
726718dceddSDavid Howells } drm_i915_mem_destroy_heap_t;
727718dceddSDavid Howells 
728718dceddSDavid Howells /* Allow X server to configure which pipes to monitor for vblank signals
729718dceddSDavid Howells  */
730718dceddSDavid Howells #define	DRM_I915_VBLANK_PIPE_A	1
731718dceddSDavid Howells #define	DRM_I915_VBLANK_PIPE_B	2
732718dceddSDavid Howells 
733718dceddSDavid Howells typedef struct drm_i915_vblank_pipe {
734718dceddSDavid Howells 	int pipe;
735718dceddSDavid Howells } drm_i915_vblank_pipe_t;
736718dceddSDavid Howells 
737718dceddSDavid Howells /* Schedule buffer swap at given vertical blank:
738718dceddSDavid Howells  */
739718dceddSDavid Howells typedef struct drm_i915_vblank_swap {
740718dceddSDavid Howells 	drm_drawable_t drawable;
741718dceddSDavid Howells 	enum drm_vblank_seq_type seqtype;
742718dceddSDavid Howells 	unsigned int sequence;
743718dceddSDavid Howells } drm_i915_vblank_swap_t;
744718dceddSDavid Howells 
745718dceddSDavid Howells typedef struct drm_i915_hws_addr {
746718dceddSDavid Howells 	__u64 addr;
747718dceddSDavid Howells } drm_i915_hws_addr_t;
748718dceddSDavid Howells 
749718dceddSDavid Howells struct drm_i915_gem_init {
750718dceddSDavid Howells 	/**
751718dceddSDavid Howells 	 * Beginning offset in the GTT to be managed by the DRM memory
752718dceddSDavid Howells 	 * manager.
753718dceddSDavid Howells 	 */
754718dceddSDavid Howells 	__u64 gtt_start;
755718dceddSDavid Howells 	/**
756718dceddSDavid Howells 	 * Ending offset in the GTT to be managed by the DRM memory
757718dceddSDavid Howells 	 * manager.
758718dceddSDavid Howells 	 */
759718dceddSDavid Howells 	__u64 gtt_end;
760718dceddSDavid Howells };
761718dceddSDavid Howells 
762718dceddSDavid Howells struct drm_i915_gem_create {
763718dceddSDavid Howells 	/**
764718dceddSDavid Howells 	 * Requested size for the object.
765718dceddSDavid Howells 	 *
766718dceddSDavid Howells 	 * The (page-aligned) allocated size for the object will be returned.
767718dceddSDavid Howells 	 */
768718dceddSDavid Howells 	__u64 size;
769718dceddSDavid Howells 	/**
770718dceddSDavid Howells 	 * Returned handle for the object.
771718dceddSDavid Howells 	 *
772718dceddSDavid Howells 	 * Object handles are nonzero.
773718dceddSDavid Howells 	 */
774718dceddSDavid Howells 	__u32 handle;
775718dceddSDavid Howells 	__u32 pad;
776718dceddSDavid Howells };
777718dceddSDavid Howells 
778718dceddSDavid Howells struct drm_i915_gem_pread {
779718dceddSDavid Howells 	/** Handle for the object being read. */
780718dceddSDavid Howells 	__u32 handle;
781718dceddSDavid Howells 	__u32 pad;
782718dceddSDavid Howells 	/** Offset into the object to read from */
783718dceddSDavid Howells 	__u64 offset;
784718dceddSDavid Howells 	/** Length of data to read */
785718dceddSDavid Howells 	__u64 size;
786718dceddSDavid Howells 	/**
787718dceddSDavid Howells 	 * Pointer to write the data into.
788718dceddSDavid Howells 	 *
789718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
790718dceddSDavid Howells 	 */
791718dceddSDavid Howells 	__u64 data_ptr;
792718dceddSDavid Howells };
793718dceddSDavid Howells 
794718dceddSDavid Howells struct drm_i915_gem_pwrite {
795718dceddSDavid Howells 	/** Handle for the object being written to. */
796718dceddSDavid Howells 	__u32 handle;
797718dceddSDavid Howells 	__u32 pad;
798718dceddSDavid Howells 	/** Offset into the object to write to */
799718dceddSDavid Howells 	__u64 offset;
800718dceddSDavid Howells 	/** Length of data to write */
801718dceddSDavid Howells 	__u64 size;
802718dceddSDavid Howells 	/**
803718dceddSDavid Howells 	 * Pointer to read the data from.
804718dceddSDavid Howells 	 *
805718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
806718dceddSDavid Howells 	 */
807718dceddSDavid Howells 	__u64 data_ptr;
808718dceddSDavid Howells };
809718dceddSDavid Howells 
810718dceddSDavid Howells struct drm_i915_gem_mmap {
811718dceddSDavid Howells 	/** Handle for the object being mapped. */
812718dceddSDavid Howells 	__u32 handle;
813718dceddSDavid Howells 	__u32 pad;
814718dceddSDavid Howells 	/** Offset in the object to map. */
815718dceddSDavid Howells 	__u64 offset;
816718dceddSDavid Howells 	/**
817718dceddSDavid Howells 	 * Length of data to map.
818718dceddSDavid Howells 	 *
819718dceddSDavid Howells 	 * The value will be page-aligned.
820718dceddSDavid Howells 	 */
821718dceddSDavid Howells 	__u64 size;
822718dceddSDavid Howells 	/**
823718dceddSDavid Howells 	 * Returned pointer the data was mapped at.
824718dceddSDavid Howells 	 *
825718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
826718dceddSDavid Howells 	 */
827718dceddSDavid Howells 	__u64 addr_ptr;
8281816f923SAkash Goel 
8291816f923SAkash Goel 	/**
8301816f923SAkash Goel 	 * Flags for extended behaviour.
8311816f923SAkash Goel 	 *
8321816f923SAkash Goel 	 * Added in version 2.
8331816f923SAkash Goel 	 */
8341816f923SAkash Goel 	__u64 flags;
8351816f923SAkash Goel #define I915_MMAP_WC 0x1
836718dceddSDavid Howells };
837718dceddSDavid Howells 
838718dceddSDavid Howells struct drm_i915_gem_mmap_gtt {
839718dceddSDavid Howells 	/** Handle for the object being mapped. */
840718dceddSDavid Howells 	__u32 handle;
841718dceddSDavid Howells 	__u32 pad;
842718dceddSDavid Howells 	/**
843718dceddSDavid Howells 	 * Fake offset to use for subsequent mmap call
844718dceddSDavid Howells 	 *
845718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
846718dceddSDavid Howells 	 */
847718dceddSDavid Howells 	__u64 offset;
848718dceddSDavid Howells };
849718dceddSDavid Howells 
850cc662126SAbdiel Janulgue struct drm_i915_gem_mmap_offset {
851cc662126SAbdiel Janulgue 	/** Handle for the object being mapped. */
852cc662126SAbdiel Janulgue 	__u32 handle;
853cc662126SAbdiel Janulgue 	__u32 pad;
854cc662126SAbdiel Janulgue 	/**
855cc662126SAbdiel Janulgue 	 * Fake offset to use for subsequent mmap call
856cc662126SAbdiel Janulgue 	 *
857cc662126SAbdiel Janulgue 	 * This is a fixed-size type for 32/64 compatibility.
858cc662126SAbdiel Janulgue 	 */
859cc662126SAbdiel Janulgue 	__u64 offset;
860cc662126SAbdiel Janulgue 
861cc662126SAbdiel Janulgue 	/**
862cc662126SAbdiel Janulgue 	 * Flags for extended behaviour.
863cc662126SAbdiel Janulgue 	 *
864cc662126SAbdiel Janulgue 	 * It is mandatory that one of the MMAP_OFFSET types
865cc662126SAbdiel Janulgue 	 * (GTT, WC, WB, UC, etc) should be included.
866cc662126SAbdiel Janulgue 	 */
867cc662126SAbdiel Janulgue 	__u64 flags;
868cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_GTT 0
869cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WC  1
870cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WB  2
871cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_UC  3
872cc662126SAbdiel Janulgue 
873cc662126SAbdiel Janulgue 	/*
874cc662126SAbdiel Janulgue 	 * Zero-terminated chain of extensions.
875cc662126SAbdiel Janulgue 	 *
876cc662126SAbdiel Janulgue 	 * No current extensions defined; mbz.
877cc662126SAbdiel Janulgue 	 */
878cc662126SAbdiel Janulgue 	__u64 extensions;
879cc662126SAbdiel Janulgue };
880cc662126SAbdiel Janulgue 
881718dceddSDavid Howells struct drm_i915_gem_set_domain {
882718dceddSDavid Howells 	/** Handle for the object */
883718dceddSDavid Howells 	__u32 handle;
884718dceddSDavid Howells 
885718dceddSDavid Howells 	/** New read domains */
886718dceddSDavid Howells 	__u32 read_domains;
887718dceddSDavid Howells 
888718dceddSDavid Howells 	/** New write domain */
889718dceddSDavid Howells 	__u32 write_domain;
890718dceddSDavid Howells };
891718dceddSDavid Howells 
892718dceddSDavid Howells struct drm_i915_gem_sw_finish {
893718dceddSDavid Howells 	/** Handle for the object */
894718dceddSDavid Howells 	__u32 handle;
895718dceddSDavid Howells };
896718dceddSDavid Howells 
897718dceddSDavid Howells struct drm_i915_gem_relocation_entry {
898718dceddSDavid Howells 	/**
899718dceddSDavid Howells 	 * Handle of the buffer being pointed to by this relocation entry.
900718dceddSDavid Howells 	 *
901718dceddSDavid Howells 	 * It's appealing to make this be an index into the mm_validate_entry
902718dceddSDavid Howells 	 * list to refer to the buffer, but this allows the driver to create
903718dceddSDavid Howells 	 * a relocation list for state buffers and not re-write it per
904718dceddSDavid Howells 	 * exec using the buffer.
905718dceddSDavid Howells 	 */
906718dceddSDavid Howells 	__u32 target_handle;
907718dceddSDavid Howells 
908718dceddSDavid Howells 	/**
909718dceddSDavid Howells 	 * Value to be added to the offset of the target buffer to make up
910718dceddSDavid Howells 	 * the relocation entry.
911718dceddSDavid Howells 	 */
912718dceddSDavid Howells 	__u32 delta;
913718dceddSDavid Howells 
914718dceddSDavid Howells 	/** Offset in the buffer the relocation entry will be written into */
915718dceddSDavid Howells 	__u64 offset;
916718dceddSDavid Howells 
917718dceddSDavid Howells 	/**
918718dceddSDavid Howells 	 * Offset value of the target buffer that the relocation entry was last
919718dceddSDavid Howells 	 * written as.
920718dceddSDavid Howells 	 *
921718dceddSDavid Howells 	 * If the buffer has the same offset as last time, we can skip syncing
922718dceddSDavid Howells 	 * and writing the relocation.  This value is written back out by
923718dceddSDavid Howells 	 * the execbuffer ioctl when the relocation is written.
924718dceddSDavid Howells 	 */
925718dceddSDavid Howells 	__u64 presumed_offset;
926718dceddSDavid Howells 
927718dceddSDavid Howells 	/**
928718dceddSDavid Howells 	 * Target memory domains read by this operation.
929718dceddSDavid Howells 	 */
930718dceddSDavid Howells 	__u32 read_domains;
931718dceddSDavid Howells 
932718dceddSDavid Howells 	/**
933718dceddSDavid Howells 	 * Target memory domains written by this operation.
934718dceddSDavid Howells 	 *
935718dceddSDavid Howells 	 * Note that only one domain may be written by the whole
936718dceddSDavid Howells 	 * execbuffer operation, so that where there are conflicts,
937718dceddSDavid Howells 	 * the application will get -EINVAL back.
938718dceddSDavid Howells 	 */
939718dceddSDavid Howells 	__u32 write_domain;
940718dceddSDavid Howells };
941718dceddSDavid Howells 
942718dceddSDavid Howells /** @{
943718dceddSDavid Howells  * Intel memory domains
944718dceddSDavid Howells  *
945718dceddSDavid Howells  * Most of these just align with the various caches in
946718dceddSDavid Howells  * the system and are used to flush and invalidate as
947718dceddSDavid Howells  * objects end up cached in different domains.
948718dceddSDavid Howells  */
949718dceddSDavid Howells /** CPU cache */
950718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU		0x00000001
951718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */
952718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER		0x00000002
953718dceddSDavid Howells /** Sampler cache, used by texture engine */
954718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER		0x00000004
955718dceddSDavid Howells /** Command queue, used to load batch buffers */
956718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND		0x00000008
957718dceddSDavid Howells /** Instruction cache, used by shader programs */
958718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
959718dceddSDavid Howells /** Vertex address cache */
960718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX		0x00000020
961718dceddSDavid Howells /** GTT domain - aperture and scanout */
962718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT		0x00000040
963e22d8e3cSChris Wilson /** WC domain - uncached access */
964e22d8e3cSChris Wilson #define I915_GEM_DOMAIN_WC		0x00000080
965718dceddSDavid Howells /** @} */
966718dceddSDavid Howells 
967718dceddSDavid Howells struct drm_i915_gem_exec_object {
968718dceddSDavid Howells 	/**
969718dceddSDavid Howells 	 * User's handle for a buffer to be bound into the GTT for this
970718dceddSDavid Howells 	 * operation.
971718dceddSDavid Howells 	 */
972718dceddSDavid Howells 	__u32 handle;
973718dceddSDavid Howells 
974718dceddSDavid Howells 	/** Number of relocations to be performed on this buffer */
975718dceddSDavid Howells 	__u32 relocation_count;
976718dceddSDavid Howells 	/**
977718dceddSDavid Howells 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
978718dceddSDavid Howells 	 * the relocations to be performed in this buffer.
979718dceddSDavid Howells 	 */
980718dceddSDavid Howells 	__u64 relocs_ptr;
981718dceddSDavid Howells 
982718dceddSDavid Howells 	/** Required alignment in graphics aperture */
983718dceddSDavid Howells 	__u64 alignment;
984718dceddSDavid Howells 
985718dceddSDavid Howells 	/**
986718dceddSDavid Howells 	 * Returned value of the updated offset of the object, for future
987718dceddSDavid Howells 	 * presumed_offset writes.
988718dceddSDavid Howells 	 */
989718dceddSDavid Howells 	__u64 offset;
990718dceddSDavid Howells };
991718dceddSDavid Howells 
992b5b6f6a6SJason Ekstrand /* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
993718dceddSDavid Howells struct drm_i915_gem_execbuffer {
994718dceddSDavid Howells 	/**
995718dceddSDavid Howells 	 * List of buffers to be validated with their relocations to be
996718dceddSDavid Howells 	 * performend on them.
997718dceddSDavid Howells 	 *
998718dceddSDavid Howells 	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
999718dceddSDavid Howells 	 *
1000718dceddSDavid Howells 	 * These buffers must be listed in an order such that all relocations
1001718dceddSDavid Howells 	 * a buffer is performing refer to buffers that have already appeared
1002718dceddSDavid Howells 	 * in the validate list.
1003718dceddSDavid Howells 	 */
1004718dceddSDavid Howells 	__u64 buffers_ptr;
1005718dceddSDavid Howells 	__u32 buffer_count;
1006718dceddSDavid Howells 
1007718dceddSDavid Howells 	/** Offset in the batchbuffer to start execution from. */
1008718dceddSDavid Howells 	__u32 batch_start_offset;
1009718dceddSDavid Howells 	/** Bytes used in batchbuffer from batch_start_offset */
1010718dceddSDavid Howells 	__u32 batch_len;
1011718dceddSDavid Howells 	__u32 DR1;
1012718dceddSDavid Howells 	__u32 DR4;
1013718dceddSDavid Howells 	__u32 num_cliprects;
1014718dceddSDavid Howells 	/** This is a struct drm_clip_rect *cliprects */
1015718dceddSDavid Howells 	__u64 cliprects_ptr;
1016718dceddSDavid Howells };
1017718dceddSDavid Howells 
1018718dceddSDavid Howells struct drm_i915_gem_exec_object2 {
1019718dceddSDavid Howells 	/**
1020718dceddSDavid Howells 	 * User's handle for a buffer to be bound into the GTT for this
1021718dceddSDavid Howells 	 * operation.
1022718dceddSDavid Howells 	 */
1023718dceddSDavid Howells 	__u32 handle;
1024718dceddSDavid Howells 
1025718dceddSDavid Howells 	/** Number of relocations to be performed on this buffer */
1026718dceddSDavid Howells 	__u32 relocation_count;
1027718dceddSDavid Howells 	/**
1028718dceddSDavid Howells 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
1029718dceddSDavid Howells 	 * the relocations to be performed in this buffer.
1030718dceddSDavid Howells 	 */
1031718dceddSDavid Howells 	__u64 relocs_ptr;
1032718dceddSDavid Howells 
1033718dceddSDavid Howells 	/** Required alignment in graphics aperture */
1034718dceddSDavid Howells 	__u64 alignment;
1035718dceddSDavid Howells 
1036718dceddSDavid Howells 	/**
1037506a8e87SChris Wilson 	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
1038506a8e87SChris Wilson 	 * the user with the GTT offset at which this object will be pinned.
1039506a8e87SChris Wilson 	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
1040506a8e87SChris Wilson 	 * presumed_offset of the object.
1041506a8e87SChris Wilson 	 * During execbuffer2 the kernel populates it with the value of the
1042506a8e87SChris Wilson 	 * current GTT offset of the object, for future presumed_offset writes.
1043718dceddSDavid Howells 	 */
1044718dceddSDavid Howells 	__u64 offset;
1045718dceddSDavid Howells 
1046718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
1047ed5982e6SDaniel Vetter #define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
1048ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE		 (1<<2)
1049101b506aSMichel Thierry #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1050506a8e87SChris Wilson #define EXEC_OBJECT_PINNED		 (1<<4)
105191b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
105277ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and
105377ae9957SChris Wilson  * synchronises operations with outstanding rendering. This includes
105477ae9957SChris Wilson  * rendering on other devices if exported via dma-buf. However, sometimes
105577ae9957SChris Wilson  * this tracking is too coarse and the user knows better. For example,
105677ae9957SChris Wilson  * if the object is split into non-overlapping ranges shared between different
105777ae9957SChris Wilson  * clients or engines (i.e. suballocating objects), the implicit tracking
105877ae9957SChris Wilson  * by kernel assumes that each operation affects the whole object rather
105977ae9957SChris Wilson  * than an individual range, causing needless synchronisation between clients.
106077ae9957SChris Wilson  * The kernel will also forgo any CPU cache flushes prior to rendering from
106177ae9957SChris Wilson  * the object as the client is expected to be also handling such domain
106277ae9957SChris Wilson  * tracking.
106377ae9957SChris Wilson  *
106477ae9957SChris Wilson  * The kernel maintains the implicit tracking in order to manage resources
106577ae9957SChris Wilson  * used by the GPU - this flag only disables the synchronisation prior to
106677ae9957SChris Wilson  * rendering with this object in this execbuf.
106777ae9957SChris Wilson  *
106877ae9957SChris Wilson  * Opting out of implicit synhronisation requires the user to do its own
106977ae9957SChris Wilson  * explicit tracking to avoid rendering corruption. See, for example,
107077ae9957SChris Wilson  * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
107177ae9957SChris Wilson  */
107277ae9957SChris Wilson #define EXEC_OBJECT_ASYNC		(1<<6)
1073b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error
1074b0fd47adSChris Wilson  * state upon a GPU hang involving this batch for post-mortem debugging.
1075b0fd47adSChris Wilson  * These buffers are recorded in no particular order as "user" in
1076b0fd47adSChris Wilson  * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1077b0fd47adSChris Wilson  * if the kernel supports this flag.
1078b0fd47adSChris Wilson  */
1079b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE		(1<<7)
10809e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1081b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1082718dceddSDavid Howells 	__u64 flags;
1083ed5982e6SDaniel Vetter 
108491b2db6fSChris Wilson 	union {
1085718dceddSDavid Howells 		__u64 rsvd1;
108691b2db6fSChris Wilson 		__u64 pad_to_size;
108791b2db6fSChris Wilson 	};
1088718dceddSDavid Howells 	__u64 rsvd2;
1089718dceddSDavid Howells };
1090718dceddSDavid Howells 
1091cf6e7bacSJason Ekstrand struct drm_i915_gem_exec_fence {
1092cf6e7bacSJason Ekstrand 	/**
1093cf6e7bacSJason Ekstrand 	 * User's handle for a drm_syncobj to wait on or signal.
1094cf6e7bacSJason Ekstrand 	 */
1095cf6e7bacSJason Ekstrand 	__u32 handle;
1096cf6e7bacSJason Ekstrand 
1097cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_WAIT            (1<<0)
1098cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_SIGNAL          (1<<1)
1099ebcaa1ffSTvrtko Ursulin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
1100cf6e7bacSJason Ekstrand 	__u32 flags;
1101cf6e7bacSJason Ekstrand };
1102cf6e7bacSJason Ekstrand 
11032ef6a01fSMatthew Auld /*
110413149e8bSLionel Landwerlin  * See drm_i915_gem_execbuffer_ext_timeline_fences.
110513149e8bSLionel Landwerlin  */
110613149e8bSLionel Landwerlin #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
110713149e8bSLionel Landwerlin 
11082ef6a01fSMatthew Auld /*
110913149e8bSLionel Landwerlin  * This structure describes an array of drm_syncobj and associated points for
111013149e8bSLionel Landwerlin  * timeline variants of drm_syncobj. It is invalid to append this structure to
111113149e8bSLionel Landwerlin  * the execbuf if I915_EXEC_FENCE_ARRAY is set.
111213149e8bSLionel Landwerlin  */
111313149e8bSLionel Landwerlin struct drm_i915_gem_execbuffer_ext_timeline_fences {
111413149e8bSLionel Landwerlin 	struct i915_user_extension base;
111513149e8bSLionel Landwerlin 
111613149e8bSLionel Landwerlin 	/**
111713149e8bSLionel Landwerlin 	 * Number of element in the handles_ptr & value_ptr arrays.
111813149e8bSLionel Landwerlin 	 */
111913149e8bSLionel Landwerlin 	__u64 fence_count;
112013149e8bSLionel Landwerlin 
112113149e8bSLionel Landwerlin 	/**
112213149e8bSLionel Landwerlin 	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
112313149e8bSLionel Landwerlin 	 * fence_count.
112413149e8bSLionel Landwerlin 	 */
112513149e8bSLionel Landwerlin 	__u64 handles_ptr;
112613149e8bSLionel Landwerlin 
112713149e8bSLionel Landwerlin 	/**
112813149e8bSLionel Landwerlin 	 * Pointer to an array of u64 values of length fence_count. Values
112913149e8bSLionel Landwerlin 	 * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
113013149e8bSLionel Landwerlin 	 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
113113149e8bSLionel Landwerlin 	 */
113213149e8bSLionel Landwerlin 	__u64 values_ptr;
1133cda9edd0SLionel Landwerlin };
1134cda9edd0SLionel Landwerlin 
1135718dceddSDavid Howells struct drm_i915_gem_execbuffer2 {
1136718dceddSDavid Howells 	/**
1137718dceddSDavid Howells 	 * List of gem_exec_object2 structs
1138718dceddSDavid Howells 	 */
1139718dceddSDavid Howells 	__u64 buffers_ptr;
1140718dceddSDavid Howells 	__u32 buffer_count;
1141718dceddSDavid Howells 
1142718dceddSDavid Howells 	/** Offset in the batchbuffer to start execution from. */
1143718dceddSDavid Howells 	__u32 batch_start_offset;
1144718dceddSDavid Howells 	/** Bytes used in batchbuffer from batch_start_offset */
1145718dceddSDavid Howells 	__u32 batch_len;
1146718dceddSDavid Howells 	__u32 DR1;
1147718dceddSDavid Howells 	__u32 DR4;
1148718dceddSDavid Howells 	__u32 num_cliprects;
1149cf6e7bacSJason Ekstrand 	/**
1150cf6e7bacSJason Ekstrand 	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
1151cda9edd0SLionel Landwerlin 	 * & I915_EXEC_USE_EXTENSIONS are not set.
1152cda9edd0SLionel Landwerlin 	 *
1153cda9edd0SLionel Landwerlin 	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
1154cda9edd0SLionel Landwerlin 	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
1155cda9edd0SLionel Landwerlin 	 * of the array.
1156cda9edd0SLionel Landwerlin 	 *
1157cda9edd0SLionel Landwerlin 	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
1158cda9edd0SLionel Landwerlin 	 * single struct i915_user_extension and num_cliprects is 0.
1159cf6e7bacSJason Ekstrand 	 */
1160718dceddSDavid Howells 	__u64 cliprects_ptr;
1161d90c06d5SChris Wilson #define I915_EXEC_RING_MASK              (0x3f)
1162718dceddSDavid Howells #define I915_EXEC_DEFAULT                (0<<0)
1163718dceddSDavid Howells #define I915_EXEC_RENDER                 (1<<0)
1164718dceddSDavid Howells #define I915_EXEC_BSD                    (2<<0)
1165718dceddSDavid Howells #define I915_EXEC_BLT                    (3<<0)
116682f91b6eSXiang, Haihao #define I915_EXEC_VEBOX                  (4<<0)
1167718dceddSDavid Howells 
1168718dceddSDavid Howells /* Used for switching the constants addressing mode on gen4+ RENDER ring.
1169718dceddSDavid Howells  * Gen6+ only supports relative addressing to dynamic state (default) and
1170718dceddSDavid Howells  * absolute addressing.
1171718dceddSDavid Howells  *
1172718dceddSDavid Howells  * These flags are ignored for the BSD and BLT rings.
1173718dceddSDavid Howells  */
1174718dceddSDavid Howells #define I915_EXEC_CONSTANTS_MASK 	(3<<6)
1175718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1176718dceddSDavid Howells #define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
1177718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1178718dceddSDavid Howells 	__u64 flags;
1179718dceddSDavid Howells 	__u64 rsvd1; /* now used for context info */
1180718dceddSDavid Howells 	__u64 rsvd2;
1181718dceddSDavid Howells };
1182718dceddSDavid Howells 
1183718dceddSDavid Howells /** Resets the SO write offset registers for transform feedback on gen7. */
1184718dceddSDavid Howells #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1185718dceddSDavid Howells 
1186c2fb7916SDaniel Vetter /** Request a privileged ("secure") batch buffer. Note only available for
1187c2fb7916SDaniel Vetter  * DRM_ROOT_ONLY | DRM_MASTER processes.
1188c2fb7916SDaniel Vetter  */
1189c2fb7916SDaniel Vetter #define I915_EXEC_SECURE		(1<<9)
1190c2fb7916SDaniel Vetter 
1191b45305fcSDaniel Vetter /** Inform the kernel that the batch is and will always be pinned. This
1192b45305fcSDaniel Vetter  * negates the requirement for a workaround to be performed to avoid
1193b45305fcSDaniel Vetter  * an incoherent CS (such as can be found on 830/845). If this flag is
1194b45305fcSDaniel Vetter  * not passed, the kernel will endeavour to make sure the batch is
1195b45305fcSDaniel Vetter  * coherent with the CS before execution. If this flag is passed,
1196b45305fcSDaniel Vetter  * userspace assumes the responsibility for ensuring the same.
1197b45305fcSDaniel Vetter  */
1198b45305fcSDaniel Vetter #define I915_EXEC_IS_PINNED		(1<<10)
1199b45305fcSDaniel Vetter 
1200c3d19d3cSGeert Uytterhoeven /** Provide a hint to the kernel that the command stream and auxiliary
1201ed5982e6SDaniel Vetter  * state buffers already holds the correct presumed addresses and so the
1202ed5982e6SDaniel Vetter  * relocation process may be skipped if no buffers need to be moved in
1203ed5982e6SDaniel Vetter  * preparation for the execbuffer.
1204ed5982e6SDaniel Vetter  */
1205ed5982e6SDaniel Vetter #define I915_EXEC_NO_RELOC		(1<<11)
1206ed5982e6SDaniel Vetter 
1207eef90ccbSChris Wilson /** Use the reloc.handle as an index into the exec object array rather
1208eef90ccbSChris Wilson  * than as the per-file handle.
1209eef90ccbSChris Wilson  */
1210eef90ccbSChris Wilson #define I915_EXEC_HANDLE_LUT		(1<<12)
1211eef90ccbSChris Wilson 
12128d360dffSZhipeng Gong /** Used for switching BSD rings on the platforms with two BSD rings */
1213d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_SHIFT	 (13)
1214d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1215d9da6aa0STvrtko Ursulin /* default ping-pong mode */
1216d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1217d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1218d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
12198d360dffSZhipeng Gong 
1220a9ed33caSAbdiel Janulgue /** Tell the kernel that the batchbuffer is processed by
1221a9ed33caSAbdiel Janulgue  *  the resource streamer.
1222a9ed33caSAbdiel Janulgue  */
1223a9ed33caSAbdiel Janulgue #define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1224a9ed33caSAbdiel Janulgue 
1225fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1226fec0445cSChris Wilson  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1227fec0445cSChris Wilson  * the batch.
1228fec0445cSChris Wilson  *
1229fec0445cSChris Wilson  * Returns -EINVAL if the sync_file fd cannot be found.
1230fec0445cSChris Wilson  */
1231fec0445cSChris Wilson #define I915_EXEC_FENCE_IN		(1<<16)
1232fec0445cSChris Wilson 
1233fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1234fec0445cSChris Wilson  * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1235fec0445cSChris Wilson  * to the caller, and it should be close() after use. (The fd is a regular
1236fec0445cSChris Wilson  * file descriptor and will be cleaned up on process termination. It holds
1237fec0445cSChris Wilson  * a reference to the request, but nothing else.)
1238fec0445cSChris Wilson  *
1239fec0445cSChris Wilson  * The sync_file fd can be combined with other sync_file and passed either
1240fec0445cSChris Wilson  * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1241fec0445cSChris Wilson  * will only occur after this request completes), or to other devices.
1242fec0445cSChris Wilson  *
1243fec0445cSChris Wilson  * Using I915_EXEC_FENCE_OUT requires use of
1244fec0445cSChris Wilson  * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1245fec0445cSChris Wilson  * back to userspace. Failure to do so will cause the out-fence to always
1246fec0445cSChris Wilson  * be reported as zero, and the real fence fd to be leaked.
1247fec0445cSChris Wilson  */
1248fec0445cSChris Wilson #define I915_EXEC_FENCE_OUT		(1<<17)
1249fec0445cSChris Wilson 
12501a71cf2fSChris Wilson /*
12511a71cf2fSChris Wilson  * Traditionally the execbuf ioctl has only considered the final element in
12521a71cf2fSChris Wilson  * the execobject[] to be the executable batch. Often though, the client
12531a71cf2fSChris Wilson  * will known the batch object prior to construction and being able to place
12541a71cf2fSChris Wilson  * it into the execobject[] array first can simplify the relocation tracking.
12551a71cf2fSChris Wilson  * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
12561a71cf2fSChris Wilson  * execobject[] as the * batch instead (the default is to use the last
12571a71cf2fSChris Wilson  * element).
12581a71cf2fSChris Wilson  */
12591a71cf2fSChris Wilson #define I915_EXEC_BATCH_FIRST		(1<<18)
1260cf6e7bacSJason Ekstrand 
1261cf6e7bacSJason Ekstrand /* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1262cf6e7bacSJason Ekstrand  * define an array of i915_gem_exec_fence structures which specify a set of
1263cf6e7bacSJason Ekstrand  * dma fences to wait upon or signal.
1264cf6e7bacSJason Ekstrand  */
1265cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_ARRAY   (1<<19)
1266cf6e7bacSJason Ekstrand 
1267a88b6e4cSChris Wilson /*
1268a88b6e4cSChris Wilson  * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1269a88b6e4cSChris Wilson  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1270a88b6e4cSChris Wilson  * the batch.
1271a88b6e4cSChris Wilson  *
1272a88b6e4cSChris Wilson  * Returns -EINVAL if the sync_file fd cannot be found.
1273a88b6e4cSChris Wilson  */
1274a88b6e4cSChris Wilson #define I915_EXEC_FENCE_SUBMIT		(1 << 20)
1275a88b6e4cSChris Wilson 
1276cda9edd0SLionel Landwerlin /*
1277cda9edd0SLionel Landwerlin  * Setting I915_EXEC_USE_EXTENSIONS implies that
1278cda9edd0SLionel Landwerlin  * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked
1279cda9edd0SLionel Landwerlin  * list of i915_user_extension. Each i915_user_extension node is the base of a
1280cda9edd0SLionel Landwerlin  * larger structure. The list of supported structures are listed in the
1281cda9edd0SLionel Landwerlin  * drm_i915_gem_execbuffer_ext enum.
1282cda9edd0SLionel Landwerlin  */
1283cda9edd0SLionel Landwerlin #define I915_EXEC_USE_EXTENSIONS	(1 << 21)
1284cda9edd0SLionel Landwerlin 
1285cda9edd0SLionel Landwerlin #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1286ed5982e6SDaniel Vetter 
1287718dceddSDavid Howells #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1288718dceddSDavid Howells #define i915_execbuffer2_set_context_id(eb2, context) \
1289718dceddSDavid Howells 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1290718dceddSDavid Howells #define i915_execbuffer2_get_context_id(eb2) \
1291718dceddSDavid Howells 	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1292718dceddSDavid Howells 
1293718dceddSDavid Howells struct drm_i915_gem_pin {
1294718dceddSDavid Howells 	/** Handle of the buffer to be pinned. */
1295718dceddSDavid Howells 	__u32 handle;
1296718dceddSDavid Howells 	__u32 pad;
1297718dceddSDavid Howells 
1298718dceddSDavid Howells 	/** alignment required within the aperture */
1299718dceddSDavid Howells 	__u64 alignment;
1300718dceddSDavid Howells 
1301718dceddSDavid Howells 	/** Returned GTT offset of the buffer. */
1302718dceddSDavid Howells 	__u64 offset;
1303718dceddSDavid Howells };
1304718dceddSDavid Howells 
1305718dceddSDavid Howells struct drm_i915_gem_unpin {
1306718dceddSDavid Howells 	/** Handle of the buffer to be unpinned. */
1307718dceddSDavid Howells 	__u32 handle;
1308718dceddSDavid Howells 	__u32 pad;
1309718dceddSDavid Howells };
1310718dceddSDavid Howells 
1311718dceddSDavid Howells struct drm_i915_gem_busy {
1312718dceddSDavid Howells 	/** Handle of the buffer to check for busy */
1313718dceddSDavid Howells 	__u32 handle;
1314718dceddSDavid Howells 
1315426960beSChris Wilson 	/** Return busy status
1316426960beSChris Wilson 	 *
1317426960beSChris Wilson 	 * A return of 0 implies that the object is idle (after
1318426960beSChris Wilson 	 * having flushed any pending activity), and a non-zero return that
1319426960beSChris Wilson 	 * the object is still in-flight on the GPU. (The GPU has not yet
1320426960beSChris Wilson 	 * signaled completion for all pending requests that reference the
13211255501dSChris Wilson 	 * object.) An object is guaranteed to become idle eventually (so
13221255501dSChris Wilson 	 * long as no new GPU commands are executed upon it). Due to the
13231255501dSChris Wilson 	 * asynchronous nature of the hardware, an object reported
13241255501dSChris Wilson 	 * as busy may become idle before the ioctl is completed.
13251255501dSChris Wilson 	 *
13261255501dSChris Wilson 	 * Furthermore, if the object is busy, which engine is busy is only
1327c8b50242SChris Wilson 	 * provided as a guide and only indirectly by reporting its class
1328c8b50242SChris Wilson 	 * (there may be more than one engine in each class). There are race
1329c8b50242SChris Wilson 	 * conditions which prevent the report of which engines are busy from
1330c8b50242SChris Wilson 	 * being always accurate.  However, the converse is not true. If the
1331c8b50242SChris Wilson 	 * object is idle, the result of the ioctl, that all engines are idle,
1332c8b50242SChris Wilson 	 * is accurate.
1333426960beSChris Wilson 	 *
1334426960beSChris Wilson 	 * The returned dword is split into two fields to indicate both
1335c8b50242SChris Wilson 	 * the engine classess on which the object is being read, and the
1336c8b50242SChris Wilson 	 * engine class on which it is currently being written (if any).
1337426960beSChris Wilson 	 *
1338426960beSChris Wilson 	 * The low word (bits 0:15) indicate if the object is being written
1339426960beSChris Wilson 	 * to by any engine (there can only be one, as the GEM implicit
1340426960beSChris Wilson 	 * synchronisation rules force writes to be serialised). Only the
1341c8b50242SChris Wilson 	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1342c8b50242SChris Wilson 	 * 1 not 0 etc) for the last write is reported.
1343426960beSChris Wilson 	 *
1344c8b50242SChris Wilson 	 * The high word (bits 16:31) are a bitmask of which engines classes
1345c8b50242SChris Wilson 	 * are currently reading from the object. Multiple engines may be
1346426960beSChris Wilson 	 * reading from the object simultaneously.
1347426960beSChris Wilson 	 *
1348c8b50242SChris Wilson 	 * The value of each engine class is the same as specified in the
1349c8b50242SChris Wilson 	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
1350c8b50242SChris Wilson 	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1351426960beSChris Wilson 	 * reported as active itself. Some hardware may have parallel
1352426960beSChris Wilson 	 * execution engines, e.g. multiple media engines, which are
1353c8b50242SChris Wilson 	 * mapped to the same class identifier and so are not separately
1354c8b50242SChris Wilson 	 * reported for busyness.
13551255501dSChris Wilson 	 *
13561255501dSChris Wilson 	 * Caveat emptor:
13571255501dSChris Wilson 	 * Only the boolean result of this query is reliable; that is whether
13581255501dSChris Wilson 	 * the object is idle or busy. The report of which engines are busy
13591255501dSChris Wilson 	 * should be only used as a heuristic.
1360718dceddSDavid Howells 	 */
1361718dceddSDavid Howells 	__u32 busy;
1362718dceddSDavid Howells };
1363718dceddSDavid Howells 
136435c7ab42SDaniel Vetter /**
136535c7ab42SDaniel Vetter  * I915_CACHING_NONE
136635c7ab42SDaniel Vetter  *
136735c7ab42SDaniel Vetter  * GPU access is not coherent with cpu caches. Default for machines without an
136835c7ab42SDaniel Vetter  * LLC.
136935c7ab42SDaniel Vetter  */
1370718dceddSDavid Howells #define I915_CACHING_NONE		0
137135c7ab42SDaniel Vetter /**
137235c7ab42SDaniel Vetter  * I915_CACHING_CACHED
137335c7ab42SDaniel Vetter  *
137435c7ab42SDaniel Vetter  * GPU access is coherent with cpu caches and furthermore the data is cached in
137535c7ab42SDaniel Vetter  * last-level caches shared between cpu cores and the gpu GT. Default on
137635c7ab42SDaniel Vetter  * machines with HAS_LLC.
137735c7ab42SDaniel Vetter  */
1378718dceddSDavid Howells #define I915_CACHING_CACHED		1
137935c7ab42SDaniel Vetter /**
138035c7ab42SDaniel Vetter  * I915_CACHING_DISPLAY
138135c7ab42SDaniel Vetter  *
138235c7ab42SDaniel Vetter  * Special GPU caching mode which is coherent with the scanout engines.
138335c7ab42SDaniel Vetter  * Transparently falls back to I915_CACHING_NONE on platforms where no special
138435c7ab42SDaniel Vetter  * cache mode (like write-through or gfdt flushing) is available. The kernel
138535c7ab42SDaniel Vetter  * automatically sets this mode when using a buffer as a scanout target.
138635c7ab42SDaniel Vetter  * Userspace can manually set this mode to avoid a costly stall and clflush in
138735c7ab42SDaniel Vetter  * the hotpath of drawing the first frame.
138835c7ab42SDaniel Vetter  */
138935c7ab42SDaniel Vetter #define I915_CACHING_DISPLAY		2
1390718dceddSDavid Howells 
1391718dceddSDavid Howells struct drm_i915_gem_caching {
1392718dceddSDavid Howells 	/**
1393718dceddSDavid Howells 	 * Handle of the buffer to set/get the caching level of. */
1394718dceddSDavid Howells 	__u32 handle;
1395718dceddSDavid Howells 
1396718dceddSDavid Howells 	/**
1397718dceddSDavid Howells 	 * Cacheing level to apply or return value
1398718dceddSDavid Howells 	 *
1399718dceddSDavid Howells 	 * bits0-15 are for generic caching control (i.e. the above defined
1400718dceddSDavid Howells 	 * values). bits16-31 are reserved for platform-specific variations
1401718dceddSDavid Howells 	 * (e.g. l3$ caching on gen7). */
1402718dceddSDavid Howells 	__u32 caching;
1403718dceddSDavid Howells };
1404718dceddSDavid Howells 
1405718dceddSDavid Howells #define I915_TILING_NONE	0
1406718dceddSDavid Howells #define I915_TILING_X		1
1407718dceddSDavid Howells #define I915_TILING_Y		2
1408deeb1519SChris Wilson #define I915_TILING_LAST	I915_TILING_Y
1409718dceddSDavid Howells 
1410718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_NONE		0
1411718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9		1
1412718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10		2
1413718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_11		3
1414718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_11	4
1415718dceddSDavid Howells /* Not seen by userland */
1416718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_UNKNOWN	5
1417718dceddSDavid Howells /* Seen by userland. */
1418718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_17		6
1419718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_17	7
1420718dceddSDavid Howells 
1421718dceddSDavid Howells struct drm_i915_gem_set_tiling {
1422718dceddSDavid Howells 	/** Handle of the buffer to have its tiling state updated */
1423718dceddSDavid Howells 	__u32 handle;
1424718dceddSDavid Howells 
1425718dceddSDavid Howells 	/**
1426718dceddSDavid Howells 	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1427718dceddSDavid Howells 	 * I915_TILING_Y).
1428718dceddSDavid Howells 	 *
1429718dceddSDavid Howells 	 * This value is to be set on request, and will be updated by the
1430718dceddSDavid Howells 	 * kernel on successful return with the actual chosen tiling layout.
1431718dceddSDavid Howells 	 *
1432718dceddSDavid Howells 	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1433718dceddSDavid Howells 	 * has bit 6 swizzling that can't be managed correctly by GEM.
1434718dceddSDavid Howells 	 *
1435718dceddSDavid Howells 	 * Buffer contents become undefined when changing tiling_mode.
1436718dceddSDavid Howells 	 */
1437718dceddSDavid Howells 	__u32 tiling_mode;
1438718dceddSDavid Howells 
1439718dceddSDavid Howells 	/**
1440718dceddSDavid Howells 	 * Stride in bytes for the object when in I915_TILING_X or
1441718dceddSDavid Howells 	 * I915_TILING_Y.
1442718dceddSDavid Howells 	 */
1443718dceddSDavid Howells 	__u32 stride;
1444718dceddSDavid Howells 
1445718dceddSDavid Howells 	/**
1446718dceddSDavid Howells 	 * Returned address bit 6 swizzling required for CPU access through
1447718dceddSDavid Howells 	 * mmap mapping.
1448718dceddSDavid Howells 	 */
1449718dceddSDavid Howells 	__u32 swizzle_mode;
1450718dceddSDavid Howells };
1451718dceddSDavid Howells 
1452718dceddSDavid Howells struct drm_i915_gem_get_tiling {
1453718dceddSDavid Howells 	/** Handle of the buffer to get tiling state for. */
1454718dceddSDavid Howells 	__u32 handle;
1455718dceddSDavid Howells 
1456718dceddSDavid Howells 	/**
1457718dceddSDavid Howells 	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1458718dceddSDavid Howells 	 * I915_TILING_Y).
1459718dceddSDavid Howells 	 */
1460718dceddSDavid Howells 	__u32 tiling_mode;
1461718dceddSDavid Howells 
1462718dceddSDavid Howells 	/**
1463718dceddSDavid Howells 	 * Returned address bit 6 swizzling required for CPU access through
1464718dceddSDavid Howells 	 * mmap mapping.
1465718dceddSDavid Howells 	 */
1466718dceddSDavid Howells 	__u32 swizzle_mode;
146770f2f5c7SChris Wilson 
146870f2f5c7SChris Wilson 	/**
146970f2f5c7SChris Wilson 	 * Returned address bit 6 swizzling required for CPU access through
147070f2f5c7SChris Wilson 	 * mmap mapping whilst bound.
147170f2f5c7SChris Wilson 	 */
147270f2f5c7SChris Wilson 	__u32 phys_swizzle_mode;
1473718dceddSDavid Howells };
1474718dceddSDavid Howells 
1475718dceddSDavid Howells struct drm_i915_gem_get_aperture {
1476718dceddSDavid Howells 	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1477718dceddSDavid Howells 	__u64 aper_size;
1478718dceddSDavid Howells 
1479718dceddSDavid Howells 	/**
1480718dceddSDavid Howells 	 * Available space in the aperture used by i915_gem_execbuffer, in
1481718dceddSDavid Howells 	 * bytes
1482718dceddSDavid Howells 	 */
1483718dceddSDavid Howells 	__u64 aper_available_size;
1484718dceddSDavid Howells };
1485718dceddSDavid Howells 
1486718dceddSDavid Howells struct drm_i915_get_pipe_from_crtc_id {
1487718dceddSDavid Howells 	/** ID of CRTC being requested **/
1488718dceddSDavid Howells 	__u32 crtc_id;
1489718dceddSDavid Howells 
1490718dceddSDavid Howells 	/** pipe of requested CRTC **/
1491718dceddSDavid Howells 	__u32 pipe;
1492718dceddSDavid Howells };
1493718dceddSDavid Howells 
1494718dceddSDavid Howells #define I915_MADV_WILLNEED 0
1495718dceddSDavid Howells #define I915_MADV_DONTNEED 1
1496718dceddSDavid Howells #define __I915_MADV_PURGED 2 /* internal state */
1497718dceddSDavid Howells 
1498718dceddSDavid Howells struct drm_i915_gem_madvise {
1499718dceddSDavid Howells 	/** Handle of the buffer to change the backing store advice */
1500718dceddSDavid Howells 	__u32 handle;
1501718dceddSDavid Howells 
1502718dceddSDavid Howells 	/* Advice: either the buffer will be needed again in the near future,
1503718dceddSDavid Howells 	 *         or wont be and could be discarded under memory pressure.
1504718dceddSDavid Howells 	 */
1505718dceddSDavid Howells 	__u32 madv;
1506718dceddSDavid Howells 
1507718dceddSDavid Howells 	/** Whether the backing store still exists. */
1508718dceddSDavid Howells 	__u32 retained;
1509718dceddSDavid Howells };
1510718dceddSDavid Howells 
1511718dceddSDavid Howells /* flags */
1512718dceddSDavid Howells #define I915_OVERLAY_TYPE_MASK 		0xff
1513718dceddSDavid Howells #define I915_OVERLAY_YUV_PLANAR 	0x01
1514718dceddSDavid Howells #define I915_OVERLAY_YUV_PACKED 	0x02
1515718dceddSDavid Howells #define I915_OVERLAY_RGB		0x03
1516718dceddSDavid Howells 
1517718dceddSDavid Howells #define I915_OVERLAY_DEPTH_MASK		0xff00
1518718dceddSDavid Howells #define I915_OVERLAY_RGB24		0x1000
1519718dceddSDavid Howells #define I915_OVERLAY_RGB16		0x2000
1520718dceddSDavid Howells #define I915_OVERLAY_RGB15		0x3000
1521718dceddSDavid Howells #define I915_OVERLAY_YUV422		0x0100
1522718dceddSDavid Howells #define I915_OVERLAY_YUV411		0x0200
1523718dceddSDavid Howells #define I915_OVERLAY_YUV420		0x0300
1524718dceddSDavid Howells #define I915_OVERLAY_YUV410		0x0400
1525718dceddSDavid Howells 
1526718dceddSDavid Howells #define I915_OVERLAY_SWAP_MASK		0xff0000
1527718dceddSDavid Howells #define I915_OVERLAY_NO_SWAP		0x000000
1528718dceddSDavid Howells #define I915_OVERLAY_UV_SWAP		0x010000
1529718dceddSDavid Howells #define I915_OVERLAY_Y_SWAP		0x020000
1530718dceddSDavid Howells #define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1531718dceddSDavid Howells 
1532718dceddSDavid Howells #define I915_OVERLAY_FLAGS_MASK		0xff000000
1533718dceddSDavid Howells #define I915_OVERLAY_ENABLE		0x01000000
1534718dceddSDavid Howells 
1535718dceddSDavid Howells struct drm_intel_overlay_put_image {
1536718dceddSDavid Howells 	/* various flags and src format description */
1537718dceddSDavid Howells 	__u32 flags;
1538718dceddSDavid Howells 	/* source picture description */
1539718dceddSDavid Howells 	__u32 bo_handle;
1540718dceddSDavid Howells 	/* stride values and offsets are in bytes, buffer relative */
1541718dceddSDavid Howells 	__u16 stride_Y; /* stride for packed formats */
1542718dceddSDavid Howells 	__u16 stride_UV;
1543718dceddSDavid Howells 	__u32 offset_Y; /* offset for packet formats */
1544718dceddSDavid Howells 	__u32 offset_U;
1545718dceddSDavid Howells 	__u32 offset_V;
1546718dceddSDavid Howells 	/* in pixels */
1547718dceddSDavid Howells 	__u16 src_width;
1548718dceddSDavid Howells 	__u16 src_height;
1549718dceddSDavid Howells 	/* to compensate the scaling factors for partially covered surfaces */
1550718dceddSDavid Howells 	__u16 src_scan_width;
1551718dceddSDavid Howells 	__u16 src_scan_height;
1552718dceddSDavid Howells 	/* output crtc description */
1553718dceddSDavid Howells 	__u32 crtc_id;
1554718dceddSDavid Howells 	__u16 dst_x;
1555718dceddSDavid Howells 	__u16 dst_y;
1556718dceddSDavid Howells 	__u16 dst_width;
1557718dceddSDavid Howells 	__u16 dst_height;
1558718dceddSDavid Howells };
1559718dceddSDavid Howells 
1560718dceddSDavid Howells /* flags */
1561718dceddSDavid Howells #define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1562718dceddSDavid Howells #define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1563ea9da4e4SChris Wilson #define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1564718dceddSDavid Howells struct drm_intel_overlay_attrs {
1565718dceddSDavid Howells 	__u32 flags;
1566718dceddSDavid Howells 	__u32 color_key;
1567718dceddSDavid Howells 	__s32 brightness;
1568718dceddSDavid Howells 	__u32 contrast;
1569718dceddSDavid Howells 	__u32 saturation;
1570718dceddSDavid Howells 	__u32 gamma0;
1571718dceddSDavid Howells 	__u32 gamma1;
1572718dceddSDavid Howells 	__u32 gamma2;
1573718dceddSDavid Howells 	__u32 gamma3;
1574718dceddSDavid Howells 	__u32 gamma4;
1575718dceddSDavid Howells 	__u32 gamma5;
1576718dceddSDavid Howells };
1577718dceddSDavid Howells 
1578718dceddSDavid Howells /*
1579718dceddSDavid Howells  * Intel sprite handling
1580718dceddSDavid Howells  *
1581718dceddSDavid Howells  * Color keying works with a min/mask/max tuple.  Both source and destination
1582718dceddSDavid Howells  * color keying is allowed.
1583718dceddSDavid Howells  *
1584718dceddSDavid Howells  * Source keying:
1585718dceddSDavid Howells  * Sprite pixels within the min & max values, masked against the color channels
1586718dceddSDavid Howells  * specified in the mask field, will be transparent.  All other pixels will
1587718dceddSDavid Howells  * be displayed on top of the primary plane.  For RGB surfaces, only the min
1588718dceddSDavid Howells  * and mask fields will be used; ranged compares are not allowed.
1589718dceddSDavid Howells  *
1590718dceddSDavid Howells  * Destination keying:
1591718dceddSDavid Howells  * Primary plane pixels that match the min value, masked against the color
1592718dceddSDavid Howells  * channels specified in the mask field, will be replaced by corresponding
1593718dceddSDavid Howells  * pixels from the sprite plane.
1594718dceddSDavid Howells  *
1595718dceddSDavid Howells  * Note that source & destination keying are exclusive; only one can be
1596718dceddSDavid Howells  * active on a given plane.
1597718dceddSDavid Howells  */
1598718dceddSDavid Howells 
15996ec5bd34SVille Syrjälä #define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
16006ec5bd34SVille Syrjälä 						* flags==0 to disable colorkeying.
16016ec5bd34SVille Syrjälä 						*/
1602718dceddSDavid Howells #define I915_SET_COLORKEY_DESTINATION	(1<<1)
1603718dceddSDavid Howells #define I915_SET_COLORKEY_SOURCE	(1<<2)
1604718dceddSDavid Howells struct drm_intel_sprite_colorkey {
1605718dceddSDavid Howells 	__u32 plane_id;
1606718dceddSDavid Howells 	__u32 min_value;
1607718dceddSDavid Howells 	__u32 channel_mask;
1608718dceddSDavid Howells 	__u32 max_value;
1609718dceddSDavid Howells 	__u32 flags;
1610718dceddSDavid Howells };
1611718dceddSDavid Howells 
1612718dceddSDavid Howells struct drm_i915_gem_wait {
1613718dceddSDavid Howells 	/** Handle of BO we shall wait on */
1614718dceddSDavid Howells 	__u32 bo_handle;
1615718dceddSDavid Howells 	__u32 flags;
1616718dceddSDavid Howells 	/** Number of nanoseconds to wait, Returns time remaining. */
1617718dceddSDavid Howells 	__s64 timeout_ns;
1618718dceddSDavid Howells };
1619718dceddSDavid Howells 
1620718dceddSDavid Howells struct drm_i915_gem_context_create {
1621b9171541SChris Wilson 	__u32 ctx_id; /* output: id of new context*/
1622718dceddSDavid Howells 	__u32 pad;
1623718dceddSDavid Howells };
1624718dceddSDavid Howells 
1625b9171541SChris Wilson struct drm_i915_gem_context_create_ext {
1626b9171541SChris Wilson 	__u32 ctx_id; /* output: id of new context*/
1627b9171541SChris Wilson 	__u32 flags;
1628b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
16298319f44cSChris Wilson #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
1630b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
16318319f44cSChris Wilson 	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1632e0695db7SChris Wilson 	__u64 extensions;
16335cc9ed4bSChris Wilson };
16345cc9ed4bSChris Wilson 
1635c9dc0f35SChris Wilson struct drm_i915_gem_context_param {
1636c9dc0f35SChris Wilson 	__u32 ctx_id;
1637c9dc0f35SChris Wilson 	__u32 size;
1638c9dc0f35SChris Wilson 	__u64 param;
1639c9dc0f35SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1640b1b38278SDavid Weinehall #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1641fa8848f2SChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1642bc3d6744SChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
164384102171SMika Kuoppala #define I915_CONTEXT_PARAM_BANNABLE	0x5
1644ac14fbd4SChris Wilson #define I915_CONTEXT_PARAM_PRIORITY	0x6
1645ac14fbd4SChris Wilson #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1646ac14fbd4SChris Wilson #define   I915_CONTEXT_DEFAULT_PRIORITY		0
1647ac14fbd4SChris Wilson #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1648e46c2e99STvrtko Ursulin 	/*
1649e46c2e99STvrtko Ursulin 	 * When using the following param, value should be a pointer to
1650e46c2e99STvrtko Ursulin 	 * drm_i915_gem_context_param_sseu.
1651e46c2e99STvrtko Ursulin 	 */
1652e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_SSEU		0x7
1653ba4fda62SChris Wilson 
1654ba4fda62SChris Wilson /*
1655ba4fda62SChris Wilson  * Not all clients may want to attempt automatic recover of a context after
1656ba4fda62SChris Wilson  * a hang (for example, some clients may only submit very small incremental
1657ba4fda62SChris Wilson  * batches relying on known logical state of previous batches which will never
1658ba4fda62SChris Wilson  * recover correctly and each attempt will hang), and so would prefer that
1659ba4fda62SChris Wilson  * the context is forever banned instead.
1660ba4fda62SChris Wilson  *
1661ba4fda62SChris Wilson  * If set to false (0), after a reset, subsequent (and in flight) rendering
1662ba4fda62SChris Wilson  * from this context is discarded, and the client will need to create a new
1663ba4fda62SChris Wilson  * context to use instead.
1664ba4fda62SChris Wilson  *
1665ba4fda62SChris Wilson  * If set to true (1), the kernel will automatically attempt to recover the
1666ba4fda62SChris Wilson  * context by skipping the hanging batch and executing the next batch starting
1667ba4fda62SChris Wilson  * from the default context state (discarding the incomplete logical context
1668ba4fda62SChris Wilson  * state lost due to the reset).
1669ba4fda62SChris Wilson  *
1670ba4fda62SChris Wilson  * On creation, all new contexts are marked as recoverable.
1671ba4fda62SChris Wilson  */
1672ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
16737f3f317aSChris Wilson 
16747f3f317aSChris Wilson 	/*
16757f3f317aSChris Wilson 	 * The id of the associated virtual memory address space (ppGTT) of
16767f3f317aSChris Wilson 	 * this context. Can be retrieved and passed to another context
16777f3f317aSChris Wilson 	 * (on the same fd) for both to use the same ppGTT and so share
16787f3f317aSChris Wilson 	 * address layouts, and avoid reloading the page tables on context
16797f3f317aSChris Wilson 	 * switches between themselves.
16807f3f317aSChris Wilson 	 *
16817f3f317aSChris Wilson 	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
16827f3f317aSChris Wilson 	 */
16837f3f317aSChris Wilson #define I915_CONTEXT_PARAM_VM		0x9
1684976b55f0SChris Wilson 
1685976b55f0SChris Wilson /*
1686976b55f0SChris Wilson  * I915_CONTEXT_PARAM_ENGINES:
1687976b55f0SChris Wilson  *
1688976b55f0SChris Wilson  * Bind this context to operate on this subset of available engines. Henceforth,
1689976b55f0SChris Wilson  * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1690976b55f0SChris Wilson  * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1691976b55f0SChris Wilson  * and upwards. Slots 0...N are filled in using the specified (class, instance).
1692976b55f0SChris Wilson  * Use
1693976b55f0SChris Wilson  *	engine_class: I915_ENGINE_CLASS_INVALID,
1694976b55f0SChris Wilson  *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1695976b55f0SChris Wilson  * to specify a gap in the array that can be filled in later, e.g. by a
1696976b55f0SChris Wilson  * virtual engine used for load balancing.
1697976b55f0SChris Wilson  *
1698976b55f0SChris Wilson  * Setting the number of engines bound to the context to 0, by passing a zero
1699976b55f0SChris Wilson  * sized argument, will revert back to default settings.
1700976b55f0SChris Wilson  *
1701976b55f0SChris Wilson  * See struct i915_context_param_engines.
1702ee113690SChris Wilson  *
1703ee113690SChris Wilson  * Extensions:
1704ee113690SChris Wilson  *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1705ee113690SChris Wilson  *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1706976b55f0SChris Wilson  */
1707976b55f0SChris Wilson #define I915_CONTEXT_PARAM_ENGINES	0xa
1708a0e04715SChris Wilson 
1709a0e04715SChris Wilson /*
1710a0e04715SChris Wilson  * I915_CONTEXT_PARAM_PERSISTENCE:
1711a0e04715SChris Wilson  *
1712a0e04715SChris Wilson  * Allow the context and active rendering to survive the process until
1713a0e04715SChris Wilson  * completion. Persistence allows fire-and-forget clients to queue up a
1714a0e04715SChris Wilson  * bunch of work, hand the output over to a display server and then quit.
1715a0e04715SChris Wilson  * If the context is marked as not persistent, upon closing (either via
1716a0e04715SChris Wilson  * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
1717a0e04715SChris Wilson  * or process termination), the context and any outstanding requests will be
1718a0e04715SChris Wilson  * cancelled (and exported fences for cancelled requests marked as -EIO).
1719a0e04715SChris Wilson  *
1720a0e04715SChris Wilson  * By default, new contexts allow persistence.
1721a0e04715SChris Wilson  */
1722a0e04715SChris Wilson #define I915_CONTEXT_PARAM_PERSISTENCE	0xb
172388be76cdSChris Wilson 
172488be76cdSChris Wilson /*
172588be76cdSChris Wilson  * I915_CONTEXT_PARAM_RINGSIZE:
172688be76cdSChris Wilson  *
172788be76cdSChris Wilson  * Sets the size of the CS ringbuffer to use for logical ring contexts. This
172888be76cdSChris Wilson  * applies a limit of how many batches can be queued to HW before the caller
172988be76cdSChris Wilson  * is blocked due to lack of space for more commands.
173088be76cdSChris Wilson  *
173188be76cdSChris Wilson  * Only reliably possible to be set prior to first use, i.e. during
173288be76cdSChris Wilson  * construction. At any later point, the current execution must be flushed as
173388be76cdSChris Wilson  * the ring can only be changed while the context is idle. Note, the ringsize
173488be76cdSChris Wilson  * can be specified as a constructor property, see
173588be76cdSChris Wilson  * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
173688be76cdSChris Wilson  *
173788be76cdSChris Wilson  * Only applies to the current set of engine and lost when those engines
173888be76cdSChris Wilson  * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
173988be76cdSChris Wilson  *
174088be76cdSChris Wilson  * Must be between 4 - 512 KiB, in intervals of page size [4 KiB].
174188be76cdSChris Wilson  * Default is 16 KiB.
174288be76cdSChris Wilson  */
174388be76cdSChris Wilson #define I915_CONTEXT_PARAM_RINGSIZE	0xc
1744be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
1745e0695db7SChris Wilson 
1746c9dc0f35SChris Wilson 	__u64 value;
1747c9dc0f35SChris Wilson };
1748c9dc0f35SChris Wilson 
17492ef6a01fSMatthew Auld /*
1750e46c2e99STvrtko Ursulin  * Context SSEU programming
1751e46c2e99STvrtko Ursulin  *
1752e46c2e99STvrtko Ursulin  * It may be necessary for either functional or performance reason to configure
1753e46c2e99STvrtko Ursulin  * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1754e46c2e99STvrtko Ursulin  * Sub-slice/EU).
1755e46c2e99STvrtko Ursulin  *
1756e46c2e99STvrtko Ursulin  * This is done by configuring SSEU configuration using the below
1757e46c2e99STvrtko Ursulin  * @struct drm_i915_gem_context_param_sseu for every supported engine which
1758e46c2e99STvrtko Ursulin  * userspace intends to use.
1759e46c2e99STvrtko Ursulin  *
1760e46c2e99STvrtko Ursulin  * Not all GPUs or engines support this functionality in which case an error
1761e46c2e99STvrtko Ursulin  * code -ENODEV will be returned.
1762e46c2e99STvrtko Ursulin  *
1763e46c2e99STvrtko Ursulin  * Also, flexibility of possible SSEU configuration permutations varies between
1764e46c2e99STvrtko Ursulin  * GPU generations and software imposed limitations. Requesting such a
1765e46c2e99STvrtko Ursulin  * combination will return an error code of -EINVAL.
1766e46c2e99STvrtko Ursulin  *
1767e46c2e99STvrtko Ursulin  * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1768e46c2e99STvrtko Ursulin  * favour of a single global setting.
1769e46c2e99STvrtko Ursulin  */
1770e46c2e99STvrtko Ursulin struct drm_i915_gem_context_param_sseu {
1771e46c2e99STvrtko Ursulin 	/*
1772e46c2e99STvrtko Ursulin 	 * Engine class & instance to be configured or queried.
1773e46c2e99STvrtko Ursulin 	 */
1774d1172ab3SChris Wilson 	struct i915_engine_class_instance engine;
1775e46c2e99STvrtko Ursulin 
1776e46c2e99STvrtko Ursulin 	/*
1777e620f7b3SChris Wilson 	 * Unknown flags must be cleared to zero.
1778e46c2e99STvrtko Ursulin 	 */
1779e46c2e99STvrtko Ursulin 	__u32 flags;
1780e620f7b3SChris Wilson #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1781e46c2e99STvrtko Ursulin 
1782e46c2e99STvrtko Ursulin 	/*
1783e46c2e99STvrtko Ursulin 	 * Mask of slices to enable for the context. Valid values are a subset
1784e46c2e99STvrtko Ursulin 	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1785e46c2e99STvrtko Ursulin 	 */
1786e46c2e99STvrtko Ursulin 	__u64 slice_mask;
1787e46c2e99STvrtko Ursulin 
1788e46c2e99STvrtko Ursulin 	/*
1789e46c2e99STvrtko Ursulin 	 * Mask of subslices to enable for the context. Valid values are a
1790e46c2e99STvrtko Ursulin 	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
1791e46c2e99STvrtko Ursulin 	 */
1792e46c2e99STvrtko Ursulin 	__u64 subslice_mask;
1793e46c2e99STvrtko Ursulin 
1794e46c2e99STvrtko Ursulin 	/*
1795e46c2e99STvrtko Ursulin 	 * Minimum/Maximum number of EUs to enable per subslice for the
1796e46c2e99STvrtko Ursulin 	 * context. min_eus_per_subslice must be inferior or equal to
1797e46c2e99STvrtko Ursulin 	 * max_eus_per_subslice.
1798e46c2e99STvrtko Ursulin 	 */
1799e46c2e99STvrtko Ursulin 	__u16 min_eus_per_subslice;
1800e46c2e99STvrtko Ursulin 	__u16 max_eus_per_subslice;
1801e46c2e99STvrtko Ursulin 
1802e46c2e99STvrtko Ursulin 	/*
1803e46c2e99STvrtko Ursulin 	 * Unused for now. Must be cleared to zero.
1804e46c2e99STvrtko Ursulin 	 */
1805e46c2e99STvrtko Ursulin 	__u32 rsvd;
1806e46c2e99STvrtko Ursulin };
1807e46c2e99STvrtko Ursulin 
18086d06779eSChris Wilson /*
18096d06779eSChris Wilson  * i915_context_engines_load_balance:
18106d06779eSChris Wilson  *
18116d06779eSChris Wilson  * Enable load balancing across this set of engines.
18126d06779eSChris Wilson  *
18136d06779eSChris Wilson  * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
18146d06779eSChris Wilson  * used will proxy the execbuffer request onto one of the set of engines
18156d06779eSChris Wilson  * in such a way as to distribute the load evenly across the set.
18166d06779eSChris Wilson  *
18176d06779eSChris Wilson  * The set of engines must be compatible (e.g. the same HW class) as they
18186d06779eSChris Wilson  * will share the same logical GPU context and ring.
18196d06779eSChris Wilson  *
18206d06779eSChris Wilson  * To intermix rendering with the virtual engine and direct rendering onto
18216d06779eSChris Wilson  * the backing engines (bypassing the load balancing proxy), the context must
18226d06779eSChris Wilson  * be defined to use a single timeline for all engines.
18236d06779eSChris Wilson  */
18246d06779eSChris Wilson struct i915_context_engines_load_balance {
18256d06779eSChris Wilson 	struct i915_user_extension base;
18266d06779eSChris Wilson 
18276d06779eSChris Wilson 	__u16 engine_index;
18286d06779eSChris Wilson 	__u16 num_siblings;
18296d06779eSChris Wilson 	__u32 flags; /* all undefined flags must be zero */
18306d06779eSChris Wilson 
18316d06779eSChris Wilson 	__u64 mbz64; /* reserved for future use; must be zero */
18326d06779eSChris Wilson 
18336d06779eSChris Wilson 	struct i915_engine_class_instance engines[0];
18346d06779eSChris Wilson } __attribute__((packed));
18356d06779eSChris Wilson 
18366d06779eSChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
18376d06779eSChris Wilson 	struct i915_user_extension base; \
18386d06779eSChris Wilson 	__u16 engine_index; \
18396d06779eSChris Wilson 	__u16 num_siblings; \
18406d06779eSChris Wilson 	__u32 flags; \
18416d06779eSChris Wilson 	__u64 mbz64; \
18426d06779eSChris Wilson 	struct i915_engine_class_instance engines[N__]; \
18436d06779eSChris Wilson } __attribute__((packed)) name__
18446d06779eSChris Wilson 
1845ee113690SChris Wilson /*
1846ee113690SChris Wilson  * i915_context_engines_bond:
1847ee113690SChris Wilson  *
1848ee113690SChris Wilson  * Constructed bonded pairs for execution within a virtual engine.
1849ee113690SChris Wilson  *
1850ee113690SChris Wilson  * All engines are equal, but some are more equal than others. Given
1851ee113690SChris Wilson  * the distribution of resources in the HW, it may be preferable to run
1852ee113690SChris Wilson  * a request on a given subset of engines in parallel to a request on a
1853ee113690SChris Wilson  * specific engine. We enable this selection of engines within a virtual
1854ee113690SChris Wilson  * engine by specifying bonding pairs, for any given master engine we will
1855ee113690SChris Wilson  * only execute on one of the corresponding siblings within the virtual engine.
1856ee113690SChris Wilson  *
1857ee113690SChris Wilson  * To execute a request in parallel on the master engine and a sibling requires
1858ee113690SChris Wilson  * coordination with a I915_EXEC_FENCE_SUBMIT.
1859ee113690SChris Wilson  */
1860ee113690SChris Wilson struct i915_context_engines_bond {
1861ee113690SChris Wilson 	struct i915_user_extension base;
1862ee113690SChris Wilson 
1863ee113690SChris Wilson 	struct i915_engine_class_instance master;
1864ee113690SChris Wilson 
1865ee113690SChris Wilson 	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
1866ee113690SChris Wilson 	__u16 num_bonds;
1867ee113690SChris Wilson 
1868ee113690SChris Wilson 	__u64 flags; /* all undefined flags must be zero */
1869ee113690SChris Wilson 	__u64 mbz64[4]; /* reserved for future use; must be zero */
1870ee113690SChris Wilson 
1871ee113690SChris Wilson 	struct i915_engine_class_instance engines[0];
1872ee113690SChris Wilson } __attribute__((packed));
1873ee113690SChris Wilson 
1874ee113690SChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
1875ee113690SChris Wilson 	struct i915_user_extension base; \
1876ee113690SChris Wilson 	struct i915_engine_class_instance master; \
1877ee113690SChris Wilson 	__u16 virtual_index; \
1878ee113690SChris Wilson 	__u16 num_bonds; \
1879ee113690SChris Wilson 	__u64 flags; \
1880ee113690SChris Wilson 	__u64 mbz64[4]; \
1881ee113690SChris Wilson 	struct i915_engine_class_instance engines[N__]; \
1882ee113690SChris Wilson } __attribute__((packed)) name__
1883ee113690SChris Wilson 
1884976b55f0SChris Wilson struct i915_context_param_engines {
1885976b55f0SChris Wilson 	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
18866d06779eSChris Wilson #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
1887ee113690SChris Wilson #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
1888976b55f0SChris Wilson 	struct i915_engine_class_instance engines[0];
1889976b55f0SChris Wilson } __attribute__((packed));
1890976b55f0SChris Wilson 
1891976b55f0SChris Wilson #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
1892976b55f0SChris Wilson 	__u64 extensions; \
1893976b55f0SChris Wilson 	struct i915_engine_class_instance engines[N__]; \
1894976b55f0SChris Wilson } __attribute__((packed)) name__
1895976b55f0SChris Wilson 
1896b9171541SChris Wilson struct drm_i915_gem_context_create_ext_setparam {
1897b9171541SChris Wilson #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1898b9171541SChris Wilson 	struct i915_user_extension base;
1899b9171541SChris Wilson 	struct drm_i915_gem_context_param param;
1900b9171541SChris Wilson };
1901b9171541SChris Wilson 
1902b81dde71SChris Wilson struct drm_i915_gem_context_create_ext_clone {
1903b81dde71SChris Wilson #define I915_CONTEXT_CREATE_EXT_CLONE 1
1904b81dde71SChris Wilson 	struct i915_user_extension base;
1905b81dde71SChris Wilson 	__u32 clone_id;
1906b81dde71SChris Wilson 	__u32 flags;
1907b81dde71SChris Wilson #define I915_CONTEXT_CLONE_ENGINES	(1u << 0)
1908b81dde71SChris Wilson #define I915_CONTEXT_CLONE_FLAGS	(1u << 1)
1909b81dde71SChris Wilson #define I915_CONTEXT_CLONE_SCHEDATTR	(1u << 2)
1910b81dde71SChris Wilson #define I915_CONTEXT_CLONE_SSEU		(1u << 3)
1911b81dde71SChris Wilson #define I915_CONTEXT_CLONE_TIMELINE	(1u << 4)
1912b81dde71SChris Wilson #define I915_CONTEXT_CLONE_VM		(1u << 5)
1913b81dde71SChris Wilson #define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
1914b81dde71SChris Wilson 	__u64 rsvd;
1915b81dde71SChris Wilson };
1916b81dde71SChris Wilson 
1917b9171541SChris Wilson struct drm_i915_gem_context_destroy {
1918b9171541SChris Wilson 	__u32 ctx_id;
1919b9171541SChris Wilson 	__u32 pad;
1920b9171541SChris Wilson };
1921b9171541SChris Wilson 
1922b9171541SChris Wilson /*
1923b9171541SChris Wilson  * DRM_I915_GEM_VM_CREATE -
1924b9171541SChris Wilson  *
1925b9171541SChris Wilson  * Create a new virtual memory address space (ppGTT) for use within a context
1926b9171541SChris Wilson  * on the same file. Extensions can be provided to configure exactly how the
1927b9171541SChris Wilson  * address space is setup upon creation.
1928b9171541SChris Wilson  *
1929b9171541SChris Wilson  * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
1930b9171541SChris Wilson  * returned in the outparam @id.
1931b9171541SChris Wilson  *
1932b9171541SChris Wilson  * No flags are defined, with all bits reserved and must be zero.
1933b9171541SChris Wilson  *
1934b9171541SChris Wilson  * An extension chain maybe provided, starting with @extensions, and terminated
1935b9171541SChris Wilson  * by the @next_extension being 0. Currently, no extensions are defined.
1936b9171541SChris Wilson  *
1937b9171541SChris Wilson  * DRM_I915_GEM_VM_DESTROY -
1938b9171541SChris Wilson  *
1939b9171541SChris Wilson  * Destroys a previously created VM id, specified in @id.
1940b9171541SChris Wilson  *
1941b9171541SChris Wilson  * No extensions or flags are allowed currently, and so must be zero.
1942b9171541SChris Wilson  */
1943b9171541SChris Wilson struct drm_i915_gem_vm_control {
1944b9171541SChris Wilson 	__u64 extensions;
1945b9171541SChris Wilson 	__u32 flags;
1946b9171541SChris Wilson 	__u32 vm_id;
1947b9171541SChris Wilson };
1948b9171541SChris Wilson 
1949b9171541SChris Wilson struct drm_i915_reg_read {
1950b9171541SChris Wilson 	/*
1951b9171541SChris Wilson 	 * Register offset.
1952b9171541SChris Wilson 	 * For 64bit wide registers where the upper 32bits don't immediately
1953b9171541SChris Wilson 	 * follow the lower 32bits, the offset of the lower 32bits must
1954b9171541SChris Wilson 	 * be specified
1955b9171541SChris Wilson 	 */
1956b9171541SChris Wilson 	__u64 offset;
1957b9171541SChris Wilson #define I915_REG_READ_8B_WA (1ul << 0)
1958b9171541SChris Wilson 
1959b9171541SChris Wilson 	__u64 val; /* Return value */
1960b9171541SChris Wilson };
1961b9171541SChris Wilson 
1962b9171541SChris Wilson /* Known registers:
1963b9171541SChris Wilson  *
1964b9171541SChris Wilson  * Render engine timestamp - 0x2358 + 64bit - gen7+
1965b9171541SChris Wilson  * - Note this register returns an invalid value if using the default
1966b9171541SChris Wilson  *   single instruction 8byte read, in order to workaround that pass
1967b9171541SChris Wilson  *   flag I915_REG_READ_8B_WA in offset field.
1968b9171541SChris Wilson  *
1969b9171541SChris Wilson  */
1970b9171541SChris Wilson 
1971b9171541SChris Wilson struct drm_i915_reset_stats {
1972b9171541SChris Wilson 	__u32 ctx_id;
1973b9171541SChris Wilson 	__u32 flags;
1974b9171541SChris Wilson 
1975b9171541SChris Wilson 	/* All resets since boot/module reload, for all contexts */
1976b9171541SChris Wilson 	__u32 reset_count;
1977b9171541SChris Wilson 
1978b9171541SChris Wilson 	/* Number of batches lost when active in GPU, for this context */
1979b9171541SChris Wilson 	__u32 batch_active;
1980b9171541SChris Wilson 
1981b9171541SChris Wilson 	/* Number of batches lost pending for execution, for this context */
1982b9171541SChris Wilson 	__u32 batch_pending;
1983b9171541SChris Wilson 
1984b9171541SChris Wilson 	__u32 pad;
1985b9171541SChris Wilson };
1986b9171541SChris Wilson 
1987b9171541SChris Wilson struct drm_i915_gem_userptr {
1988b9171541SChris Wilson 	__u64 user_ptr;
1989b9171541SChris Wilson 	__u64 user_size;
1990b9171541SChris Wilson 	__u32 flags;
1991b9171541SChris Wilson #define I915_USERPTR_READ_ONLY 0x1
1992b9171541SChris Wilson #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1993b9171541SChris Wilson 	/**
1994b9171541SChris Wilson 	 * Returned handle for the object.
1995b9171541SChris Wilson 	 *
1996b9171541SChris Wilson 	 * Object handles are nonzero.
1997b9171541SChris Wilson 	 */
1998b9171541SChris Wilson 	__u32 handle;
1999b9171541SChris Wilson };
2000b9171541SChris Wilson 
2001d7965152SRobert Bragg enum drm_i915_oa_format {
200219f81df2SRobert Bragg 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
200319f81df2SRobert Bragg 	I915_OA_FORMAT_A29,	    /* HSW only */
200419f81df2SRobert Bragg 	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
200519f81df2SRobert Bragg 	I915_OA_FORMAT_B4_C8,	    /* HSW only */
200619f81df2SRobert Bragg 	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
200719f81df2SRobert Bragg 	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
200819f81df2SRobert Bragg 	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
200919f81df2SRobert Bragg 
201019f81df2SRobert Bragg 	/* Gen8+ */
201119f81df2SRobert Bragg 	I915_OA_FORMAT_A12,
201219f81df2SRobert Bragg 	I915_OA_FORMAT_A12_B8_C8,
201319f81df2SRobert Bragg 	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
2014d7965152SRobert Bragg 
2015d7965152SRobert Bragg 	I915_OA_FORMAT_MAX	    /* non-ABI */
2016d7965152SRobert Bragg };
2017d7965152SRobert Bragg 
2018eec688e1SRobert Bragg enum drm_i915_perf_property_id {
2019eec688e1SRobert Bragg 	/**
2020eec688e1SRobert Bragg 	 * Open the stream for a specific context handle (as used with
2021eec688e1SRobert Bragg 	 * execbuffer2). A stream opened for a specific context this way
2022eec688e1SRobert Bragg 	 * won't typically require root privileges.
2023b8d49f28SLionel Landwerlin 	 *
2024b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2025eec688e1SRobert Bragg 	 */
2026eec688e1SRobert Bragg 	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
2027eec688e1SRobert Bragg 
2028d7965152SRobert Bragg 	/**
2029d7965152SRobert Bragg 	 * A value of 1 requests the inclusion of raw OA unit reports as
2030d7965152SRobert Bragg 	 * part of stream samples.
2031b8d49f28SLionel Landwerlin 	 *
2032b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2033d7965152SRobert Bragg 	 */
2034d7965152SRobert Bragg 	DRM_I915_PERF_PROP_SAMPLE_OA,
2035d7965152SRobert Bragg 
2036d7965152SRobert Bragg 	/**
2037d7965152SRobert Bragg 	 * The value specifies which set of OA unit metrics should be
203866137f54SRandy Dunlap 	 * configured, defining the contents of any OA unit reports.
2039b8d49f28SLionel Landwerlin 	 *
2040b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2041d7965152SRobert Bragg 	 */
2042d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_METRICS_SET,
2043d7965152SRobert Bragg 
2044d7965152SRobert Bragg 	/**
2045d7965152SRobert Bragg 	 * The value specifies the size and layout of OA unit reports.
2046b8d49f28SLionel Landwerlin 	 *
2047b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2048d7965152SRobert Bragg 	 */
2049d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_FORMAT,
2050d7965152SRobert Bragg 
2051d7965152SRobert Bragg 	/**
2052d7965152SRobert Bragg 	 * Specifying this property implicitly requests periodic OA unit
2053d7965152SRobert Bragg 	 * sampling and (at least on Haswell) the sampling frequency is derived
2054d7965152SRobert Bragg 	 * from this exponent as follows:
2055d7965152SRobert Bragg 	 *
2056d7965152SRobert Bragg 	 *   80ns * 2^(period_exponent + 1)
2057b8d49f28SLionel Landwerlin 	 *
2058b8d49f28SLionel Landwerlin 	 * This property is available in perf revision 1.
2059d7965152SRobert Bragg 	 */
2060d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_EXPONENT,
2061d7965152SRobert Bragg 
20629cd20ef7SLionel Landwerlin 	/**
20639cd20ef7SLionel Landwerlin 	 * Specifying this property is only valid when specify a context to
20649cd20ef7SLionel Landwerlin 	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
20659cd20ef7SLionel Landwerlin 	 * will hold preemption of the particular context we want to gather
20669cd20ef7SLionel Landwerlin 	 * performance data about. The execbuf2 submissions must include a
20679cd20ef7SLionel Landwerlin 	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
20689cd20ef7SLionel Landwerlin 	 *
20699cd20ef7SLionel Landwerlin 	 * This property is available in perf revision 3.
20709cd20ef7SLionel Landwerlin 	 */
20719cd20ef7SLionel Landwerlin 	DRM_I915_PERF_PROP_HOLD_PREEMPTION,
20729cd20ef7SLionel Landwerlin 
207311ecbdddSLionel Landwerlin 	/**
207411ecbdddSLionel Landwerlin 	 * Specifying this pins all contexts to the specified SSEU power
207511ecbdddSLionel Landwerlin 	 * configuration for the duration of the recording.
207611ecbdddSLionel Landwerlin 	 *
207711ecbdddSLionel Landwerlin 	 * This parameter's value is a pointer to a struct
207811ecbdddSLionel Landwerlin 	 * drm_i915_gem_context_param_sseu.
207911ecbdddSLionel Landwerlin 	 *
208011ecbdddSLionel Landwerlin 	 * This property is available in perf revision 4.
208111ecbdddSLionel Landwerlin 	 */
208211ecbdddSLionel Landwerlin 	DRM_I915_PERF_PROP_GLOBAL_SSEU,
208311ecbdddSLionel Landwerlin 
20844ef10fe0SLionel Landwerlin 	/**
20854ef10fe0SLionel Landwerlin 	 * This optional parameter specifies the timer interval in nanoseconds
20864ef10fe0SLionel Landwerlin 	 * at which the i915 driver will check the OA buffer for available data.
20874ef10fe0SLionel Landwerlin 	 * Minimum allowed value is 100 microseconds. A default value is used by
20884ef10fe0SLionel Landwerlin 	 * the driver if this parameter is not specified. Note that larger timer
20894ef10fe0SLionel Landwerlin 	 * values will reduce cpu consumption during OA perf captures. However,
20904ef10fe0SLionel Landwerlin 	 * excessively large values would potentially result in OA buffer
20914ef10fe0SLionel Landwerlin 	 * overwrites as captures reach end of the OA buffer.
20924ef10fe0SLionel Landwerlin 	 *
20934ef10fe0SLionel Landwerlin 	 * This property is available in perf revision 5.
20944ef10fe0SLionel Landwerlin 	 */
20954ef10fe0SLionel Landwerlin 	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
20964ef10fe0SLionel Landwerlin 
2097eec688e1SRobert Bragg 	DRM_I915_PERF_PROP_MAX /* non-ABI */
2098eec688e1SRobert Bragg };
2099eec688e1SRobert Bragg 
2100eec688e1SRobert Bragg struct drm_i915_perf_open_param {
2101eec688e1SRobert Bragg 	__u32 flags;
2102eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
2103eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
2104eec688e1SRobert Bragg #define I915_PERF_FLAG_DISABLED		(1<<2)
2105eec688e1SRobert Bragg 
2106eec688e1SRobert Bragg 	/** The number of u64 (id, value) pairs */
2107eec688e1SRobert Bragg 	__u32 num_properties;
2108eec688e1SRobert Bragg 
2109eec688e1SRobert Bragg 	/**
2110eec688e1SRobert Bragg 	 * Pointer to array of u64 (id, value) pairs configuring the stream
2111eec688e1SRobert Bragg 	 * to open.
2112eec688e1SRobert Bragg 	 */
2113cd8bddc4SChris Wilson 	__u64 properties_ptr;
2114eec688e1SRobert Bragg };
2115eec688e1SRobert Bragg 
21162ef6a01fSMatthew Auld /*
2117d7965152SRobert Bragg  * Enable data capture for a stream that was either opened in a disabled state
2118d7965152SRobert Bragg  * via I915_PERF_FLAG_DISABLED or was later disabled via
2119d7965152SRobert Bragg  * I915_PERF_IOCTL_DISABLE.
2120d7965152SRobert Bragg  *
2121d7965152SRobert Bragg  * It is intended to be cheaper to disable and enable a stream than it may be
2122d7965152SRobert Bragg  * to close and re-open a stream with the same configuration.
2123d7965152SRobert Bragg  *
2124d7965152SRobert Bragg  * It's undefined whether any pending data for the stream will be lost.
2125b8d49f28SLionel Landwerlin  *
2126b8d49f28SLionel Landwerlin  * This ioctl is available in perf revision 1.
2127d7965152SRobert Bragg  */
2128eec688e1SRobert Bragg #define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
2129d7965152SRobert Bragg 
21302ef6a01fSMatthew Auld /*
2131d7965152SRobert Bragg  * Disable data capture for a stream.
2132d7965152SRobert Bragg  *
2133d7965152SRobert Bragg  * It is an error to try and read a stream that is disabled.
2134b8d49f28SLionel Landwerlin  *
2135b8d49f28SLionel Landwerlin  * This ioctl is available in perf revision 1.
2136d7965152SRobert Bragg  */
2137eec688e1SRobert Bragg #define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
2138eec688e1SRobert Bragg 
21392ef6a01fSMatthew Auld /*
21407831e9a9SChris Wilson  * Change metrics_set captured by a stream.
21417831e9a9SChris Wilson  *
21427831e9a9SChris Wilson  * If the stream is bound to a specific context, the configuration change
21437831e9a9SChris Wilson  * will performed inline with that context such that it takes effect before
21447831e9a9SChris Wilson  * the next execbuf submission.
21457831e9a9SChris Wilson  *
21467831e9a9SChris Wilson  * Returns the previously bound metrics set id, or a negative error code.
21477831e9a9SChris Wilson  *
21487831e9a9SChris Wilson  * This ioctl is available in perf revision 2.
21497831e9a9SChris Wilson  */
21507831e9a9SChris Wilson #define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
21517831e9a9SChris Wilson 
21522ef6a01fSMatthew Auld /*
2153eec688e1SRobert Bragg  * Common to all i915 perf records
2154eec688e1SRobert Bragg  */
2155eec688e1SRobert Bragg struct drm_i915_perf_record_header {
2156eec688e1SRobert Bragg 	__u32 type;
2157eec688e1SRobert Bragg 	__u16 pad;
2158eec688e1SRobert Bragg 	__u16 size;
2159eec688e1SRobert Bragg };
2160eec688e1SRobert Bragg 
2161eec688e1SRobert Bragg enum drm_i915_perf_record_type {
2162eec688e1SRobert Bragg 
2163eec688e1SRobert Bragg 	/**
2164eec688e1SRobert Bragg 	 * Samples are the work horse record type whose contents are extensible
2165eec688e1SRobert Bragg 	 * and defined when opening an i915 perf stream based on the given
2166eec688e1SRobert Bragg 	 * properties.
2167eec688e1SRobert Bragg 	 *
2168eec688e1SRobert Bragg 	 * Boolean properties following the naming convention
2169eec688e1SRobert Bragg 	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2170eec688e1SRobert Bragg 	 * every sample.
2171eec688e1SRobert Bragg 	 *
2172eec688e1SRobert Bragg 	 * The order of these sample properties given by userspace has no
2173d7965152SRobert Bragg 	 * affect on the ordering of data within a sample. The order is
2174eec688e1SRobert Bragg 	 * documented here.
2175eec688e1SRobert Bragg 	 *
2176eec688e1SRobert Bragg 	 * struct {
2177eec688e1SRobert Bragg 	 *     struct drm_i915_perf_record_header header;
2178eec688e1SRobert Bragg 	 *
2179d7965152SRobert Bragg 	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2180eec688e1SRobert Bragg 	 * };
2181eec688e1SRobert Bragg 	 */
2182eec688e1SRobert Bragg 	DRM_I915_PERF_RECORD_SAMPLE = 1,
2183eec688e1SRobert Bragg 
2184d7965152SRobert Bragg 	/*
2185d7965152SRobert Bragg 	 * Indicates that one or more OA reports were not written by the
2186d7965152SRobert Bragg 	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2187d7965152SRobert Bragg 	 * command collides with periodic sampling - which would be more likely
2188d7965152SRobert Bragg 	 * at higher sampling frequencies.
2189d7965152SRobert Bragg 	 */
2190d7965152SRobert Bragg 	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2191d7965152SRobert Bragg 
2192d7965152SRobert Bragg 	/**
2193d7965152SRobert Bragg 	 * An error occurred that resulted in all pending OA reports being lost.
2194d7965152SRobert Bragg 	 */
2195d7965152SRobert Bragg 	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2196d7965152SRobert Bragg 
2197eec688e1SRobert Bragg 	DRM_I915_PERF_RECORD_MAX /* non-ABI */
2198eec688e1SRobert Bragg };
2199eec688e1SRobert Bragg 
22002ef6a01fSMatthew Auld /*
2201f89823c2SLionel Landwerlin  * Structure to upload perf dynamic configuration into the kernel.
2202f89823c2SLionel Landwerlin  */
2203f89823c2SLionel Landwerlin struct drm_i915_perf_oa_config {
2204f89823c2SLionel Landwerlin 	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
2205f89823c2SLionel Landwerlin 	char uuid[36];
2206f89823c2SLionel Landwerlin 
2207f89823c2SLionel Landwerlin 	__u32 n_mux_regs;
2208f89823c2SLionel Landwerlin 	__u32 n_boolean_regs;
2209f89823c2SLionel Landwerlin 	__u32 n_flex_regs;
2210f89823c2SLionel Landwerlin 
2211ee427e25SLionel Landwerlin 	/*
2212a446ae2cSLionel Landwerlin 	 * These fields are pointers to tuples of u32 values (register address,
2213a446ae2cSLionel Landwerlin 	 * value). For example the expected length of the buffer pointed by
2214a446ae2cSLionel Landwerlin 	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
2215ee427e25SLionel Landwerlin 	 */
221617ad4fddSChris Wilson 	__u64 mux_regs_ptr;
221717ad4fddSChris Wilson 	__u64 boolean_regs_ptr;
221817ad4fddSChris Wilson 	__u64 flex_regs_ptr;
2219f89823c2SLionel Landwerlin };
2220f89823c2SLionel Landwerlin 
2221*e3bdccafSMatthew Auld /**
2222*e3bdccafSMatthew Auld  * struct drm_i915_query_item - An individual query for the kernel to process.
2223*e3bdccafSMatthew Auld  *
2224*e3bdccafSMatthew Auld  * The behaviour is determined by the @query_id. Note that exactly what
2225*e3bdccafSMatthew Auld  * @data_ptr is also depends on the specific @query_id.
2226*e3bdccafSMatthew Auld  */
2227a446ae2cSLionel Landwerlin struct drm_i915_query_item {
2228*e3bdccafSMatthew Auld 	/** @query_id: The id for this query */
2229a446ae2cSLionel Landwerlin 	__u64 query_id;
2230c822e059SLionel Landwerlin #define DRM_I915_QUERY_TOPOLOGY_INFO    1
2231c5d3e39cSTvrtko Ursulin #define DRM_I915_QUERY_ENGINE_INFO	2
22324f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG      3
2233be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
2234a446ae2cSLionel Landwerlin 
2235*e3bdccafSMatthew Auld 	/**
2236*e3bdccafSMatthew Auld 	 * @length:
2237*e3bdccafSMatthew Auld 	 *
2238a446ae2cSLionel Landwerlin 	 * When set to zero by userspace, this is filled with the size of the
2239*e3bdccafSMatthew Auld 	 * data to be written at the @data_ptr pointer. The kernel sets this
2240a446ae2cSLionel Landwerlin 	 * value to a negative value to signal an error on a particular query
2241a446ae2cSLionel Landwerlin 	 * item.
2242a446ae2cSLionel Landwerlin 	 */
2243a446ae2cSLionel Landwerlin 	__s32 length;
2244a446ae2cSLionel Landwerlin 
2245*e3bdccafSMatthew Auld 	/**
2246*e3bdccafSMatthew Auld 	 * @flags:
2247*e3bdccafSMatthew Auld 	 *
22484f6ccc74SLionel Landwerlin 	 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
22494f6ccc74SLionel Landwerlin 	 *
22504f6ccc74SLionel Landwerlin 	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
22514f6ccc74SLionel Landwerlin 	 * following:
2252*e3bdccafSMatthew Auld 	 *
22534f6ccc74SLionel Landwerlin 	 *	- DRM_I915_QUERY_PERF_CONFIG_LIST
22544f6ccc74SLionel Landwerlin 	 *      - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
22554f6ccc74SLionel Landwerlin 	 *      - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
2256a446ae2cSLionel Landwerlin 	 */
2257a446ae2cSLionel Landwerlin 	__u32 flags;
22584f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_LIST          1
22594f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
22604f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3
2261a446ae2cSLionel Landwerlin 
2262*e3bdccafSMatthew Auld 	/**
2263*e3bdccafSMatthew Auld 	 * @data_ptr:
2264*e3bdccafSMatthew Auld 	 *
2265*e3bdccafSMatthew Auld 	 * Data will be written at the location pointed by @data_ptr when the
2266*e3bdccafSMatthew Auld 	 * value of @length matches the length of the data to be written by the
2267a446ae2cSLionel Landwerlin 	 * kernel.
2268a446ae2cSLionel Landwerlin 	 */
2269a446ae2cSLionel Landwerlin 	__u64 data_ptr;
2270a446ae2cSLionel Landwerlin };
2271a446ae2cSLionel Landwerlin 
2272*e3bdccafSMatthew Auld /**
2273*e3bdccafSMatthew Auld  * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
2274*e3bdccafSMatthew Auld  * kernel to fill out.
2275*e3bdccafSMatthew Auld  *
2276*e3bdccafSMatthew Auld  * Note that this is generally a two step process for each struct
2277*e3bdccafSMatthew Auld  * drm_i915_query_item in the array:
2278*e3bdccafSMatthew Auld  *
2279*e3bdccafSMatthew Auld  * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
2280*e3bdccafSMatthew Auld  *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
2281*e3bdccafSMatthew Auld  *    kernel will then fill in the size, in bytes, which tells userspace how
2282*e3bdccafSMatthew Auld  *    memory it needs to allocate for the blob(say for an array of properties).
2283*e3bdccafSMatthew Auld  *
2284*e3bdccafSMatthew Auld  * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
2285*e3bdccafSMatthew Auld  *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
2286*e3bdccafSMatthew Auld  *    the &drm_i915_query_item.length should still be the same as what the
2287*e3bdccafSMatthew Auld  *    kernel previously set. At this point the kernel can fill in the blob.
2288*e3bdccafSMatthew Auld  *
2289*e3bdccafSMatthew Auld  * Note that for some query items it can make sense for userspace to just pass
2290*e3bdccafSMatthew Auld  * in a buffer/blob equal to or larger than the required size. In this case only
2291*e3bdccafSMatthew Auld  * a single ioctl call is needed. For some smaller query items this can work
2292*e3bdccafSMatthew Auld  * quite well.
2293*e3bdccafSMatthew Auld  *
2294*e3bdccafSMatthew Auld  */
2295a446ae2cSLionel Landwerlin struct drm_i915_query {
2296*e3bdccafSMatthew Auld 	/** @num_items: The number of elements in the @items_ptr array */
2297a446ae2cSLionel Landwerlin 	__u32 num_items;
2298a446ae2cSLionel Landwerlin 
2299*e3bdccafSMatthew Auld 	/**
2300*e3bdccafSMatthew Auld 	 * @flags: Unused for now. Must be cleared to zero.
2301a446ae2cSLionel Landwerlin 	 */
2302a446ae2cSLionel Landwerlin 	__u32 flags;
2303a446ae2cSLionel Landwerlin 
2304*e3bdccafSMatthew Auld 	/**
2305*e3bdccafSMatthew Auld 	 * @items_ptr:
2306*e3bdccafSMatthew Auld 	 *
2307*e3bdccafSMatthew Auld 	 * Pointer to an array of struct drm_i915_query_item. The number of
2308*e3bdccafSMatthew Auld 	 * array elements is @num_items.
2309a446ae2cSLionel Landwerlin 	 */
2310a446ae2cSLionel Landwerlin 	__u64 items_ptr;
2311a446ae2cSLionel Landwerlin };
2312a446ae2cSLionel Landwerlin 
2313c822e059SLionel Landwerlin /*
2314c822e059SLionel Landwerlin  * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
2315c822e059SLionel Landwerlin  *
2316c822e059SLionel Landwerlin  * data: contains the 3 pieces of information :
2317c822e059SLionel Landwerlin  *
2318c822e059SLionel Landwerlin  * - the slice mask with one bit per slice telling whether a slice is
2319c822e059SLionel Landwerlin  *   available. The availability of slice X can be queried with the following
2320c822e059SLionel Landwerlin  *   formula :
2321c822e059SLionel Landwerlin  *
2322c822e059SLionel Landwerlin  *           (data[X / 8] >> (X % 8)) & 1
2323c822e059SLionel Landwerlin  *
2324c822e059SLionel Landwerlin  * - the subslice mask for each slice with one bit per subslice telling
2325601734f7SDaniele Ceraolo Spurio  *   whether a subslice is available. Gen12 has dual-subslices, which are
2326601734f7SDaniele Ceraolo Spurio  *   similar to two gen11 subslices. For gen12, this array represents dual-
2327601734f7SDaniele Ceraolo Spurio  *   subslices. The availability of subslice Y in slice X can be queried
2328601734f7SDaniele Ceraolo Spurio  *   with the following formula :
2329c822e059SLionel Landwerlin  *
2330c822e059SLionel Landwerlin  *           (data[subslice_offset +
2331c822e059SLionel Landwerlin  *                 X * subslice_stride +
2332c822e059SLionel Landwerlin  *                 Y / 8] >> (Y % 8)) & 1
2333c822e059SLionel Landwerlin  *
2334c822e059SLionel Landwerlin  * - the EU mask for each subslice in each slice with one bit per EU telling
2335c822e059SLionel Landwerlin  *   whether an EU is available. The availability of EU Z in subslice Y in
2336c822e059SLionel Landwerlin  *   slice X can be queried with the following formula :
2337c822e059SLionel Landwerlin  *
2338c822e059SLionel Landwerlin  *           (data[eu_offset +
2339c822e059SLionel Landwerlin  *                 (X * max_subslices + Y) * eu_stride +
2340c822e059SLionel Landwerlin  *                 Z / 8] >> (Z % 8)) & 1
2341c822e059SLionel Landwerlin  */
2342c822e059SLionel Landwerlin struct drm_i915_query_topology_info {
2343c822e059SLionel Landwerlin 	/*
2344c822e059SLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
2345c822e059SLionel Landwerlin 	 */
2346c822e059SLionel Landwerlin 	__u16 flags;
2347c822e059SLionel Landwerlin 
2348c822e059SLionel Landwerlin 	__u16 max_slices;
2349c822e059SLionel Landwerlin 	__u16 max_subslices;
2350c822e059SLionel Landwerlin 	__u16 max_eus_per_subslice;
2351c822e059SLionel Landwerlin 
2352c822e059SLionel Landwerlin 	/*
2353c822e059SLionel Landwerlin 	 * Offset in data[] at which the subslice masks are stored.
2354c822e059SLionel Landwerlin 	 */
2355c822e059SLionel Landwerlin 	__u16 subslice_offset;
2356c822e059SLionel Landwerlin 
2357c822e059SLionel Landwerlin 	/*
2358c822e059SLionel Landwerlin 	 * Stride at which each of the subslice masks for each slice are
2359c822e059SLionel Landwerlin 	 * stored.
2360c822e059SLionel Landwerlin 	 */
2361c822e059SLionel Landwerlin 	__u16 subslice_stride;
2362c822e059SLionel Landwerlin 
2363c822e059SLionel Landwerlin 	/*
2364c822e059SLionel Landwerlin 	 * Offset in data[] at which the EU masks are stored.
2365c822e059SLionel Landwerlin 	 */
2366c822e059SLionel Landwerlin 	__u16 eu_offset;
2367c822e059SLionel Landwerlin 
2368c822e059SLionel Landwerlin 	/*
2369c822e059SLionel Landwerlin 	 * Stride at which each of the EU masks for each subslice are stored.
2370c822e059SLionel Landwerlin 	 */
2371c822e059SLionel Landwerlin 	__u16 eu_stride;
2372c822e059SLionel Landwerlin 
2373c822e059SLionel Landwerlin 	__u8 data[];
2374c822e059SLionel Landwerlin };
2375c822e059SLionel Landwerlin 
2376c5d3e39cSTvrtko Ursulin /**
2377c5d3e39cSTvrtko Ursulin  * struct drm_i915_engine_info
2378c5d3e39cSTvrtko Ursulin  *
2379c5d3e39cSTvrtko Ursulin  * Describes one engine and it's capabilities as known to the driver.
2380c5d3e39cSTvrtko Ursulin  */
2381c5d3e39cSTvrtko Ursulin struct drm_i915_engine_info {
23822ef6a01fSMatthew Auld 	/** @engine: Engine class and instance. */
2383c5d3e39cSTvrtko Ursulin 	struct i915_engine_class_instance engine;
2384c5d3e39cSTvrtko Ursulin 
23852ef6a01fSMatthew Auld 	/** @rsvd0: Reserved field. */
2386c5d3e39cSTvrtko Ursulin 	__u32 rsvd0;
2387c5d3e39cSTvrtko Ursulin 
23882ef6a01fSMatthew Auld 	/** @flags: Engine flags. */
2389c5d3e39cSTvrtko Ursulin 	__u64 flags;
2390c5d3e39cSTvrtko Ursulin 
23912ef6a01fSMatthew Auld 	/** @capabilities: Capabilities of this engine. */
2392c5d3e39cSTvrtko Ursulin 	__u64 capabilities;
2393c5d3e39cSTvrtko Ursulin #define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
2394c5d3e39cSTvrtko Ursulin #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)
2395c5d3e39cSTvrtko Ursulin 
23962ef6a01fSMatthew Auld 	/** @rsvd1: Reserved fields. */
2397c5d3e39cSTvrtko Ursulin 	__u64 rsvd1[4];
2398c5d3e39cSTvrtko Ursulin };
2399c5d3e39cSTvrtko Ursulin 
2400c5d3e39cSTvrtko Ursulin /**
2401c5d3e39cSTvrtko Ursulin  * struct drm_i915_query_engine_info
2402c5d3e39cSTvrtko Ursulin  *
2403c5d3e39cSTvrtko Ursulin  * Engine info query enumerates all engines known to the driver by filling in
2404c5d3e39cSTvrtko Ursulin  * an array of struct drm_i915_engine_info structures.
2405c5d3e39cSTvrtko Ursulin  */
2406c5d3e39cSTvrtko Ursulin struct drm_i915_query_engine_info {
24072ef6a01fSMatthew Auld 	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
2408c5d3e39cSTvrtko Ursulin 	__u32 num_engines;
2409c5d3e39cSTvrtko Ursulin 
24102ef6a01fSMatthew Auld 	/** @rsvd: MBZ */
2411c5d3e39cSTvrtko Ursulin 	__u32 rsvd[3];
2412c5d3e39cSTvrtko Ursulin 
24132ef6a01fSMatthew Auld 	/** @engines: Marker for drm_i915_engine_info structures. */
2414c5d3e39cSTvrtko Ursulin 	struct drm_i915_engine_info engines[];
2415c5d3e39cSTvrtko Ursulin };
2416c5d3e39cSTvrtko Ursulin 
24174f6ccc74SLionel Landwerlin /*
24184f6ccc74SLionel Landwerlin  * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
24194f6ccc74SLionel Landwerlin  */
24204f6ccc74SLionel Landwerlin struct drm_i915_query_perf_config {
24214f6ccc74SLionel Landwerlin 	union {
24224f6ccc74SLionel Landwerlin 		/*
24234f6ccc74SLionel Landwerlin 		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
24244f6ccc74SLionel Landwerlin 		 * this fields to the number of configurations available.
24254f6ccc74SLionel Landwerlin 		 */
24264f6ccc74SLionel Landwerlin 		__u64 n_configs;
24274f6ccc74SLionel Landwerlin 
24284f6ccc74SLionel Landwerlin 		/*
24294f6ccc74SLionel Landwerlin 		 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
24304f6ccc74SLionel Landwerlin 		 * i915 will use the value in this field as configuration
24314f6ccc74SLionel Landwerlin 		 * identifier to decide what data to write into config_ptr.
24324f6ccc74SLionel Landwerlin 		 */
24334f6ccc74SLionel Landwerlin 		__u64 config;
24344f6ccc74SLionel Landwerlin 
24354f6ccc74SLionel Landwerlin 		/*
24364f6ccc74SLionel Landwerlin 		 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
24374f6ccc74SLionel Landwerlin 		 * i915 will use the value in this field as configuration
24384f6ccc74SLionel Landwerlin 		 * identifier to decide what data to write into config_ptr.
24394f6ccc74SLionel Landwerlin 		 *
24404f6ccc74SLionel Landwerlin 		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
24414f6ccc74SLionel Landwerlin 		 */
24424f6ccc74SLionel Landwerlin 		char uuid[36];
24434f6ccc74SLionel Landwerlin 	};
24444f6ccc74SLionel Landwerlin 
24454f6ccc74SLionel Landwerlin 	/*
24464f6ccc74SLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
24474f6ccc74SLionel Landwerlin 	 */
24484f6ccc74SLionel Landwerlin 	__u32 flags;
24494f6ccc74SLionel Landwerlin 
24504f6ccc74SLionel Landwerlin 	/*
24514f6ccc74SLionel Landwerlin 	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
24524f6ccc74SLionel Landwerlin 	 * write an array of __u64 of configuration identifiers.
24534f6ccc74SLionel Landwerlin 	 *
24544f6ccc74SLionel Landwerlin 	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
24554f6ccc74SLionel Landwerlin 	 * write a struct drm_i915_perf_oa_config. If the following fields of
24564f6ccc74SLionel Landwerlin 	 * drm_i915_perf_oa_config are set not set to 0, i915 will write into
24574f6ccc74SLionel Landwerlin 	 * the associated pointers the values of submitted when the
24584f6ccc74SLionel Landwerlin 	 * configuration was created :
24594f6ccc74SLionel Landwerlin 	 *
24604f6ccc74SLionel Landwerlin 	 *         - n_mux_regs
24614f6ccc74SLionel Landwerlin 	 *         - n_boolean_regs
24624f6ccc74SLionel Landwerlin 	 *         - n_flex_regs
24634f6ccc74SLionel Landwerlin 	 */
24644f6ccc74SLionel Landwerlin 	__u8 data[];
24654f6ccc74SLionel Landwerlin };
24664f6ccc74SLionel Landwerlin 
2467b1c1f5c4SEmil Velikov #if defined(__cplusplus)
2468b1c1f5c4SEmil Velikov }
2469b1c1f5c4SEmil Velikov #endif
2470b1c1f5c4SEmil Velikov 
2471718dceddSDavid Howells #endif /* _UAPI_I915_DRM_H_ */
2472