xref: /openbmc/linux/include/uapi/drm/i915_drm.h (revision cc662126)
1718dceddSDavid Howells /*
2718dceddSDavid Howells  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3718dceddSDavid Howells  * All Rights Reserved.
4718dceddSDavid Howells  *
5718dceddSDavid Howells  * Permission is hereby granted, free of charge, to any person obtaining a
6718dceddSDavid Howells  * copy of this software and associated documentation files (the
7718dceddSDavid Howells  * "Software"), to deal in the Software without restriction, including
8718dceddSDavid Howells  * without limitation the rights to use, copy, modify, merge, publish,
9718dceddSDavid Howells  * distribute, sub license, and/or sell copies of the Software, and to
10718dceddSDavid Howells  * permit persons to whom the Software is furnished to do so, subject to
11718dceddSDavid Howells  * the following conditions:
12718dceddSDavid Howells  *
13718dceddSDavid Howells  * The above copyright notice and this permission notice (including the
14718dceddSDavid Howells  * next paragraph) shall be included in all copies or substantial portions
15718dceddSDavid Howells  * of the Software.
16718dceddSDavid Howells  *
17718dceddSDavid Howells  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18718dceddSDavid Howells  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19718dceddSDavid Howells  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20718dceddSDavid Howells  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21718dceddSDavid Howells  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22718dceddSDavid Howells  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23718dceddSDavid Howells  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24718dceddSDavid Howells  *
25718dceddSDavid Howells  */
26718dceddSDavid Howells 
27718dceddSDavid Howells #ifndef _UAPI_I915_DRM_H_
28718dceddSDavid Howells #define _UAPI_I915_DRM_H_
29718dceddSDavid Howells 
301049102fSGabriel Laskar #include "drm.h"
31718dceddSDavid Howells 
32b1c1f5c4SEmil Velikov #if defined(__cplusplus)
33b1c1f5c4SEmil Velikov extern "C" {
34b1c1f5c4SEmil Velikov #endif
35b1c1f5c4SEmil Velikov 
36718dceddSDavid Howells /* Please note that modifications to all structs defined here are
37718dceddSDavid Howells  * subject to backwards-compatibility constraints.
38718dceddSDavid Howells  */
39718dceddSDavid Howells 
40cce723edSBen Widawsky /**
41cce723edSBen Widawsky  * DOC: uevents generated by i915 on its device node
42cce723edSBen Widawsky  *
43cce723edSBen Widawsky  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44cce723edSBen Widawsky  *	event from the gpu l3 cache. Additional information supplied is ROW,
4535a85ac6SBen Widawsky  *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
4635a85ac6SBen Widawsky  *	track of these events and if a specific cache-line seems to have a
4735a85ac6SBen Widawsky  *	persistent error remap it with the l3 remapping tool supplied in
4835a85ac6SBen Widawsky  *	intel-gpu-tools.  The value supplied with the event is always 1.
49cce723edSBen Widawsky  *
50cce723edSBen Widawsky  * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51cce723edSBen Widawsky  *	hangcheck. The error detection event is a good indicator of when things
52cce723edSBen Widawsky  *	began to go badly. The value supplied with the event is a 1 upon error
53cce723edSBen Widawsky  *	detection, and a 0 upon reset completion, signifying no more error
54cce723edSBen Widawsky  *	exists. NOTE: Disabling hangcheck or reset via module parameter will
55cce723edSBen Widawsky  *	cause the related events to not be seen.
56cce723edSBen Widawsky  *
57cce723edSBen Widawsky  * I915_RESET_UEVENT - Event is generated just before an attempt to reset
58cce723edSBen Widawsky  *	the GPU. The value supplied with the event is always 1. NOTE: Disabling
59cce723edSBen Widawsky  *	reset via module parameter will cause this event to not be seen.
60cce723edSBen Widawsky  */
61cce723edSBen Widawsky #define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
62cce723edSBen Widawsky #define I915_ERROR_UEVENT		"ERROR"
63cce723edSBen Widawsky #define I915_RESET_UEVENT		"RESET"
64718dceddSDavid Howells 
653373ce2eSImre Deak /*
669d1305efSChris Wilson  * i915_user_extension: Base class for defining a chain of extensions
679d1305efSChris Wilson  *
689d1305efSChris Wilson  * Many interfaces need to grow over time. In most cases we can simply
699d1305efSChris Wilson  * extend the struct and have userspace pass in more data. Another option,
709d1305efSChris Wilson  * as demonstrated by Vulkan's approach to providing extensions for forward
719d1305efSChris Wilson  * and backward compatibility, is to use a list of optional structs to
729d1305efSChris Wilson  * provide those extra details.
739d1305efSChris Wilson  *
749d1305efSChris Wilson  * The key advantage to using an extension chain is that it allows us to
759d1305efSChris Wilson  * redefine the interface more easily than an ever growing struct of
769d1305efSChris Wilson  * increasing complexity, and for large parts of that interface to be
779d1305efSChris Wilson  * entirely optional. The downside is more pointer chasing; chasing across
789d1305efSChris Wilson  * the __user boundary with pointers encapsulated inside u64.
799d1305efSChris Wilson  */
809d1305efSChris Wilson struct i915_user_extension {
819d1305efSChris Wilson 	__u64 next_extension; /* user pointer (encapsulated in u64) to the next extension in the chain; 0 terminates */
829d1305efSChris Wilson 	__u32 name; /* identifies this extension; interpretation depends on the ioctl consuming the chain */
839d1305efSChris Wilson 	__u32 flags; /* All undefined bits must be zero. */
849d1305efSChris Wilson 	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
859d1305efSChris Wilson };
869d1305efSChris Wilson 
879d1305efSChris Wilson /*
883373ce2eSImre Deak  * MOCS indexes used for GPU surfaces, defining the cacheability of the
893373ce2eSImre Deak  * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
903373ce2eSImre Deak  */
913373ce2eSImre Deak enum i915_mocs_table_index {
923373ce2eSImre Deak 	/*
933373ce2eSImre Deak 	 * Not cached anywhere, coherency between CPU and GPU accesses is
943373ce2eSImre Deak 	 * guaranteed.
953373ce2eSImre Deak 	 */
963373ce2eSImre Deak 	I915_MOCS_UNCACHED,	/* = 0; value is uABI, do not renumber */
973373ce2eSImre Deak 	/*
983373ce2eSImre Deak 	 * Cacheability and coherency controlled by the kernel automatically
993373ce2eSImre Deak 	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
1003373ce2eSImre Deak 	 * usage of the surface (used for display scanout or not).
1013373ce2eSImre Deak 	 */
1023373ce2eSImre Deak 	I915_MOCS_PTE,		/* = 1 */
1033373ce2eSImre Deak 	/*
1043373ce2eSImre Deak 	 * Cached in all GPU caches available on the platform.
1053373ce2eSImre Deak 	 * Coherency between CPU and GPU accesses to the surface is not
1063373ce2eSImre Deak 	 * guaranteed without extra synchronization.
1073373ce2eSImre Deak 	 */
1083373ce2eSImre Deak 	I915_MOCS_CACHED,	/* = 2 */
1093373ce2eSImre Deak };
1103373ce2eSImre Deak 
1111803fcbcSTvrtko Ursulin /*
1121803fcbcSTvrtko Ursulin  * Different engines serve different roles, and there may be more than one
1131803fcbcSTvrtko Ursulin  * engine serving each role. enum drm_i915_gem_engine_class provides a
1141803fcbcSTvrtko Ursulin  * classification of the role of the engine, which may be used when requesting
1151803fcbcSTvrtko Ursulin  * operations to be performed on a certain subset of engines, or for providing
1161803fcbcSTvrtko Ursulin  * information about that group.
1171803fcbcSTvrtko Ursulin  */
1181803fcbcSTvrtko Ursulin enum drm_i915_gem_engine_class {
1191803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_RENDER	= 0,
1201803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_COPY		= 1,
1211803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_VIDEO		= 2,
1221803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
1231803fcbcSTvrtko Ursulin 
124be03564bSChris Wilson 	/* should be kept compact */
125be03564bSChris Wilson 
1261803fcbcSTvrtko Ursulin 	I915_ENGINE_CLASS_INVALID	= -1 /* sentinel, not a valid class for a real engine */
1271803fcbcSTvrtko Ursulin };
1281803fcbcSTvrtko Ursulin 
129d1172ab3SChris Wilson /*
130d1172ab3SChris Wilson  * There may be more than one engine fulfilling any role within the system.
131d1172ab3SChris Wilson  * Each engine of a class is given a unique instance number and therefore
132d1172ab3SChris Wilson  * any engine can be specified by its class:instance tuplet. APIs that allow
133d1172ab3SChris Wilson  * access to any engine in the system will use struct i915_engine_class_instance
134d1172ab3SChris Wilson  * for this identification.
135d1172ab3SChris Wilson  */
136d1172ab3SChris Wilson struct i915_engine_class_instance {
137d1172ab3SChris Wilson 	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
138d1172ab3SChris Wilson 	__u16 engine_instance; /* unique instance number of the engine within its class */
139976b55f0SChris Wilson #define I915_ENGINE_CLASS_INVALID_NONE -1 /* NOTE(review): presumably "no engine in this slot" — confirm against driver usage */
1406d06779eSChris Wilson #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 /* NOTE(review): presumably marks a virtual engine — confirm */
141d1172ab3SChris Wilson };
142d1172ab3SChris Wilson 
143b46a33e2STvrtko Ursulin /**
144b46a33e2STvrtko Ursulin  * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
145b46a33e2STvrtko Ursulin  *
146b46a33e2STvrtko Ursulin  */
147b46a33e2STvrtko Ursulin 
148b46a33e2STvrtko Ursulin enum drm_i915_pmu_engine_sample {
149b46a33e2STvrtko Ursulin 	I915_SAMPLE_BUSY = 0,	/* selected by I915_PMU_ENGINE_BUSY() */
150b46a33e2STvrtko Ursulin 	I915_SAMPLE_WAIT = 1,	/* selected by I915_PMU_ENGINE_WAIT() */
151b552ae44STvrtko Ursulin 	I915_SAMPLE_SEMA = 2	/* selected by I915_PMU_ENGINE_SEMA() */
152b46a33e2STvrtko Ursulin };
153b46a33e2STvrtko Ursulin 
154b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_BITS (4) /* sample type occupies config bits [3:0] */
155b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_MASK (0xf) /* mask for the sample field */
156b46a33e2STvrtko Ursulin #define I915_PMU_SAMPLE_INSTANCE_BITS (8) /* engine instance occupies config bits [11:4] */
157b46a33e2STvrtko Ursulin #define I915_PMU_CLASS_SHIFT \
158b46a33e2STvrtko Ursulin 	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS) /* engine class starts at bit 12 */
159b46a33e2STvrtko Ursulin 
160b46a33e2STvrtko Ursulin #define __I915_PMU_ENGINE(class, instance, sample) \
161b46a33e2STvrtko Ursulin 	((class) << I915_PMU_CLASS_SHIFT | \
162b46a33e2STvrtko Ursulin 	(instance) << I915_PMU_SAMPLE_BITS | \
163b46a33e2STvrtko Ursulin 	(sample)) /* perf event config encoding: class | instance | sample */
164b46a33e2STvrtko Ursulin 
165b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_BUSY(class, instance) \
166b46a33e2STvrtko Ursulin 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
167b46a33e2STvrtko Ursulin 
168b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_WAIT(class, instance) \
169b46a33e2STvrtko Ursulin 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
170b46a33e2STvrtko Ursulin 
171b46a33e2STvrtko Ursulin #define I915_PMU_ENGINE_SEMA(class, instance) \
172b46a33e2STvrtko Ursulin 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
173b46a33e2STvrtko Ursulin 
174b46a33e2STvrtko Ursulin #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) /* non-engine events are numbered just past the engine config space */
175b46a33e2STvrtko Ursulin 
176b46a33e2STvrtko Ursulin #define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
177b46a33e2STvrtko Ursulin #define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
1780cd4684dSTvrtko Ursulin #define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
1796060b6aeSTvrtko Ursulin #define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
1806060b6aeSTvrtko Ursulin 
1813452fa30STvrtko Ursulin #define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
182b46a33e2STvrtko Ursulin 
183718dceddSDavid Howells /* Each region is a minimum of 16k, and there are at most 255 of them.
184718dceddSDavid Howells  */
185718dceddSDavid Howells #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
186718dceddSDavid Howells 				 * of chars for next/prev indices */
187718dceddSDavid Howells #define I915_LOG_MIN_TEX_REGION_SIZE 14	/* log2 of the 16k minimum region size */
188718dceddSDavid Howells 
189718dceddSDavid Howells typedef struct _drm_i915_init { /* argument for DRM_IOCTL_I915_INIT */
190718dceddSDavid Howells 	enum {
191718dceddSDavid Howells 		I915_INIT_DMA = 0x01,
192718dceddSDavid Howells 		I915_CLEANUP_DMA = 0x02,
193718dceddSDavid Howells 		I915_RESUME_DMA = 0x03
194718dceddSDavid Howells 	} func; /* selects which DMA init/cleanup/resume operation to perform */
195718dceddSDavid Howells 	unsigned int mmio_offset;
196718dceddSDavid Howells 	int sarea_priv_offset;
197718dceddSDavid Howells 	unsigned int ring_start;
198718dceddSDavid Howells 	unsigned int ring_end;
199718dceddSDavid Howells 	unsigned int ring_size;
200718dceddSDavid Howells 	unsigned int front_offset;
201718dceddSDavid Howells 	unsigned int back_offset;
202718dceddSDavid Howells 	unsigned int depth_offset;
203718dceddSDavid Howells 	unsigned int w;
204718dceddSDavid Howells 	unsigned int h;
205718dceddSDavid Howells 	unsigned int pitch;
206718dceddSDavid Howells 	unsigned int pitch_bits;
207718dceddSDavid Howells 	unsigned int back_pitch;
208718dceddSDavid Howells 	unsigned int depth_pitch;
209718dceddSDavid Howells 	unsigned int cpp; /* NOTE(review): presumably bytes (chars) per pixel — confirm in driver */
210718dceddSDavid Howells 	unsigned int chipset;
211718dceddSDavid Howells } drm_i915_init_t;
212718dceddSDavid Howells 
213718dceddSDavid Howells typedef struct _drm_i915_sarea { /* shared-area layout; part of the uABI — fields must not be reordered or removed */
214718dceddSDavid Howells 	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
215718dceddSDavid Howells 	int last_upload;	/* last time texture was uploaded */
216718dceddSDavid Howells 	int last_enqueue;	/* last time a buffer was enqueued */
217718dceddSDavid Howells 	int last_dispatch;	/* age of the most recently dispatched buffer */
218718dceddSDavid Howells 	int ctxOwner;		/* last context to upload state */
219718dceddSDavid Howells 	int texAge;
220718dceddSDavid Howells 	int pf_enabled;		/* is pageflipping allowed? */
221718dceddSDavid Howells 	int pf_active;
222718dceddSDavid Howells 	int pf_current_page;	/* which buffer is being displayed? */
223718dceddSDavid Howells 	int perf_boxes;		/* performance boxes to be displayed */
224718dceddSDavid Howells 	int width, height;      /* screen size in pixels */
225718dceddSDavid Howells 
226718dceddSDavid Howells 	drm_handle_t front_handle; /* front buffer */
227718dceddSDavid Howells 	int front_offset;
228718dceddSDavid Howells 	int front_size;
229718dceddSDavid Howells 
230718dceddSDavid Howells 	drm_handle_t back_handle; /* back buffer */
231718dceddSDavid Howells 	int back_offset;
232718dceddSDavid Howells 	int back_size;
233718dceddSDavid Howells 
234718dceddSDavid Howells 	drm_handle_t depth_handle; /* depth buffer */
235718dceddSDavid Howells 	int depth_offset;
236718dceddSDavid Howells 	int depth_size;
237718dceddSDavid Howells 
238718dceddSDavid Howells 	drm_handle_t tex_handle; /* texture region */
239718dceddSDavid Howells 	int tex_offset;
240718dceddSDavid Howells 	int tex_size;
241718dceddSDavid Howells 	int log_tex_granularity;
242718dceddSDavid Howells 	int pitch;
243718dceddSDavid Howells 	int rotation;           /* 0, 90, 180 or 270 */
244718dceddSDavid Howells 	int rotated_offset;
245718dceddSDavid Howells 	int rotated_size;
246718dceddSDavid Howells 	int rotated_pitch;
247718dceddSDavid Howells 	int virtualX, virtualY;
248718dceddSDavid Howells 
249718dceddSDavid Howells 	unsigned int front_tiled;
250718dceddSDavid Howells 	unsigned int back_tiled;
251718dceddSDavid Howells 	unsigned int depth_tiled;
252718dceddSDavid Howells 	unsigned int rotated_tiled;
253718dceddSDavid Howells 	unsigned int rotated2_tiled;
254718dceddSDavid Howells 
255718dceddSDavid Howells 	int pipeA_x; /* pipe geometry; aliased as plane* by the compat defines below */
256718dceddSDavid Howells 	int pipeA_y;
257718dceddSDavid Howells 	int pipeA_w;
258718dceddSDavid Howells 	int pipeA_h;
259718dceddSDavid Howells 	int pipeB_x;
260718dceddSDavid Howells 	int pipeB_y;
261718dceddSDavid Howells 	int pipeB_w;
262718dceddSDavid Howells 	int pipeB_h;
263718dceddSDavid Howells 
264718dceddSDavid Howells 	/* fill out some space for old userspace triple buffer */
265718dceddSDavid Howells 	drm_handle_t unused_handle;
266718dceddSDavid Howells 	__u32 unused1, unused2, unused3;
267718dceddSDavid Howells 
268718dceddSDavid Howells 	/* buffer object handles for static buffers. May change
269718dceddSDavid Howells 	 * over the lifetime of the client.
270718dceddSDavid Howells 	 */
271718dceddSDavid Howells 	__u32 front_bo_handle;
272718dceddSDavid Howells 	__u32 back_bo_handle;
273718dceddSDavid Howells 	__u32 unused_bo_handle;
274718dceddSDavid Howells 	__u32 depth_bo_handle;
275718dceddSDavid Howells 
276718dceddSDavid Howells } drm_i915_sarea_t;
277718dceddSDavid Howells 
278718dceddSDavid Howells /* due to userspace building against these headers we need some compat here */
279718dceddSDavid Howells #define planeA_x pipeA_x
280718dceddSDavid Howells #define planeA_y pipeA_y
281718dceddSDavid Howells #define planeA_w pipeA_w
282718dceddSDavid Howells #define planeA_h pipeA_h
283718dceddSDavid Howells #define planeB_x pipeB_x
284718dceddSDavid Howells #define planeB_y pipeB_y
285718dceddSDavid Howells #define planeB_w pipeB_w
286718dceddSDavid Howells #define planeB_h pipeB_h
287718dceddSDavid Howells 
288718dceddSDavid Howells /* Flags for the perf_boxes bitmask in drm_i915_sarea
289718dceddSDavid Howells  */
290718dceddSDavid Howells #define I915_BOX_RING_EMPTY    0x1
291718dceddSDavid Howells #define I915_BOX_FLIP          0x2
292718dceddSDavid Howells #define I915_BOX_WAIT          0x4
293718dceddSDavid Howells #define I915_BOX_TEXTURE_LOAD  0x8
294718dceddSDavid Howells #define I915_BOX_LOST_CONTEXT  0x10
295718dceddSDavid Howells 
29621631f10SDamien Lespiau /*
29721631f10SDamien Lespiau  * i915 specific ioctls.
29821631f10SDamien Lespiau  *
29921631f10SDamien Lespiau  * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
30021631f10SDamien Lespiau  * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
30121631f10SDamien Lespiau  * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
302718dceddSDavid Howells  */
303718dceddSDavid Howells #define DRM_I915_INIT		0x00
304718dceddSDavid Howells #define DRM_I915_FLUSH		0x01
305718dceddSDavid Howells #define DRM_I915_FLIP		0x02
306718dceddSDavid Howells #define DRM_I915_BATCHBUFFER	0x03
307718dceddSDavid Howells #define DRM_I915_IRQ_EMIT	0x04
308718dceddSDavid Howells #define DRM_I915_IRQ_WAIT	0x05
309718dceddSDavid Howells #define DRM_I915_GETPARAM	0x06
310718dceddSDavid Howells #define DRM_I915_SETPARAM	0x07
311718dceddSDavid Howells #define DRM_I915_ALLOC		0x08
312718dceddSDavid Howells #define DRM_I915_FREE		0x09
313718dceddSDavid Howells #define DRM_I915_INIT_HEAP	0x0a
314718dceddSDavid Howells #define DRM_I915_CMDBUFFER	0x0b
315718dceddSDavid Howells #define DRM_I915_DESTROY_HEAP	0x0c
316718dceddSDavid Howells #define DRM_I915_SET_VBLANK_PIPE	0x0d
317718dceddSDavid Howells #define DRM_I915_GET_VBLANK_PIPE	0x0e
318718dceddSDavid Howells #define DRM_I915_VBLANK_SWAP	0x0f
319718dceddSDavid Howells #define DRM_I915_HWS_ADDR	0x11	/* note: 0x10 and 0x12 are unused gaps */
320718dceddSDavid Howells #define DRM_I915_GEM_INIT	0x13
321718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER	0x14
322718dceddSDavid Howells #define DRM_I915_GEM_PIN	0x15
323718dceddSDavid Howells #define DRM_I915_GEM_UNPIN	0x16
324718dceddSDavid Howells #define DRM_I915_GEM_BUSY	0x17
325718dceddSDavid Howells #define DRM_I915_GEM_THROTTLE	0x18
326718dceddSDavid Howells #define DRM_I915_GEM_ENTERVT	0x19
327718dceddSDavid Howells #define DRM_I915_GEM_LEAVEVT	0x1a
328718dceddSDavid Howells #define DRM_I915_GEM_CREATE	0x1b
329718dceddSDavid Howells #define DRM_I915_GEM_PREAD	0x1c
330718dceddSDavid Howells #define DRM_I915_GEM_PWRITE	0x1d
331718dceddSDavid Howells #define DRM_I915_GEM_MMAP	0x1e
332718dceddSDavid Howells #define DRM_I915_GEM_SET_DOMAIN	0x1f
333718dceddSDavid Howells #define DRM_I915_GEM_SW_FINISH	0x20
334718dceddSDavid Howells #define DRM_I915_GEM_SET_TILING	0x21
335718dceddSDavid Howells #define DRM_I915_GEM_GET_TILING	0x22
336718dceddSDavid Howells #define DRM_I915_GEM_GET_APERTURE 0x23
337718dceddSDavid Howells #define DRM_I915_GEM_MMAP_GTT	0x24
338718dceddSDavid Howells #define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
339718dceddSDavid Howells #define DRM_I915_GEM_MADVISE	0x26
340718dceddSDavid Howells #define DRM_I915_OVERLAY_PUT_IMAGE	0x27
341718dceddSDavid Howells #define DRM_I915_OVERLAY_ATTRS	0x28
342718dceddSDavid Howells #define DRM_I915_GEM_EXECBUFFER2	0x29
343fec0445cSChris Wilson #define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2	/* same nr; the _WR variant differs only in ioctl direction (IOWR vs IOW) */
344718dceddSDavid Howells #define DRM_I915_GET_SPRITE_COLORKEY	0x2a
345718dceddSDavid Howells #define DRM_I915_SET_SPRITE_COLORKEY	0x2b
346718dceddSDavid Howells #define DRM_I915_GEM_WAIT	0x2c
347718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
348718dceddSDavid Howells #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
349718dceddSDavid Howells #define DRM_I915_GEM_SET_CACHING	0x2f
350718dceddSDavid Howells #define DRM_I915_GEM_GET_CACHING	0x30
351718dceddSDavid Howells #define DRM_I915_REG_READ		0x31
352b6359918SMika Kuoppala #define DRM_I915_GET_RESET_STATS	0x32
3535cc9ed4bSChris Wilson #define DRM_I915_GEM_USERPTR		0x33
354c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
355c9dc0f35SChris Wilson #define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
356eec688e1SRobert Bragg #define DRM_I915_PERF_OPEN		0x36
357f89823c2SLionel Landwerlin #define DRM_I915_PERF_ADD_CONFIG	0x37
358f89823c2SLionel Landwerlin #define DRM_I915_PERF_REMOVE_CONFIG	0x38
359a446ae2cSLionel Landwerlin #define DRM_I915_QUERY			0x39
3607f3f317aSChris Wilson #define DRM_I915_GEM_VM_CREATE		0x3a
3617f3f317aSChris Wilson #define DRM_I915_GEM_VM_DESTROY		0x3b
362be03564bSChris Wilson /* Must be kept compact -- no holes */
363718dceddSDavid Howells 
364718dceddSDavid Howells #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
365718dceddSDavid Howells #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
366718dceddSDavid Howells #define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
367718dceddSDavid Howells #define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
368718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
369718dceddSDavid Howells #define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
370718dceddSDavid Howells #define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
371718dceddSDavid Howells #define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
372718dceddSDavid Howells #define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
373718dceddSDavid Howells #define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
374718dceddSDavid Howells #define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
375718dceddSDavid Howells #define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
376718dceddSDavid Howells #define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
377718dceddSDavid Howells #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
378718dceddSDavid Howells #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
379718dceddSDavid Howells #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
380718dceddSDavid Howells #define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
381718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
382718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
383718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
384fec0445cSChris Wilson #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
385718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
386718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
387718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
388718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
389718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
390718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
391718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
392718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
393718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
394718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
395718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
396718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
397718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
398cc662126SAbdiel Janulgue #define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)	/* note: same ioctl number as DRM_IOCTL_I915_GEM_MMAP_GTT, different argument struct */
399718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
400718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
401718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
402718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
403718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
404718dceddSDavid Howells #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
405718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
406718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
407718dceddSDavid Howells #define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
408718dceddSDavid Howells #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
4092c60fae1STommi Rantala #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
410718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
411718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
412b9171541SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)	/* note: same ioctl number as DRM_IOCTL_I915_GEM_CONTEXT_CREATE, extended argument struct */
413718dceddSDavid Howells #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
414718dceddSDavid Howells #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
415b6359918SMika Kuoppala #define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
4165cc9ed4bSChris Wilson #define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
417c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
418c9dc0f35SChris Wilson #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
419eec688e1SRobert Bragg #define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
420f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
421f89823c2SLionel Landwerlin #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
422a446ae2cSLionel Landwerlin #define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
4237f3f317aSChris Wilson #define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
4247f3f317aSChris Wilson #define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
425718dceddSDavid Howells 
426718dceddSDavid Howells /* Allow drivers to submit batchbuffers directly to hardware, relying
427718dceddSDavid Howells  * on the security mechanisms provided by hardware.
428718dceddSDavid Howells  */
429718dceddSDavid Howells typedef struct drm_i915_batchbuffer { /* argument for DRM_IOCTL_I915_BATCHBUFFER */
430718dceddSDavid Howells 	int start;		/* agp offset */
431718dceddSDavid Howells 	int used;		/* nr bytes in use */
432718dceddSDavid Howells 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
433718dceddSDavid Howells 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
434718dceddSDavid Howells 	int num_cliprects;	/* multipass with multiple cliprects? */
435718dceddSDavid Howells 	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
436718dceddSDavid Howells } drm_i915_batchbuffer_t;
437718dceddSDavid Howells 
438718dceddSDavid Howells /* As above, but pass a pointer to userspace buffer which can be
439718dceddSDavid Howells  * validated by the kernel prior to sending to hardware.
440718dceddSDavid Howells  */
441718dceddSDavid Howells typedef struct _drm_i915_cmdbuffer { /* argument for DRM_IOCTL_I915_CMDBUFFER */
442718dceddSDavid Howells 	char __user *buf;	/* pointer to userspace command buffer */
443718dceddSDavid Howells 	int sz;			/* nr bytes in buf */
444718dceddSDavid Howells 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
445718dceddSDavid Howells 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
446718dceddSDavid Howells 	int num_cliprects;	/* multipass with multiple cliprects? */
447718dceddSDavid Howells 	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
448718dceddSDavid Howells } drm_i915_cmdbuffer_t;
449718dceddSDavid Howells 
450718dceddSDavid Howells /* Userspace can request & wait on irq's:
451718dceddSDavid Howells  */
452718dceddSDavid Howells typedef struct drm_i915_irq_emit { /* argument for DRM_IOCTL_I915_IRQ_EMIT (IOWR) */
453718dceddSDavid Howells 	int __user *irq_seq; /* NOTE(review): presumably filled with the emitted irq sequence number — confirm in driver */
454718dceddSDavid Howells } drm_i915_irq_emit_t;
455718dceddSDavid Howells 
456718dceddSDavid Howells typedef struct drm_i915_irq_wait { /* argument for DRM_IOCTL_I915_IRQ_WAIT */
457718dceddSDavid Howells 	int irq_seq; /* irq sequence number to wait for */
458718dceddSDavid Howells } drm_i915_irq_wait_t;
459718dceddSDavid Howells 
4604bdafb9dSChris Wilson /*
4614bdafb9dSChris Wilson  * Different modes of per-process Graphics Translation Table,
4624bdafb9dSChris Wilson  * see I915_PARAM_HAS_ALIASING_PPGTT
4634bdafb9dSChris Wilson  */
4644bdafb9dSChris Wilson #define I915_GEM_PPGTT_NONE	0
4654bdafb9dSChris Wilson #define I915_GEM_PPGTT_ALIASING	1
4664bdafb9dSChris Wilson #define I915_GEM_PPGTT_FULL	2
4674bdafb9dSChris Wilson 
468718dceddSDavid Howells /* Ioctl to query kernel params:
469718dceddSDavid Howells  */
470718dceddSDavid Howells #define I915_PARAM_IRQ_ACTIVE            1
471718dceddSDavid Howells #define I915_PARAM_ALLOW_BATCHBUFFER     2
472718dceddSDavid Howells #define I915_PARAM_LAST_DISPATCH         3
473718dceddSDavid Howells #define I915_PARAM_CHIPSET_ID            4
474718dceddSDavid Howells #define I915_PARAM_HAS_GEM               5
475718dceddSDavid Howells #define I915_PARAM_NUM_FENCES_AVAIL      6
476718dceddSDavid Howells #define I915_PARAM_HAS_OVERLAY           7
477718dceddSDavid Howells #define I915_PARAM_HAS_PAGEFLIPPING	 8
478718dceddSDavid Howells #define I915_PARAM_HAS_EXECBUF2          9
479718dceddSDavid Howells #define I915_PARAM_HAS_BSD		 10
480718dceddSDavid Howells #define I915_PARAM_HAS_BLT		 11
481718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_FENCING	 12
482718dceddSDavid Howells #define I915_PARAM_HAS_COHERENT_RINGS	 13
483718dceddSDavid Howells #define I915_PARAM_HAS_EXEC_CONSTANTS	 14
484718dceddSDavid Howells #define I915_PARAM_HAS_RELAXED_DELTA	 15
485718dceddSDavid Howells #define I915_PARAM_HAS_GEN7_SOL_RESET	 16
486718dceddSDavid Howells #define I915_PARAM_HAS_LLC     	 	 17
487718dceddSDavid Howells #define I915_PARAM_HAS_ALIASING_PPGTT	 18	/* queried value is one of the I915_GEM_PPGTT_* modes above */
488718dceddSDavid Howells #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
489718dceddSDavid Howells #define I915_PARAM_HAS_SEMAPHORES	 20
490718dceddSDavid Howells #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
491a1f2cc73SXiang, Haihao #define I915_PARAM_HAS_VEBOX		 22
492c2fb7916SDaniel Vetter #define I915_PARAM_HAS_SECURE_BATCHES	 23
493b45305fcSDaniel Vetter #define I915_PARAM_HAS_PINNED_BATCHES	 24
494ed5982e6SDaniel Vetter #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
495eef90ccbSChris Wilson #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
496651d794fSChris Wilson #define I915_PARAM_HAS_WT     	 	 27
497d728c8efSBrad Volkin #define I915_PARAM_CMD_PARSER_VERSION	 28
4986a2c4232SChris Wilson #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
4991816f923SAkash Goel #define I915_PARAM_MMAP_VERSION          30
50008e16dc8SZhipeng Gong #define I915_PARAM_HAS_BSD2		 31
50127cd4461SNeil Roberts #define I915_PARAM_REVISION              32
502a1559ffeSJeff McGee #define I915_PARAM_SUBSLICE_TOTAL	 33
503a1559ffeSJeff McGee #define I915_PARAM_EU_TOTAL		 34
50449e4d842SChris Wilson #define I915_PARAM_HAS_GPU_RESET	 35
505a9ed33caSAbdiel Janulgue #define I915_PARAM_HAS_RESOURCE_STREAMER 36
506506a8e87SChris Wilson #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
50737f501afSarun.siluvery@linux.intel.com #define I915_PARAM_HAS_POOLED_EU	 38
50837f501afSarun.siluvery@linux.intel.com #define I915_PARAM_MIN_EU_IN_POOL	 39
5094cc69075SChris Wilson #define I915_PARAM_MMAP_GTT_VERSION	 40
510718dceddSDavid Howells 
511bf64e0b0SChris Wilson /*
512bf64e0b0SChris Wilson  * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
5130de9136dSChris Wilson  * priorities and the driver will attempt to execute batches in priority order.
514bf64e0b0SChris Wilson  * The param returns a capability bitmask, nonzero implies that the scheduler
515bf64e0b0SChris Wilson  * is enabled, with different features present according to the mask.
516ac14fbd4SChris Wilson  *
517ac14fbd4SChris Wilson  * The initial priority for each batch is supplied by the context and is
518ac14fbd4SChris Wilson  * controlled via I915_CONTEXT_PARAM_PRIORITY.
5190de9136dSChris Wilson  */
5200de9136dSChris Wilson #define I915_PARAM_HAS_SCHEDULER	 41
521bf64e0b0SChris Wilson #define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
522bf64e0b0SChris Wilson #define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
523bf64e0b0SChris Wilson #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
524e8861964SChris Wilson #define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
525bf73fc0fSChris Wilson #define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
526bf64e0b0SChris Wilson 
5275464cd65SAnusha Srivatsa #define I915_PARAM_HUC_STATUS		 42
5280de9136dSChris Wilson 
52977ae9957SChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
53077ae9957SChris Wilson  * synchronisation with implicit fencing on individual objects.
53177ae9957SChris Wilson  * See EXEC_OBJECT_ASYNC.
53277ae9957SChris Wilson  */
53377ae9957SChris Wilson #define I915_PARAM_HAS_EXEC_ASYNC	 43
53477ae9957SChris Wilson 
535fec0445cSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
536fec0445cSChris Wilson  * both being able to pass in a sync_file fd to wait upon before executing,
537fec0445cSChris Wilson  * and being able to return a new sync_file fd that is signaled when the
538fec0445cSChris Wilson  * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
539fec0445cSChris Wilson  */
540fec0445cSChris Wilson #define I915_PARAM_HAS_EXEC_FENCE	 44
541fec0445cSChris Wilson 
542b0fd47adSChris Wilson /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
543b0fd47adSChris Wilson  * user specified buffers for post-mortem debugging of GPU hangs. See
544b0fd47adSChris Wilson  * EXEC_OBJECT_CAPTURE.
545b0fd47adSChris Wilson  */
546b0fd47adSChris Wilson #define I915_PARAM_HAS_EXEC_CAPTURE	 45
547b0fd47adSChris Wilson 
5487fed555cSRobert Bragg #define I915_PARAM_SLICE_MASK		 46
5497fed555cSRobert Bragg 
550f5320233SRobert Bragg /* Assuming it's uniform for each slice, this queries the mask of subslices
551f5320233SRobert Bragg  * per-slice for this system.
552f5320233SRobert Bragg  */
553f5320233SRobert Bragg #define I915_PARAM_SUBSLICE_MASK	 47
554f5320233SRobert Bragg 
5551a71cf2fSChris Wilson /*
5561a71cf2fSChris Wilson  * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
5571a71cf2fSChris Wilson  * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
5581a71cf2fSChris Wilson  */
5591a71cf2fSChris Wilson #define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
5601a71cf2fSChris Wilson 
561cf6e7bacSJason Ekstrand /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
562cf6e7bacSJason Ekstrand  * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
563cf6e7bacSJason Ekstrand  */
564cf6e7bacSJason Ekstrand #define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
565cf6e7bacSJason Ekstrand 
566d2b4b979SChris Wilson /*
567d2b4b979SChris Wilson  * Query whether every context (both per-file default and user created) is
568d2b4b979SChris Wilson  * isolated (insofar as HW supports). If this parameter is not true, then
569d2b4b979SChris Wilson  * freshly created contexts may inherit values from an existing context,
570d2b4b979SChris Wilson  * rather than default HW values. If true, it also ensures (insofar as HW
571d2b4b979SChris Wilson  * supports) that all state set by this context will not leak to any other
572d2b4b979SChris Wilson  * context.
573d2b4b979SChris Wilson  *
574d2b4b979SChris Wilson  * As not every engine across every gen support contexts, the returned
575d2b4b979SChris Wilson  * value reports the support of context isolation for individual engines by
576d2b4b979SChris Wilson  * returning a bitmask of each engine class set to true if that class supports
577d2b4b979SChris Wilson  * isolation.
578d2b4b979SChris Wilson  */
579d2b4b979SChris Wilson #define I915_PARAM_HAS_CONTEXT_ISOLATION 50
580d2b4b979SChris Wilson 
581dab91783SLionel Landwerlin /* Frequency of the command streamer timestamps given by the *_TIMESTAMP
582dab91783SLionel Landwerlin  * registers. This used to be fixed per platform but from CNL onwards, this
583dab91783SLionel Landwerlin  * might vary depending on the parts.
584dab91783SLionel Landwerlin  */
585dab91783SLionel Landwerlin #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
586dab91783SLionel Landwerlin 
587900ccf30SChris Wilson /*
588900ccf30SChris Wilson  * Once upon a time we supposed that writes through the GGTT would be
589900ccf30SChris Wilson  * immediately in physical memory (once flushed out of the CPU path). However,
590900ccf30SChris Wilson  * on a few different processors and chipsets, this is not necessarily the case
591900ccf30SChris Wilson  * as the writes appear to be buffered internally. Thus a read of the backing
592900ccf30SChris Wilson  * storage (physical memory) via a different path (with different physical tags
593900ccf30SChris Wilson  * to the indirect write via the GGTT) will see stale values from before
594900ccf30SChris Wilson  * the GGTT write. Inside the kernel, we can for the most part keep track of
595900ccf30SChris Wilson  * the different read/write domains in use (e.g. set-domain), but the assumption
596900ccf30SChris Wilson  * of coherency is baked into the ABI, hence reporting its true state in this
597900ccf30SChris Wilson  * parameter.
598900ccf30SChris Wilson  *
599900ccf30SChris Wilson  * Reports true when writes via mmap_gtt are immediately visible following an
600900ccf30SChris Wilson  * lfence to flush the WCB.
601900ccf30SChris Wilson  *
602900ccf30SChris Wilson  * Reports false when writes via mmap_gtt are indeterminately delayed in an
603900ccf30SChris Wilson  * internal buffer and are _not_ immediately visible to third parties accessing
604900ccf30SChris Wilson  * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
605900ccf30SChris Wilson  * communications channel when reporting false is strongly disadvised.
606900ccf30SChris Wilson  */
607900ccf30SChris Wilson #define I915_PARAM_MMAP_GTT_COHERENT	52
608900ccf30SChris Wilson 
609a88b6e4cSChris Wilson /*
610a88b6e4cSChris Wilson  * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
611a88b6e4cSChris Wilson  * execution through use of explicit fence support.
612a88b6e4cSChris Wilson  * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
613a88b6e4cSChris Wilson  */
614a88b6e4cSChris Wilson #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
615b8d49f28SLionel Landwerlin 
616b8d49f28SLionel Landwerlin /*
617b8d49f28SLionel Landwerlin  * Revision of the i915-perf uAPI. The value returned helps determine what
618b8d49f28SLionel Landwerlin  * i915-perf features are available. See drm_i915_perf_property_id.
619b8d49f28SLionel Landwerlin  */
620b8d49f28SLionel Landwerlin #define I915_PARAM_PERF_REVISION	54
621b8d49f28SLionel Landwerlin 
622be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
623be03564bSChris Wilson 
/* DRM_I915_GETPARAM payload: param is one of the I915_PARAM_* values above;
 * the kernel writes the result through the value pointer.
 */
624718dceddSDavid Howells typedef struct drm_i915_getparam {
62516f7249dSArtem Savkov 	__s32 param;
626346add78SDaniel Vetter 	/*
627346add78SDaniel Vetter 	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
628346add78SDaniel Vetter 	 * compat32 code. Don't repeat this mistake.
629346add78SDaniel Vetter 	 */
630718dceddSDavid Howells 	int __user *value;
631718dceddSDavid Howells } drm_i915_getparam_t;
632718dceddSDavid Howells 
633718dceddSDavid Howells /* Ioctl to set kernel params:
634718dceddSDavid Howells  */
635718dceddSDavid Howells #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
636718dceddSDavid Howells #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
637718dceddSDavid Howells #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
638718dceddSDavid Howells #define I915_SETPARAM_NUM_USED_FENCES                     4
639be03564bSChris Wilson /* Must be kept compact -- no holes */
640718dceddSDavid Howells 
/* DRM_I915_SETPARAM payload: param is one of the I915_SETPARAM_* values above. */
641718dceddSDavid Howells typedef struct drm_i915_setparam {
642718dceddSDavid Howells 	int param;
643718dceddSDavid Howells 	int value;
644718dceddSDavid Howells } drm_i915_setparam_t;
645718dceddSDavid Howells 
646718dceddSDavid Howells /* A memory manager for regions of shared memory:
647718dceddSDavid Howells  */
648718dceddSDavid Howells #define I915_MEM_REGION_AGP 1
649718dceddSDavid Howells 
/* Legacy shared-memory allocator: allocate from a memory region (see
 * I915_MEM_REGION_AGP); the resulting offset is written back to userspace.
 */
650718dceddSDavid Howells typedef struct drm_i915_mem_alloc {
651718dceddSDavid Howells 	int region;
652718dceddSDavid Howells 	int alignment;
653718dceddSDavid Howells 	int size;
654718dceddSDavid Howells 	int __user *region_offset;	/* offset from start of fb or agp */
655718dceddSDavid Howells } drm_i915_mem_alloc_t;
656718dceddSDavid Howells 
/* Free a block previously returned by drm_i915_mem_alloc. */
657718dceddSDavid Howells typedef struct drm_i915_mem_free {
658718dceddSDavid Howells 	int region;
659718dceddSDavid Howells 	int region_offset;
660718dceddSDavid Howells } drm_i915_mem_free_t;
661718dceddSDavid Howells 
/* Initialize the memory manager heap for a region. */
662718dceddSDavid Howells typedef struct drm_i915_mem_init_heap {
663718dceddSDavid Howells 	int region;
664718dceddSDavid Howells 	int size;
665718dceddSDavid Howells 	int start;
666718dceddSDavid Howells } drm_i915_mem_init_heap_t;
667718dceddSDavid Howells 
668718dceddSDavid Howells /* Allow memory manager to be torn down and re-initialized (eg on
669718dceddSDavid Howells  * rotate):
670718dceddSDavid Howells  */
671718dceddSDavid Howells typedef struct drm_i915_mem_destroy_heap {
672718dceddSDavid Howells 	int region;
673718dceddSDavid Howells } drm_i915_mem_destroy_heap_t;
674718dceddSDavid Howells 
675718dceddSDavid Howells /* Allow X server to configure which pipes to monitor for vblank signals
676718dceddSDavid Howells  */
677718dceddSDavid Howells #define	DRM_I915_VBLANK_PIPE_A	1
678718dceddSDavid Howells #define	DRM_I915_VBLANK_PIPE_B	2
679718dceddSDavid Howells 
/* pipe is a mask of DRM_I915_VBLANK_PIPE_A / DRM_I915_VBLANK_PIPE_B. */
680718dceddSDavid Howells typedef struct drm_i915_vblank_pipe {
681718dceddSDavid Howells 	int pipe;
682718dceddSDavid Howells } drm_i915_vblank_pipe_t;
683718dceddSDavid Howells 
684718dceddSDavid Howells /* Schedule buffer swap at given vertical blank:
685718dceddSDavid Howells  */
686718dceddSDavid Howells typedef struct drm_i915_vblank_swap {
687718dceddSDavid Howells 	drm_drawable_t drawable;	/* drawable to swap */
688718dceddSDavid Howells 	enum drm_vblank_seq_type seqtype;	/* absolute or relative sequence */
689718dceddSDavid Howells 	unsigned int sequence;		/* vblank count to swap at */
690718dceddSDavid Howells } drm_i915_vblank_swap_t;
691718dceddSDavid Howells 
/* Address for the hardware status page. */
692718dceddSDavid Howells typedef struct drm_i915_hws_addr {
693718dceddSDavid Howells 	__u64 addr;
694718dceddSDavid Howells } drm_i915_hws_addr_t;
695718dceddSDavid Howells 
/* Describes the GTT range handed to the DRM memory manager (legacy GEM init). */
696718dceddSDavid Howells struct drm_i915_gem_init {
697718dceddSDavid Howells 	/**
698718dceddSDavid Howells 	 * Beginning offset in the GTT to be managed by the DRM memory
699718dceddSDavid Howells 	 * manager.
700718dceddSDavid Howells 	 */
701718dceddSDavid Howells 	__u64 gtt_start;
702718dceddSDavid Howells 	/**
703718dceddSDavid Howells 	 * Ending offset in the GTT to be managed by the DRM memory
704718dceddSDavid Howells 	 * manager.
705718dceddSDavid Howells 	 */
706718dceddSDavid Howells 	__u64 gtt_end;
707718dceddSDavid Howells };
708718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_CREATE payload: create a new GEM object of given size. */
709718dceddSDavid Howells struct drm_i915_gem_create {
710718dceddSDavid Howells 	/**
711718dceddSDavid Howells 	 * Requested size for the object.
712718dceddSDavid Howells 	 *
713718dceddSDavid Howells 	 * The (page-aligned) allocated size for the object will be returned.
714718dceddSDavid Howells 	 */
715718dceddSDavid Howells 	__u64 size;
716718dceddSDavid Howells 	/**
717718dceddSDavid Howells 	 * Returned handle for the object.
718718dceddSDavid Howells 	 *
719718dceddSDavid Howells 	 * Object handles are nonzero.
720718dceddSDavid Howells 	 */
721718dceddSDavid Howells 	__u32 handle;
722718dceddSDavid Howells 	__u32 pad;	/* padding to keep the struct 64-bit aligned; mbz */
723718dceddSDavid Howells };
724718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_PREAD payload: copy a byte range out of a GEM object. */
725718dceddSDavid Howells struct drm_i915_gem_pread {
726718dceddSDavid Howells 	/** Handle for the object being read. */
727718dceddSDavid Howells 	__u32 handle;
728718dceddSDavid Howells 	__u32 pad;
729718dceddSDavid Howells 	/** Offset into the object to read from */
730718dceddSDavid Howells 	__u64 offset;
731718dceddSDavid Howells 	/** Length of data to read */
732718dceddSDavid Howells 	__u64 size;
733718dceddSDavid Howells 	/**
734718dceddSDavid Howells 	 * Pointer to write the data into.
735718dceddSDavid Howells 	 *
736718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
737718dceddSDavid Howells 	 */
738718dceddSDavid Howells 	__u64 data_ptr;
739718dceddSDavid Howells };
740718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_PWRITE payload: copy a byte range into a GEM object. */
741718dceddSDavid Howells struct drm_i915_gem_pwrite {
742718dceddSDavid Howells 	/** Handle for the object being written to. */
743718dceddSDavid Howells 	__u32 handle;
744718dceddSDavid Howells 	__u32 pad;
745718dceddSDavid Howells 	/** Offset into the object to write to */
746718dceddSDavid Howells 	__u64 offset;
747718dceddSDavid Howells 	/** Length of data to write */
748718dceddSDavid Howells 	__u64 size;
749718dceddSDavid Howells 	/**
750718dceddSDavid Howells 	 * Pointer to read the data from.
751718dceddSDavid Howells 	 *
752718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
753718dceddSDavid Howells 	 */
754718dceddSDavid Howells 	__u64 data_ptr;
755718dceddSDavid Howells };
756718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_MMAP payload: map object pages into the CPU address
 * space; the resulting address is returned in addr_ptr.
 */
757718dceddSDavid Howells struct drm_i915_gem_mmap {
758718dceddSDavid Howells 	/** Handle for the object being mapped. */
759718dceddSDavid Howells 	__u32 handle;
760718dceddSDavid Howells 	__u32 pad;
761718dceddSDavid Howells 	/** Offset in the object to map. */
762718dceddSDavid Howells 	__u64 offset;
763718dceddSDavid Howells 	/**
764718dceddSDavid Howells 	 * Length of data to map.
765718dceddSDavid Howells 	 *
766718dceddSDavid Howells 	 * The value will be page-aligned.
767718dceddSDavid Howells 	 */
768718dceddSDavid Howells 	__u64 size;
769718dceddSDavid Howells 	/**
770718dceddSDavid Howells 	 * Returned pointer the data was mapped at.
771718dceddSDavid Howells 	 *
772718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
773718dceddSDavid Howells 	 */
774718dceddSDavid Howells 	__u64 addr_ptr;
7751816f923SAkash Goel 
7761816f923SAkash Goel 	/**
7771816f923SAkash Goel 	 * Flags for extended behaviour.
7781816f923SAkash Goel 	 *
7791816f923SAkash Goel 	 * Added in version 2.
7801816f923SAkash Goel 	 */
7811816f923SAkash Goel 	__u64 flags;
7821816f923SAkash Goel #define I915_MMAP_WC 0x1
783718dceddSDavid Howells };
784718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_MMAP_GTT payload: obtain a fake offset for mapping the
 * object through the GTT aperture via mmap(2) on the DRM fd.
 */
785718dceddSDavid Howells struct drm_i915_gem_mmap_gtt {
786718dceddSDavid Howells 	/** Handle for the object being mapped. */
787718dceddSDavid Howells 	__u32 handle;
788718dceddSDavid Howells 	__u32 pad;
789718dceddSDavid Howells 	/**
790718dceddSDavid Howells 	 * Fake offset to use for subsequent mmap call
791718dceddSDavid Howells 	 *
792718dceddSDavid Howells 	 * This is a fixed-size type for 32/64 compatibility.
793718dceddSDavid Howells 	 */
794718dceddSDavid Howells 	__u64 offset;
795718dceddSDavid Howells };
796718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_MMAP_OFFSET payload: like mmap_gtt but with an explicit
 * caching mode selected via the MMAP_OFFSET flags below.
 */
797cc662126SAbdiel Janulgue struct drm_i915_gem_mmap_offset {
798cc662126SAbdiel Janulgue 	/** Handle for the object being mapped. */
799cc662126SAbdiel Janulgue 	__u32 handle;
800cc662126SAbdiel Janulgue 	__u32 pad;
801cc662126SAbdiel Janulgue 	/**
802cc662126SAbdiel Janulgue 	 * Fake offset to use for subsequent mmap call
803cc662126SAbdiel Janulgue 	 *
804cc662126SAbdiel Janulgue 	 * This is a fixed-size type for 32/64 compatibility.
805cc662126SAbdiel Janulgue 	 */
806cc662126SAbdiel Janulgue 	__u64 offset;
807cc662126SAbdiel Janulgue 
808cc662126SAbdiel Janulgue 	/**
809cc662126SAbdiel Janulgue 	 * Flags for extended behaviour.
810cc662126SAbdiel Janulgue 	 *
811cc662126SAbdiel Janulgue 	 * It is mandatory that one of the MMAP_OFFSET types
812cc662126SAbdiel Janulgue 	 * (GTT, WC, WB, UC, etc) should be included.
813cc662126SAbdiel Janulgue 	 */
814cc662126SAbdiel Janulgue 	__u64 flags;
815cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_GTT 0
816cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WC  1
817cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WB  2
818cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_UC  3
819cc662126SAbdiel Janulgue 
820cc662126SAbdiel Janulgue 	/*
821cc662126SAbdiel Janulgue 	 * Zero-terminated chain of extensions.
822cc662126SAbdiel Janulgue 	 *
823cc662126SAbdiel Janulgue 	 * No current extensions defined; mbz.
824cc662126SAbdiel Janulgue 	 */
825cc662126SAbdiel Janulgue 	__u64 extensions;
826cc662126SAbdiel Janulgue };
827cc662126SAbdiel Janulgue 
/* DRM_IOCTL_I915_GEM_SET_DOMAIN payload: move the object into the given
 * read/write domains (see the I915_GEM_DOMAIN_* masks below).
 */
828718dceddSDavid Howells struct drm_i915_gem_set_domain {
829718dceddSDavid Howells 	/** Handle for the object */
830718dceddSDavid Howells 	__u32 handle;
831718dceddSDavid Howells 
832718dceddSDavid Howells 	/** New read domains */
833718dceddSDavid Howells 	__u32 read_domains;
834718dceddSDavid Howells 
835718dceddSDavid Howells 	/** New write domain */
836718dceddSDavid Howells 	__u32 write_domain;
837718dceddSDavid Howells };
838718dceddSDavid Howells 
/* DRM_IOCTL_I915_GEM_SW_FINISH payload: signal that CPU access is complete. */
839718dceddSDavid Howells struct drm_i915_gem_sw_finish {
840718dceddSDavid Howells 	/** Handle for the object */
841718dceddSDavid Howells 	__u32 handle;
842718dceddSDavid Howells };
843718dceddSDavid Howells 
/* One relocation: a location inside a buffer that must be patched with the
 * (possibly updated) GTT offset of a target buffer before execution.
 */
844718dceddSDavid Howells struct drm_i915_gem_relocation_entry {
845718dceddSDavid Howells 	/**
846718dceddSDavid Howells 	 * Handle of the buffer being pointed to by this relocation entry.
847718dceddSDavid Howells 	 *
848718dceddSDavid Howells 	 * It's appealing to make this be an index into the mm_validate_entry
849718dceddSDavid Howells 	 * list to refer to the buffer, but this allows the driver to create
850718dceddSDavid Howells 	 * a relocation list for state buffers and not re-write it per
851718dceddSDavid Howells 	 * exec using the buffer.
852718dceddSDavid Howells 	 */
853718dceddSDavid Howells 	__u32 target_handle;
854718dceddSDavid Howells 
855718dceddSDavid Howells 	/**
856718dceddSDavid Howells 	 * Value to be added to the offset of the target buffer to make up
857718dceddSDavid Howells 	 * the relocation entry.
858718dceddSDavid Howells 	 */
859718dceddSDavid Howells 	__u32 delta;
860718dceddSDavid Howells 
861718dceddSDavid Howells 	/** Offset in the buffer the relocation entry will be written into */
862718dceddSDavid Howells 	__u64 offset;
863718dceddSDavid Howells 
864718dceddSDavid Howells 	/**
865718dceddSDavid Howells 	 * Offset value of the target buffer that the relocation entry was last
866718dceddSDavid Howells 	 * written as.
867718dceddSDavid Howells 	 *
868718dceddSDavid Howells 	 * If the buffer has the same offset as last time, we can skip syncing
869718dceddSDavid Howells 	 * and writing the relocation.  This value is written back out by
870718dceddSDavid Howells 	 * the execbuffer ioctl when the relocation is written.
871718dceddSDavid Howells 	 */
872718dceddSDavid Howells 	__u64 presumed_offset;
873718dceddSDavid Howells 
874718dceddSDavid Howells 	/**
875718dceddSDavid Howells 	 * Target memory domains read by this operation.
876718dceddSDavid Howells 	 */
877718dceddSDavid Howells 	__u32 read_domains;
878718dceddSDavid Howells 
879718dceddSDavid Howells 	/**
880718dceddSDavid Howells 	 * Target memory domains written by this operation.
881718dceddSDavid Howells 	 *
882718dceddSDavid Howells 	 * Note that only one domain may be written by the whole
883718dceddSDavid Howells 	 * execbuffer operation, so that where there are conflicts,
884718dceddSDavid Howells 	 * the application will get -EINVAL back.
885718dceddSDavid Howells 	 */
886718dceddSDavid Howells 	__u32 write_domain;
887718dceddSDavid Howells };
888718dceddSDavid Howells 
889718dceddSDavid Howells /** @{
890718dceddSDavid Howells  * Intel memory domains
891718dceddSDavid Howells  *
892718dceddSDavid Howells  * Most of these just align with the various caches in
893718dceddSDavid Howells  * the system and are used to flush and invalidate as
894718dceddSDavid Howells  * objects end up cached in different domains.
895718dceddSDavid Howells  */
896718dceddSDavid Howells /** CPU cache */
897718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU		0x00000001
898718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */
899718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER		0x00000002
900718dceddSDavid Howells /** Sampler cache, used by texture engine */
901718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER		0x00000004
902718dceddSDavid Howells /** Command queue, used to load batch buffers */
903718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND		0x00000008
904718dceddSDavid Howells /** Instruction cache, used by shader programs */
905718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
906718dceddSDavid Howells /** Vertex address cache */
907718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX		0x00000020
908718dceddSDavid Howells /** GTT domain - aperture and scanout */
909718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT		0x00000040
910e22d8e3cSChris Wilson /** WC domain - uncached access */
911e22d8e3cSChris Wilson #define I915_GEM_DOMAIN_WC		0x00000080
912718dceddSDavid Howells /** @} */
913718dceddSDavid Howells 
/* Per-buffer entry for the original (v1) execbuffer ioctl.
 * Superseded by struct drm_i915_gem_exec_object2.
 */
914718dceddSDavid Howells struct drm_i915_gem_exec_object {
915718dceddSDavid Howells 	/**
916718dceddSDavid Howells 	 * User's handle for a buffer to be bound into the GTT for this
917718dceddSDavid Howells 	 * operation.
918718dceddSDavid Howells 	 */
919718dceddSDavid Howells 	__u32 handle;
920718dceddSDavid Howells 
921718dceddSDavid Howells 	/** Number of relocations to be performed on this buffer */
922718dceddSDavid Howells 	__u32 relocation_count;
923718dceddSDavid Howells 	/**
924718dceddSDavid Howells 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
925718dceddSDavid Howells 	 * the relocations to be performed in this buffer.
926718dceddSDavid Howells 	 */
927718dceddSDavid Howells 	__u64 relocs_ptr;
928718dceddSDavid Howells 
929718dceddSDavid Howells 	/** Required alignment in graphics aperture */
930718dceddSDavid Howells 	__u64 alignment;
931718dceddSDavid Howells 
932718dceddSDavid Howells 	/**
933718dceddSDavid Howells 	 * Returned value of the updated offset of the object, for future
934718dceddSDavid Howells 	 * presumed_offset writes.
935718dceddSDavid Howells 	 */
936718dceddSDavid Howells 	__u64 offset;
937718dceddSDavid Howells };
938718dceddSDavid Howells 
/* Payload for the original (v1) execbuffer ioctl.
 * Superseded by struct drm_i915_gem_execbuffer2.
 */
939718dceddSDavid Howells struct drm_i915_gem_execbuffer {
940718dceddSDavid Howells 	/**
941718dceddSDavid Howells 	 * List of buffers to be validated with their relocations to be
942718dceddSDavid Howells 	 * performed on them.
943718dceddSDavid Howells 	 *
944718dceddSDavid Howells 	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
945718dceddSDavid Howells 	 *
946718dceddSDavid Howells 	 * These buffers must be listed in an order such that all relocations
947718dceddSDavid Howells 	 * a buffer is performing refer to buffers that have already appeared
948718dceddSDavid Howells 	 * in the validate list.
949718dceddSDavid Howells 	 */
950718dceddSDavid Howells 	__u64 buffers_ptr;
951718dceddSDavid Howells 	__u32 buffer_count;
952718dceddSDavid Howells 
953718dceddSDavid Howells 	/** Offset in the batchbuffer to start execution from. */
954718dceddSDavid Howells 	__u32 batch_start_offset;
955718dceddSDavid Howells 	/** Bytes used in batchbuffer from batch_start_offset */
956718dceddSDavid Howells 	__u32 batch_len;
957718dceddSDavid Howells 	__u32 DR1;
958718dceddSDavid Howells 	__u32 DR4;
959718dceddSDavid Howells 	__u32 num_cliprects;
960718dceddSDavid Howells 	/** This is a struct drm_clip_rect *cliprects */
961718dceddSDavid Howells 	__u64 cliprects_ptr;
962718dceddSDavid Howells };
963718dceddSDavid Howells 
/* Per-buffer entry for DRM_IOCTL_I915_GEM_EXECBUFFER2; adds flags and
 * pad_to_size over the v1 struct drm_i915_gem_exec_object.
 */
964718dceddSDavid Howells struct drm_i915_gem_exec_object2 {
965718dceddSDavid Howells 	/**
966718dceddSDavid Howells 	 * User's handle for a buffer to be bound into the GTT for this
967718dceddSDavid Howells 	 * operation.
968718dceddSDavid Howells 	 */
969718dceddSDavid Howells 	__u32 handle;
970718dceddSDavid Howells 
971718dceddSDavid Howells 	/** Number of relocations to be performed on this buffer */
972718dceddSDavid Howells 	__u32 relocation_count;
973718dceddSDavid Howells 	/**
974718dceddSDavid Howells 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
975718dceddSDavid Howells 	 * the relocations to be performed in this buffer.
976718dceddSDavid Howells 	 */
977718dceddSDavid Howells 	__u64 relocs_ptr;
978718dceddSDavid Howells 
979718dceddSDavid Howells 	/** Required alignment in graphics aperture */
980718dceddSDavid Howells 	__u64 alignment;
981718dceddSDavid Howells 
982718dceddSDavid Howells 	/**
983506a8e87SChris Wilson 	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
984506a8e87SChris Wilson 	 * the user with the GTT offset at which this object will be pinned.
985506a8e87SChris Wilson 	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
986506a8e87SChris Wilson 	 * presumed_offset of the object.
987506a8e87SChris Wilson 	 * During execbuffer2 the kernel populates it with the value of the
988506a8e87SChris Wilson 	 * current GTT offset of the object, for future presumed_offset writes.
989718dceddSDavid Howells 	 */
990718dceddSDavid Howells 	__u64 offset;
991718dceddSDavid Howells 
992718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
993ed5982e6SDaniel Vetter #define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
994ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE		 (1<<2)
995101b506aSMichel Thierry #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
996506a8e87SChris Wilson #define EXEC_OBJECT_PINNED		 (1<<4)
99791b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
99877ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and
99977ae9957SChris Wilson  * synchronises operations with outstanding rendering. This includes
100077ae9957SChris Wilson  * rendering on other devices if exported via dma-buf. However, sometimes
100177ae9957SChris Wilson  * this tracking is too coarse and the user knows better. For example,
100277ae9957SChris Wilson  * if the object is split into non-overlapping ranges shared between different
100377ae9957SChris Wilson  * clients or engines (i.e. suballocating objects), the implicit tracking
100477ae9957SChris Wilson  * by kernel assumes that each operation affects the whole object rather
100577ae9957SChris Wilson  * than an individual range, causing needless synchronisation between clients.
100677ae9957SChris Wilson  * The kernel will also forgo any CPU cache flushes prior to rendering from
100777ae9957SChris Wilson  * the object as the client is expected to be also handling such domain
100877ae9957SChris Wilson  * tracking.
100977ae9957SChris Wilson  *
101077ae9957SChris Wilson  * The kernel maintains the implicit tracking in order to manage resources
101177ae9957SChris Wilson  * used by the GPU - this flag only disables the synchronisation prior to
101277ae9957SChris Wilson  * rendering with this object in this execbuf.
101377ae9957SChris Wilson  *
101477ae9957SChris Wilson  * Opting out of implicit synchronisation requires the user to do its own
101577ae9957SChris Wilson  * explicit tracking to avoid rendering corruption. See, for example,
101677ae9957SChris Wilson  * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
101777ae9957SChris Wilson  */
101877ae9957SChris Wilson #define EXEC_OBJECT_ASYNC		(1<<6)
1019b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error
1020b0fd47adSChris Wilson  * state upon a GPU hang involving this batch for post-mortem debugging.
1021b0fd47adSChris Wilson  * These buffers are recorded in no particular order as "user" in
1022b0fd47adSChris Wilson  * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1023b0fd47adSChris Wilson  * if the kernel supports this flag.
1024b0fd47adSChris Wilson  */
1025b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE		(1<<7)
10269e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1027b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1028718dceddSDavid Howells 	__u64 flags;
1029ed5982e6SDaniel Vetter 
103091b2db6fSChris Wilson 	union {
1031718dceddSDavid Howells 		__u64 rsvd1;
103291b2db6fSChris Wilson 		__u64 pad_to_size;
103391b2db6fSChris Wilson 	};
1034718dceddSDavid Howells 	__u64 rsvd2;
1035718dceddSDavid Howells };
1036718dceddSDavid Howells 
/**
 * struct drm_i915_gem_exec_fence - An element of the fence array passed via
 * execbuffer2.cliprects_ptr when I915_EXEC_FENCE_ARRAY is set.
 */
struct drm_i915_gem_exec_fence {
	/**
	 * User's handle for a drm_syncobj to wait on or signal.
	 */
	__u32 handle;

/* Wait on the syncobj before executing the batch */
#define I915_EXEC_FENCE_WAIT            (1<<0)
/* Signal the syncobj upon completion of the batch */
#define I915_EXEC_FENCE_SIGNAL          (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
	/* I915_EXEC_FENCE_WAIT and/or I915_EXEC_FENCE_SIGNAL; other bits MBZ */
	__u32 flags;
};
1048cf6e7bacSJason Ekstrand 
/**
 * struct drm_i915_gem_execbuffer2 - Argument for batch submission, pairing
 * the list of objects referenced by the batch with execution flags.
 */
struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/**
	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
	 * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
	 * struct drm_i915_gem_exec_fence *fences.
	 */
	__u64 cliprects_ptr;
	/* Engine (ring) selector, stored in the low bits of flags */
#define I915_EXEC_RING_MASK              (0x3f)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	/* I915_EXEC_* flags; further bits are defined below this struct */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2; /* in/out sync_file fds, see I915_EXEC_FENCE_IN/OUT */
};
1090718dceddSDavid Howells 
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already holds the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 *  the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be close()d after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_file and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST		(1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of i915_gem_exec_fence structures which specify a set of
 * dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY   (1<<19)

/*
 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_SUBMIT		(1 << 20)

/* All flag bits above I915_EXEC_FENCE_SUBMIT are MBZ */
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))

/* Helpers for stashing/extracting the context id in execbuffer2.rsvd1 */
#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1191718dceddSDavid Howells 
/* Pin a buffer object into the GTT aperture and report its offset. */
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};
1203718dceddSDavid Howells 
/* Release a pin previously obtained via struct drm_i915_gem_pin. */
struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};
1209718dceddSDavid Howells 
/* Query whether a buffer object is still in use by the GPU. */
struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide and only indirectly by reporting its class
	 * (there may be more than one engine in each class). There are race
	 * conditions which prevent the report of which engines are busy from
	 * being always accurate.  However, the converse is not true. If the
	 * object is idle, the result of the ioctl, that all engines are idle,
	 * is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engine classes on which the object is being read, and the
	 * engine class on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicate if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
	 * 1 not 0 etc) for the last write is reported.
	 *
	 * The high word (bits 16:31) are a bitmask of which engine classes
	 * are currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine class is the same as specified in the
	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
	 * Some hardware may have parallel
	 * execution engines, e.g. multiple media engines, which are
	 * mapped to the same class identifier and so are not separately
	 * reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should be only used as a heuristic.
	 */
	__u32 busy;
};
1262718dceddSDavid Howells 
/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

/* Set or query the caching level (I915_CACHING_*) of a buffer object. */
struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};
1303718dceddSDavid Howells 
/* Tiling modes used by the set_tiling/get_tiling ioctls below */
#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
#define I915_TILING_LAST	I915_TILING_Y

/* Bit-6 address swizzling patterns, returned via the swizzle_mode fields */
#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7
1319718dceddSDavid Howells 
/**
 * struct drm_i915_gem_set_tiling - Set the tiling mode and stride of a
 * buffer object; also reports the bit-6 swizzling the CPU must apply.
 */
struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
1350718dceddSDavid Howells 
/**
 * struct drm_i915_gem_get_tiling - Query the tiling mode and bit-6
 * swizzling state of a buffer object.
 */
struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};
1373718dceddSDavid Howells 
/* Query the total and available aperture space used by execbuffer. */
struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};
1384718dceddSDavid Howells 
/* Translate a KMS CRTC id into the hardware pipe it drives. */
struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};
1392718dceddSDavid Howells 
/* Backing-store advice values for drm_i915_gem_madvise.madv */
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 *         or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};
1409718dceddSDavid Howells 
/* flags for drm_intel_overlay_put_image.flags: source format description */
#define I915_OVERLAY_TYPE_MASK 		0xff
#define I915_OVERLAY_YUV_PLANAR 	0x01
#define I915_OVERLAY_YUV_PACKED 	0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000
1433718dceddSDavid Howells 
/* Describe a source image and its destination placement for the overlay. */
struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};
1458718dceddSDavid Howells 
/* flags for drm_intel_overlay_attrs */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
/* Get/set overlay properties: color key, picture adjustment and gamma ramp. */
struct drm_intel_overlay_attrs {
	__u32 flags;		/* I915_OVERLAY_UPDATE_* selectors above */
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};
1476718dceddSDavid Howells 
/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple.  Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent.  All other pixels will
 * be displayed on top of the primary plane.  For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
						* flags==0 to disable colorkeying.
						*/
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;		/* plane to update */
	__u32 min_value;	/* minimum key value */
	__u32 channel_mask;	/* color channels included in the compare */
	__u32 max_value;	/* maximum key value (ranged source keying) */
	__u32 flags;		/* I915_SET_COLORKEY_* */
};
1510718dceddSDavid Howells 
/* Wait, with a timeout, for rendering on a buffer object to complete. */
struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait. Returns time remaining. */
	__s64 timeout_ns;
};
1518718dceddSDavid Howells 
/* Create a GEM context; pass the returned id to execbuf (see rsvd1 helpers). */
struct drm_i915_gem_context_create {
	__u32 ctx_id; /* output: id of new context */
	__u32 pad;
};
1523718dceddSDavid Howells 
/* Extended context creation, optionally carrying a chain of extensions. */
struct drm_i915_gem_context_create_ext {
	__u32 ctx_id; /* output: id of new context */
	__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
	/* extension chain pointer, consumed when FLAGS_USE_EXTENSIONS is set */
	__u64 extensions;
};
15335cc9ed4bSChris Wilson 
1534c9dc0f35SChris Wilson struct drm_i915_gem_context_param {
1535c9dc0f35SChris Wilson 	__u32 ctx_id;
1536c9dc0f35SChris Wilson 	__u32 size;
1537c9dc0f35SChris Wilson 	__u64 param;
1538c9dc0f35SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1539b1b38278SDavid Weinehall #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1540fa8848f2SChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1541bc3d6744SChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
154284102171SMika Kuoppala #define I915_CONTEXT_PARAM_BANNABLE	0x5
1543ac14fbd4SChris Wilson #define I915_CONTEXT_PARAM_PRIORITY	0x6
1544ac14fbd4SChris Wilson #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1545ac14fbd4SChris Wilson #define   I915_CONTEXT_DEFAULT_PRIORITY		0
1546ac14fbd4SChris Wilson #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1547e46c2e99STvrtko Ursulin 	/*
1548e46c2e99STvrtko Ursulin 	 * When using the following param, value should be a pointer to
1549e46c2e99STvrtko Ursulin 	 * drm_i915_gem_context_param_sseu.
1550e46c2e99STvrtko Ursulin 	 */
1551e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_SSEU		0x7
1552ba4fda62SChris Wilson 
1553ba4fda62SChris Wilson /*
1554ba4fda62SChris Wilson  * Not all clients may want to attempt automatic recover of a context after
1555ba4fda62SChris Wilson  * a hang (for example, some clients may only submit very small incremental
1556ba4fda62SChris Wilson  * batches relying on known logical state of previous batches which will never
1557ba4fda62SChris Wilson  * recover correctly and each attempt will hang), and so would prefer that
1558ba4fda62SChris Wilson  * the context is forever banned instead.
1559ba4fda62SChris Wilson  *
1560ba4fda62SChris Wilson  * If set to false (0), after a reset, subsequent (and in flight) rendering
1561ba4fda62SChris Wilson  * from this context is discarded, and the client will need to create a new
1562ba4fda62SChris Wilson  * context to use instead.
1563ba4fda62SChris Wilson  *
1564ba4fda62SChris Wilson  * If set to true (1), the kernel will automatically attempt to recover the
1565ba4fda62SChris Wilson  * context by skipping the hanging batch and executing the next batch starting
1566ba4fda62SChris Wilson  * from the default context state (discarding the incomplete logical context
1567ba4fda62SChris Wilson  * state lost due to the reset).
1568ba4fda62SChris Wilson  *
1569ba4fda62SChris Wilson  * On creation, all new contexts are marked as recoverable.
1570ba4fda62SChris Wilson  */
1571ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
15727f3f317aSChris Wilson 
15737f3f317aSChris Wilson 	/*
15747f3f317aSChris Wilson 	 * The id of the associated virtual memory address space (ppGTT) of
15757f3f317aSChris Wilson 	 * this context. Can be retrieved and passed to another context
15767f3f317aSChris Wilson 	 * (on the same fd) for both to use the same ppGTT and so share
15777f3f317aSChris Wilson 	 * address layouts, and avoid reloading the page tables on context
15787f3f317aSChris Wilson 	 * switches between themselves.
15797f3f317aSChris Wilson 	 *
15807f3f317aSChris Wilson 	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
15817f3f317aSChris Wilson 	 */
15827f3f317aSChris Wilson #define I915_CONTEXT_PARAM_VM		0x9
1583976b55f0SChris Wilson 
1584976b55f0SChris Wilson /*
1585976b55f0SChris Wilson  * I915_CONTEXT_PARAM_ENGINES:
1586976b55f0SChris Wilson  *
1587976b55f0SChris Wilson  * Bind this context to operate on this subset of available engines. Henceforth,
1588976b55f0SChris Wilson  * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1589976b55f0SChris Wilson  * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1590976b55f0SChris Wilson  * and upwards. Slots 0...N are filled in using the specified (class, instance).
1591976b55f0SChris Wilson  * Use
1592976b55f0SChris Wilson  *	engine_class: I915_ENGINE_CLASS_INVALID,
1593976b55f0SChris Wilson  *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1594976b55f0SChris Wilson  * to specify a gap in the array that can be filled in later, e.g. by a
1595976b55f0SChris Wilson  * virtual engine used for load balancing.
1596976b55f0SChris Wilson  *
1597976b55f0SChris Wilson  * Setting the number of engines bound to the context to 0, by passing a zero
1598976b55f0SChris Wilson  * sized argument, will revert back to default settings.
1599976b55f0SChris Wilson  *
1600976b55f0SChris Wilson  * See struct i915_context_param_engines.
1601ee113690SChris Wilson  *
1602ee113690SChris Wilson  * Extensions:
1603ee113690SChris Wilson  *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1604ee113690SChris Wilson  *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1605976b55f0SChris Wilson  */
1606976b55f0SChris Wilson #define I915_CONTEXT_PARAM_ENGINES	0xa
1607a0e04715SChris Wilson 
1608a0e04715SChris Wilson /*
1609a0e04715SChris Wilson  * I915_CONTEXT_PARAM_PERSISTENCE:
1610a0e04715SChris Wilson  *
1611a0e04715SChris Wilson  * Allow the context and active rendering to survive the process until
1612a0e04715SChris Wilson  * completion. Persistence allows fire-and-forget clients to queue up a
1613a0e04715SChris Wilson  * bunch of work, hand the output over to a display server and then quit.
1614a0e04715SChris Wilson  * If the context is marked as not persistent, upon closing (either via
1615a0e04715SChris Wilson  * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
1616a0e04715SChris Wilson  * or process termination), the context and any outstanding requests will be
1617a0e04715SChris Wilson  * cancelled (and exported fences for cancelled requests marked as -EIO).
1618a0e04715SChris Wilson  *
1619a0e04715SChris Wilson  * By default, new contexts allow persistence.
1620a0e04715SChris Wilson  */
1621a0e04715SChris Wilson #define I915_CONTEXT_PARAM_PERSISTENCE	0xb
1622be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
1623e0695db7SChris Wilson 
1624c9dc0f35SChris Wilson 	__u64 value;
1625c9dc0f35SChris Wilson };
1626c9dc0f35SChris Wilson 
/**
 * Context SSEU programming
 *
 * It may be necessary for either functional or performance reasons to
 * configure a context to run with a reduced number of SSEU (where SSEU
 * stands for Slice/Sub-slice/EU).
 *
 * This is done by configuring SSEU configuration using the below
 * @struct drm_i915_gem_context_param_sseu for every supported engine which
 * userspace intends to use.
 *
 * Not all GPUs or engines support this functionality in which case an error
 * code -ENODEV will be returned.
 *
 * Also, flexibility of possible SSEU configuration permutations varies
 * between GPU generations and software imposed limitations. Requesting such
 * a combination will return an error code of -EINVAL.
 *
 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
 * favour of a single global setting.
 */
struct drm_i915_gem_context_param_sseu {
	/*
	 * Engine class & instance to be configured or queried.
	 */
	struct i915_engine_class_instance engine;

	/*
	 * Unknown flags must be cleared to zero.
	 */
	__u32 flags;
/*
 * NOTE(review): the flag name suggests that, when set, @engine is taken as
 * an index into the context's engine map rather than a (class, instance)
 * pair -- confirm against the i915 context code.
 */
#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)

	/*
	 * Mask of slices to enable for the context. Valid values are a subset
	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
	 */
	__u64 slice_mask;

	/*
	 * Mask of subslices to enable for the context. Valid values are a
	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
	 */
	__u64 subslice_mask;

	/*
	 * Minimum/Maximum number of EUs to enable per subslice for the
	 * context. min_eus_per_subslice must be less than or equal to
	 * max_eus_per_subslice.
	 */
	__u16 min_eus_per_subslice;
	__u16 max_eus_per_subslice;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 rsvd;
};
1685e46c2e99STvrtko Ursulin 
16866d06779eSChris Wilson /*
16876d06779eSChris Wilson  * i915_context_engines_load_balance:
16886d06779eSChris Wilson  *
16896d06779eSChris Wilson  * Enable load balancing across this set of engines.
16906d06779eSChris Wilson  *
16916d06779eSChris Wilson  * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
16926d06779eSChris Wilson  * used will proxy the execbuffer request onto one of the set of engines
16936d06779eSChris Wilson  * in such a way as to distribute the load evenly across the set.
16946d06779eSChris Wilson  *
16956d06779eSChris Wilson  * The set of engines must be compatible (e.g. the same HW class) as they
16966d06779eSChris Wilson  * will share the same logical GPU context and ring.
16976d06779eSChris Wilson  *
16986d06779eSChris Wilson  * To intermix rendering with the virtual engine and direct rendering onto
16996d06779eSChris Wilson  * the backing engines (bypassing the load balancing proxy), the context must
17006d06779eSChris Wilson  * be defined to use a single timeline for all engines.
17016d06779eSChris Wilson  */
17026d06779eSChris Wilson struct i915_context_engines_load_balance {
17036d06779eSChris Wilson 	struct i915_user_extension base;
17046d06779eSChris Wilson 
17056d06779eSChris Wilson 	__u16 engine_index;
17066d06779eSChris Wilson 	__u16 num_siblings;
17076d06779eSChris Wilson 	__u32 flags; /* all undefined flags must be zero */
17086d06779eSChris Wilson 
17096d06779eSChris Wilson 	__u64 mbz64; /* reserved for future use; must be zero */
17106d06779eSChris Wilson 
17116d06779eSChris Wilson 	struct i915_engine_class_instance engines[0];
17126d06779eSChris Wilson } __attribute__((packed));
17136d06779eSChris Wilson 
/*
 * Helper for declaring a fixed-size variant of
 * struct i915_context_engines_load_balance, with an engines[] array of
 * exactly N__ elements (same leading layout as the flexible struct).
 */
#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
	struct i915_user_extension base; \
	__u16 engine_index; \
	__u16 num_siblings; \
	__u32 flags; \
	__u64 mbz64; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
17226d06779eSChris Wilson 
1723ee113690SChris Wilson /*
1724ee113690SChris Wilson  * i915_context_engines_bond:
1725ee113690SChris Wilson  *
1726ee113690SChris Wilson  * Constructed bonded pairs for execution within a virtual engine.
1727ee113690SChris Wilson  *
1728ee113690SChris Wilson  * All engines are equal, but some are more equal than others. Given
1729ee113690SChris Wilson  * the distribution of resources in the HW, it may be preferable to run
1730ee113690SChris Wilson  * a request on a given subset of engines in parallel to a request on a
1731ee113690SChris Wilson  * specific engine. We enable this selection of engines within a virtual
1732ee113690SChris Wilson  * engine by specifying bonding pairs, for any given master engine we will
1733ee113690SChris Wilson  * only execute on one of the corresponding siblings within the virtual engine.
1734ee113690SChris Wilson  *
1735ee113690SChris Wilson  * To execute a request in parallel on the master engine and a sibling requires
1736ee113690SChris Wilson  * coordination with a I915_EXEC_FENCE_SUBMIT.
1737ee113690SChris Wilson  */
1738ee113690SChris Wilson struct i915_context_engines_bond {
1739ee113690SChris Wilson 	struct i915_user_extension base;
1740ee113690SChris Wilson 
1741ee113690SChris Wilson 	struct i915_engine_class_instance master;
1742ee113690SChris Wilson 
1743ee113690SChris Wilson 	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
1744ee113690SChris Wilson 	__u16 num_bonds;
1745ee113690SChris Wilson 
1746ee113690SChris Wilson 	__u64 flags; /* all undefined flags must be zero */
1747ee113690SChris Wilson 	__u64 mbz64[4]; /* reserved for future use; must be zero */
1748ee113690SChris Wilson 
1749ee113690SChris Wilson 	struct i915_engine_class_instance engines[0];
1750ee113690SChris Wilson } __attribute__((packed));
1751ee113690SChris Wilson 
/*
 * Helper for declaring a fixed-size variant of struct
 * i915_context_engines_bond, with an engines[] array of exactly N__
 * elements (same leading layout as the flexible struct).
 */
#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
	struct i915_user_extension base; \
	struct i915_engine_class_instance master; \
	__u16 virtual_index; \
	__u16 num_bonds; \
	__u64 flags; \
	__u64 mbz64[4]; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
1761ee113690SChris Wilson 
1762976b55f0SChris Wilson struct i915_context_param_engines {
1763976b55f0SChris Wilson 	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
17646d06779eSChris Wilson #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
1765ee113690SChris Wilson #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
1766976b55f0SChris Wilson 	struct i915_engine_class_instance engines[0];
1767976b55f0SChris Wilson } __attribute__((packed));
1768976b55f0SChris Wilson 
/*
 * Helper for declaring a fixed-size variant of struct
 * i915_context_param_engines, with an engines[] array of exactly N__
 * elements (same leading layout as the flexible struct).
 */
#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
	__u64 extensions; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
1773976b55f0SChris Wilson 
/*
 * Context-creation extension (chained via @base): apply the given context
 * parameter (see struct drm_i915_gem_context_param) to the newly created
 * context.
 */
struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
	struct i915_user_extension base;
	struct drm_i915_gem_context_param param;
};
1779b9171541SChris Wilson 
/*
 * Context-creation extension (chained via @base): clone the aspects of the
 * existing context @clone_id selected by the I915_CONTEXT_CLONE_* bits set
 * in @flags.
 */
struct drm_i915_gem_context_create_ext_clone {
#define I915_CONTEXT_CREATE_EXT_CLONE 1
	struct i915_user_extension base;
	__u32 clone_id;
	__u32 flags;
#define I915_CONTEXT_CLONE_ENGINES	(1u << 0)
#define I915_CONTEXT_CLONE_FLAGS	(1u << 1)
#define I915_CONTEXT_CLONE_SCHEDATTR	(1u << 2)
#define I915_CONTEXT_CLONE_SSEU		(1u << 3)
#define I915_CONTEXT_CLONE_TIMELINE	(1u << 4)
#define I915_CONTEXT_CLONE_VM		(1u << 5)
/* All bits above the highest defined flag are unknown/invalid. */
#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
	__u64 rsvd;
};
1794b81dde71SChris Wilson 
/*
 * Destroy a previously created context, identified by @ctx_id.
 */
struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;	/* padding; no documented meaning */
};
1799b9171541SChris Wilson 
/*
 * DRM_I915_GEM_VM_CREATE -
 *
 * Create a new virtual memory address space (ppGTT) for use within a context
 * on the same file. Extensions can be provided to configure exactly how the
 * address space is set up upon creation.
 *
 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
 * returned in the outparam @vm_id.
 *
 * No flags are defined, with all bits reserved and must be zero.
 *
 * An extension chain may be provided, starting with @extensions, and
 * terminated by the @next_extension being 0. Currently, no extensions are
 * defined.
 *
 * DRM_I915_GEM_VM_DESTROY -
 *
 * Destroys a previously created VM id, specified in @vm_id.
 *
 * No extensions or flags are allowed currently, and so must be zero.
 */
struct drm_i915_gem_vm_control {
	__u64 extensions;	/* chain of i915_user_extension, 0 terminates */
	__u32 flags;		/* no flags defined; must be zero */
	__u32 vm_id;		/* out for VM_CREATE, in for VM_DESTROY */
};
1826b9171541SChris Wilson 
/*
 * Read a register, selected by @offset; see the "Known registers" list
 * below for registers exposed through this interface.
 */
struct drm_i915_reg_read {
	/*
	 * Register offset.
	 * For 64bit wide registers where the upper 32bits don't immediately
	 * follow the lower 32bits, the offset of the lower 32bits must
	 * be specified
	 */
	__u64 offset;
/* Flag OR'ed into @offset, requesting the 8-byte read workaround. */
#define I915_REG_READ_8B_WA (1ul << 0)

	__u64 val; /* Return value */
};
1839b9171541SChris Wilson 
1840b9171541SChris Wilson /* Known registers:
1841b9171541SChris Wilson  *
1842b9171541SChris Wilson  * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if using the default
 *   single-instruction 8-byte read; to work around that, pass the
 *   flag I915_REG_READ_8B_WA in the offset field.
1846b9171541SChris Wilson  *
1847b9171541SChris Wilson  */
1848b9171541SChris Wilson 
/*
 * GPU reset statistics: a global reset count plus per-context batch-loss
 * counters for the context selected by @ctx_id.
 */
struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;	/* no flag semantics documented here; presumably must be zero -- confirm */

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};
1864b9171541SChris Wilson 
/*
 * Create a GEM object backed by the user memory range
 * [@user_ptr, @user_ptr + @user_size); the new object's handle is
 * returned in @handle.
 */
struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};
1878b9171541SChris Wilson 
/*
 * OA (Observation Architecture) unit report formats; availability depends
 * on the hardware generation, as noted per value.
 */
enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};
1895d7965152SRobert Bragg 
/*
 * Property ids for the (id, value) pairs passed via
 * drm_i915_perf_open_param::properties_ptr when opening a perf stream.
 */
enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	/**
	 * Specifying this property is only valid when specifying a context to
	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
	 * will hold preemption of the particular context we want to gather
	 * performance data about. The execbuf2 submissions must include a
	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
	 *
	 * This property is available in perf revision 3.
	 */
	DRM_I915_PERF_PROP_HOLD_PREEMPTION,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};
1953eec688e1SRobert Bragg 
/*
 * Argument for opening an i915 perf stream, configured by an array of
 * (property id, value) pairs.
 */
struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)	/* close-on-exec on the returned fd */
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)	/* non-blocking reads on the returned fd */
#define I915_PERF_FLAG_DISABLED		(1<<2)	/* open in a disabled state; see I915_PERF_IOCTL_ENABLE */

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};
1969eec688e1SRobert Bragg 
/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try and read a stream that is disabled.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)

/**
 * Change metrics_set captured by a stream.
 *
 * If the stream is bound to a specific context, the configuration change
 * will be performed inline with that context such that it takes effect before
 * the next execbuf submission.
 *
 * Returns the previously bound metrics set id, or a negative error code.
 *
 * This ioctl is available in perf revision 2.
 */
#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
20067831e9a9SChris Wilson /**
/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;	/* one of enum drm_i915_perf_record_type */
	__u16 pad;
	__u16 size;
};
2014eec688e1SRobert Bragg 
enum drm_i915_perf_record_type {

	/**
	 * Samples are the work horse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 * struct {
	 *     struct drm_i915_perf_record_header header;
	 *
	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 * };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
2053eec688e1SRobert Bragg 
/**
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/**
	 * String formatted like "%08x-%04x-%04x-%04x-%012x" -- exactly 36
	 * characters, leaving no room for a NUL terminator.
	 */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register address,
	 * value). For example the expected length of the buffer pointed by
	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};
2074f89823c2SLionel Landwerlin 
/*
 * An individual query, referenced from drm_i915_query::items_ptr.
 */
struct drm_i915_query_item {
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO    1
#define DRM_I915_QUERY_ENGINE_INFO	2
#define DRM_I915_QUERY_PERF_CONFIG      3
/* Must be kept compact -- no holes and well documented */

	/*
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/*
	 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
	 *
	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
	 * following :
	 *         - DRM_I915_QUERY_PERF_CONFIG_LIST
	 *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
	 *         - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
	 */
	__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3

	/*
	 * Data will be written at the location pointed by data_ptr when the
	 * value of length matches the length of the data to be written by the
	 * kernel.
	 */
	__u64 data_ptr;
};
2111a446ae2cSLionel Landwerlin 
struct drm_i915_query {
	/* Number of struct drm_i915_query_item entries at items_ptr. */
	__u32 num_items;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * This points to an array of num_items drm_i915_query_item structures.
	 */
	__u64 items_ptr;
};
2125a446ae2cSLionel Landwerlin 
/*
 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
 *
 * data: contains the 3 pieces of information :
 *
 * - the slice mask with one bit per slice telling whether a slice is
 *   available. The availability of slice X can be queried with the following
 *   formula :
 *
 *           (data[X / 8] >> (X % 8)) & 1
 *
 * - the subslice mask for each slice with one bit per subslice telling
 *   whether a subslice is available. Gen12 has dual-subslices, which are
 *   similar to two gen11 subslices. For gen12, this array represents dual-
 *   subslices. The availability of subslice Y in slice X can be queried
 *   with the following formula :
 *
 *           (data[subslice_offset +
 *                 X * subslice_stride +
 *                 Y / 8] >> (Y % 8)) & 1
 *
 * - the EU mask for each subslice in each slice with one bit per EU telling
 *   whether an EU is available. The availability of EU Z in subslice Y in
 *   slice X can be queried with the following formula :
 *
 *           (data[eu_offset +
 *                 (X * max_subslices + Y) * eu_stride +
 *                 Z / 8] >> (Z % 8)) & 1
 */
struct drm_i915_query_topology_info {
	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	/* Upper bound on slice index X used in the formulas above. */
	__u16 max_slices;

	/* Upper bound on subslice index Y; also the row count of the EU masks. */
	__u16 max_subslices;

	/* Upper bound on EU index Z used in the formulas above. */
	__u16 max_eus_per_subslice;

	/*
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/*
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/*
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/*
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	/* Mask data laid out as described in the comment above this struct. */
	__u8 data[];
};
2188c822e059SLionel Landwerlin 
/**
 * struct drm_i915_engine_info
 *
 * Describes one engine and its capabilities as known to the driver.
 */
struct drm_i915_engine_info {
	/** Engine class and instance. */
	struct i915_engine_class_instance engine;

	/** Reserved field. */
	__u32 rsvd0;

	/** Engine flags. */
	__u64 flags;

	/** Capabilities of this engine (bitmask of the defines below). */
	__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)

	/** Reserved fields. */
	__u64 rsvd1[4];
};
2212c5d3e39cSTvrtko Ursulin 
/**
 * struct drm_i915_query_engine_info
 *
 * Engine info query enumerates all engines known to the driver by filling in
 * an array of struct drm_i915_engine_info structures.
 */
struct drm_i915_query_engine_info {
	/** Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** MBZ */
	__u32 rsvd[3];

	/** Marker for drm_i915_engine_info structures (num_engines entries). */
	struct drm_i915_engine_info engines[];
};
2229c5d3e39cSTvrtko Ursulin 
/*
 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
 */
struct drm_i915_query_perf_config {
	union {
		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
		 * this field to the number of configurations available.
		 */
		__u64 n_configs;

		/*
		 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into config_ptr.
		 */
		__u64 config;

		/*
		 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into config_ptr.
		 *
		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
		 * (exactly 36 characters, no terminating NUL).
		 */
		char uuid[36];
	};

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
	 * write an array of __u64 of configuration identifiers.
	 *
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
	 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
	 * drm_i915_perf_oa_config. If the following fields of struct
	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
	 * associated pointers the values that were submitted when the
	 * configuration was created :
	 *
	 *         - n_mux_regs
	 *         - n_boolean_regs
	 *         - n_flex_regs
	 */
	__u8 data[];
};
22794f6ccc74SLionel Landwerlin 
2280b1c1f5c4SEmil Velikov #if defined(__cplusplus)
2281b1c1f5c4SEmil Velikov }
2282b1c1f5c4SEmil Velikov #endif
2283b1c1f5c4SEmil Velikov 
2284718dceddSDavid Howells #endif /* _UAPI_I915_DRM_H_ */
2285