/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools.  The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying that no more errors
 *	exist. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"
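
/*
 * Illustrative sketch (not part of this header): userspace can watch for the
 * uevents above with libudev, filtering on the "drm" subsystem and reading
 * the property named by the macro of interest. Error handling is omitted,
 * and the loop below is only a minimal example of the pattern.
 *
 *	#include <libudev.h>
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *
 *	for (;;) {
 *		struct udev_device *dev = udev_monitor_receive_device(mon);
 *		if (!dev)
 *			continue;
 *		const char *v =
 *			udev_device_get_property_value(dev, I915_RESET_UEVENT);
 *		if (v)
 *			... a GPU reset is about to be attempted ...
 *		udev_device_unref(dev);
 *	}
 */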

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
	I915_ENGINE_CLASS_RENDER	= 0,
	I915_ENGINE_CLASS_COPY		= 1,
	I915_ENGINE_CLASS_VIDEO		= 2,
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	/* should be kept compact */

	I915_ENGINE_CLASS_INVALID	= -1
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)

#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
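
/*
 * Illustrative sketch (not part of this header): the macros above encode the
 * perf "config" value for an i915 PMU event. Assuming the dynamic PMU type
 * number has already been read into "i915_pmu_type" (a hypothetical variable)
 * from the i915 entry under /sys/bus/event_source/devices/, the render-engine
 * busy counter could be opened and sampled roughly like so:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type,		read from sysfs, see above
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	pid = -1 and cpu = 0, as for other system-wide (uncore) PMUs:
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	__u64 busy;
 *	read(fd, &busy, sizeof(busy));		accumulated busy time
 */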

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be in the range [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on IRQs:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC		 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT		 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
 * priorities, in which case the driver will attempt to execute batches in
 * priority order. The param returns a capability bitmask; nonzero implies
 * that the scheduler is enabled, with different features present according
 * to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)

#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
} drm_i915_getparam_t;
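
/*
 * Illustrative sketch (not part of this header): querying a parameter from
 * userspace, where "fd" is assumed to be an open DRM device file descriptor.
 * The same pattern applies to any I915_PARAM_* value above.
 *
 *	#include <sys/ioctl.h>
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_EXEC_ASYNC,
 *		.value = &value,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value)
 *		... EXEC_OBJECT_ASYNC may be used on this kernel ...
 */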

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

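/*
 * Illustrative sketch (not part of this header): allocating a buffer object
 * and mapping it into the CPU address space with the two ioctls above. "fd"
 * is assumed to be an open DRM device file descriptor; error handling is
 * omitted.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = create.handle,
 *		.size = create.size,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 */
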
struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
91077ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and
91177ae9957SChris Wilson  * synchronises operations with outstanding rendering. This includes
91277ae9957SChris Wilson  * rendering on other devices if exported via dma-buf. However, sometimes
91377ae9957SChris Wilson  * this tracking is too coarse and the user knows better. For example,
91477ae9957SChris Wilson  * if the object is split into non-overlapping ranges shared between different
91577ae9957SChris Wilson  * clients or engines (i.e. suballocating objects), the implicit tracking
91677ae9957SChris Wilson  * by kernel assumes that each operation affects the whole object rather
91777ae9957SChris Wilson  * than an individual range, causing needless synchronisation between clients.
91877ae9957SChris Wilson  * The kernel will also forgo any CPU cache flushes prior to rendering from
91977ae9957SChris Wilson  * the object as the client is expected to be also handling such domain
92077ae9957SChris Wilson  * tracking.
92177ae9957SChris Wilson  *
92277ae9957SChris Wilson  * The kernel maintains the implicit tracking in order to manage resources
92377ae9957SChris Wilson  * used by the GPU - this flag only disables the synchronisation prior to
92477ae9957SChris Wilson  * rendering with this object in this execbuf.
92577ae9957SChris Wilson  *
92677ae9957SChris Wilson  * Opting out of implicit synchronisation requires the user to do its own
92777ae9957SChris Wilson  * explicit tracking to avoid rendering corruption. See, for example,
92877ae9957SChris Wilson  * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
92977ae9957SChris Wilson  */
93077ae9957SChris Wilson #define EXEC_OBJECT_ASYNC		(1<<6)
931b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error
932b0fd47adSChris Wilson  * state upon a GPU hang involving this batch for post-mortem debugging.
933b0fd47adSChris Wilson  * These buffers are recorded in no particular order as "user" in
934b0fd47adSChris Wilson  * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
935b0fd47adSChris Wilson  * if the kernel supports this flag.
936b0fd47adSChris Wilson  */
937b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE		(1<<7)
9389e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
939b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
940718dceddSDavid Howells 	__u64 flags;
941ed5982e6SDaniel Vetter 
94291b2db6fSChris Wilson 	union {
943718dceddSDavid Howells 		__u64 rsvd1;
94491b2db6fSChris Wilson 		__u64 pad_to_size;
94591b2db6fSChris Wilson 	};
946718dceddSDavid Howells 	__u64 rsvd2;
947718dceddSDavid Howells };
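
/*
 * Illustrative sketch (not part of the uAPI): filling in one exec object
 * entry for a softpinned buffer. The handle and GTT address used here are
 * assumptions for the example, and error handling is elided.
 *
 *	struct drm_i915_gem_exec_object2 obj;
 *
 *	memset(&obj, 0, sizeof(obj));
 *	obj.handle = bo_handle;
 *	obj.offset = 0x100000;			// desired GTT address
 *	obj.flags = EXEC_OBJECT_PINNED |	// place exactly at .offset
 *		    EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 */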
948718dceddSDavid Howells 
949cf6e7bacSJason Ekstrand struct drm_i915_gem_exec_fence {
950cf6e7bacSJason Ekstrand 	/**
951cf6e7bacSJason Ekstrand 	 * User's handle for a drm_syncobj to wait on or signal.
952cf6e7bacSJason Ekstrand 	 */
953cf6e7bacSJason Ekstrand 	__u32 handle;
954cf6e7bacSJason Ekstrand 
955cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_WAIT            (1<<0)
956cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_SIGNAL          (1<<1)
957ebcaa1ffSTvrtko Ursulin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
958cf6e7bacSJason Ekstrand 	__u32 flags;
959cf6e7bacSJason Ekstrand };
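
/*
 * Illustrative sketch (not part of the uAPI): one entry of the array that
 * cliprects_ptr points at when I915_EXEC_FENCE_ARRAY is set on execbuffer2
 * (see below). The syncobj handle is an assumption for the example.
 *
 *	struct drm_i915_gem_exec_fence fence = {
 *		.handle = syncobj_handle,	// from DRM_IOCTL_SYNCOBJ_CREATE
 *		.flags = I915_EXEC_FENCE_SIGNAL,
 *	};
 */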
960cf6e7bacSJason Ekstrand 
961718dceddSDavid Howells struct drm_i915_gem_execbuffer2 {
962718dceddSDavid Howells 	/**
963718dceddSDavid Howells 	 * List of gem_exec_object2 structs
964718dceddSDavid Howells 	 */
965718dceddSDavid Howells 	__u64 buffers_ptr;
966718dceddSDavid Howells 	__u32 buffer_count;
967718dceddSDavid Howells 
968718dceddSDavid Howells 	/** Offset in the batchbuffer to start execution from. */
969718dceddSDavid Howells 	__u32 batch_start_offset;
970718dceddSDavid Howells 	/** Bytes used in batchbuffer from batch_start_offset */
971718dceddSDavid Howells 	__u32 batch_len;
972718dceddSDavid Howells 	__u32 DR1;
973718dceddSDavid Howells 	__u32 DR4;
974718dceddSDavid Howells 	__u32 num_cliprects;
975cf6e7bacSJason Ekstrand 	/**
976cf6e7bacSJason Ekstrand 	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
977cf6e7bacSJason Ekstrand 	 * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
978cf6e7bacSJason Ekstrand 	 * struct drm_i915_gem_exec_fence *fences.
979cf6e7bacSJason Ekstrand 	 */
980718dceddSDavid Howells 	__u64 cliprects_ptr;
981718dceddSDavid Howells #define I915_EXEC_RING_MASK              (7<<0)
982718dceddSDavid Howells #define I915_EXEC_DEFAULT                (0<<0)
983718dceddSDavid Howells #define I915_EXEC_RENDER                 (1<<0)
984718dceddSDavid Howells #define I915_EXEC_BSD                    (2<<0)
985718dceddSDavid Howells #define I915_EXEC_BLT                    (3<<0)
98682f91b6eSXiang, Haihao #define I915_EXEC_VEBOX                  (4<<0)
987718dceddSDavid Howells 
988718dceddSDavid Howells /* Used for switching the constants addressing mode on gen4+ RENDER ring.
989718dceddSDavid Howells  * Gen6+ only supports relative addressing to dynamic state (default) and
990718dceddSDavid Howells  * absolute addressing.
991718dceddSDavid Howells  *
992718dceddSDavid Howells  * These flags are ignored for the BSD and BLT rings.
993718dceddSDavid Howells  */
994718dceddSDavid Howells #define I915_EXEC_CONSTANTS_MASK 	(3<<6)
995718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
996718dceddSDavid Howells #define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
997718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
998718dceddSDavid Howells 	__u64 flags;
999718dceddSDavid Howells 	__u64 rsvd1; /* now used for context info */
1000718dceddSDavid Howells 	__u64 rsvd2;
1001718dceddSDavid Howells };
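
/*
 * Illustrative sketch (not part of the uAPI): a minimal submission of a
 * single batch buffer. "fd" is an open DRM fd and "obj" an exec object as
 * sketched above; both are assumptions, and error handling is elided.
 *
 *	struct drm_i915_gem_execbuffer2 eb;
 *
 *	memset(&eb, 0, sizeof(eb));
 *	eb.buffers_ptr = (__u64)(uintptr_t)&obj;	// batch is last (and
 *	eb.buffer_count = 1;				// only) element here
 *	eb.batch_len = batch_size_in_bytes;
 *	eb.flags = I915_EXEC_RENDER;			// engine selection
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 */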
1002718dceddSDavid Howells 
1003718dceddSDavid Howells /** Resets the SO write offset registers for transform feedback on gen7. */
1004718dceddSDavid Howells #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1005718dceddSDavid Howells 
1006c2fb7916SDaniel Vetter /** Request a privileged ("secure") batch buffer. Note this is only available
1007c2fb7916SDaniel Vetter  * for DRM_ROOT_ONLY | DRM_MASTER processes.
1008c2fb7916SDaniel Vetter  */
1009c2fb7916SDaniel Vetter #define I915_EXEC_SECURE		(1<<9)
1010c2fb7916SDaniel Vetter 
1011b45305fcSDaniel Vetter /** Inform the kernel that the batch is and will always be pinned. This
1012b45305fcSDaniel Vetter  * negates the requirement for a workaround to be performed to avoid
1013b45305fcSDaniel Vetter  * an incoherent CS (such as can be found on 830/845). If this flag is
1014b45305fcSDaniel Vetter  * not passed, the kernel will endeavour to make sure the batch is
1015b45305fcSDaniel Vetter  * coherent with the CS before execution. If this flag is passed,
1016b45305fcSDaniel Vetter  * userspace assumes the responsibility for ensuring the same.
1017b45305fcSDaniel Vetter  */
1018b45305fcSDaniel Vetter #define I915_EXEC_IS_PINNED		(1<<10)
1019b45305fcSDaniel Vetter 
1020c3d19d3cSGeert Uytterhoeven /** Provide a hint to the kernel that the command stream and auxiliary
1021ed5982e6SDaniel Vetter  * state buffers already hold the correct presumed addresses and so the
1022ed5982e6SDaniel Vetter  * relocation process may be skipped if no buffers need to be moved in
1023ed5982e6SDaniel Vetter  * preparation for the execbuffer.
1024ed5982e6SDaniel Vetter  */
1025ed5982e6SDaniel Vetter #define I915_EXEC_NO_RELOC		(1<<11)
1026ed5982e6SDaniel Vetter 
1027eef90ccbSChris Wilson /** Use the reloc.handle as an index into the exec object array rather
1028eef90ccbSChris Wilson  * than as the per-file handle.
1029eef90ccbSChris Wilson  */
1030eef90ccbSChris Wilson #define I915_EXEC_HANDLE_LUT		(1<<12)
1031eef90ccbSChris Wilson 
10328d360dffSZhipeng Gong /** Used for switching BSD rings on the platforms with two BSD rings */
1033d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_SHIFT	 (13)
1034d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1035d9da6aa0STvrtko Ursulin /* default ping-pong mode */
1036d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1037d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1038d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
10398d360dffSZhipeng Gong 
1040a9ed33caSAbdiel Janulgue /** Tell the kernel that the batchbuffer is processed by
1041a9ed33caSAbdiel Janulgue  *  the resource streamer.
1042a9ed33caSAbdiel Janulgue  */
1043a9ed33caSAbdiel Janulgue #define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1044a9ed33caSAbdiel Janulgue 
1045fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represents
1046fec0445cSChris Wilson  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1047fec0445cSChris Wilson  * the batch.
1048fec0445cSChris Wilson  *
1049fec0445cSChris Wilson  * Returns -EINVAL if the sync_file fd cannot be found.
1050fec0445cSChris Wilson  */
1051fec0445cSChris Wilson #define I915_EXEC_FENCE_IN		(1<<16)
1052fec0445cSChris Wilson 
1053fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1054fec0445cSChris Wilson  * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1055fec0445cSChris Wilson  * to the caller, and it should be closed after use. (The fd is a regular
1056fec0445cSChris Wilson  * file descriptor and will be cleaned up on process termination. It holds
1057fec0445cSChris Wilson  * a reference to the request, but nothing else.)
1058fec0445cSChris Wilson  *
1059fec0445cSChris Wilson  * The sync_file fd can be combined with other sync_files and passed either
1060fec0445cSChris Wilson  * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1061fec0445cSChris Wilson  * will only occur after this request completes), or to other devices.
1062fec0445cSChris Wilson  *
1063fec0445cSChris Wilson  * Using I915_EXEC_FENCE_OUT requires use of
1064fec0445cSChris Wilson  * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1065fec0445cSChris Wilson  * back to userspace. Failure to do so will cause the out-fence to always
1066fec0445cSChris Wilson  * be reported as zero, and the real fence fd to be leaked.
1067fec0445cSChris Wilson  */
1068fec0445cSChris Wilson #define I915_EXEC_FENCE_OUT		(1<<17)
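
/*
 * Illustrative sketch (not part of the uAPI): using both fence flags with
 * the _WR flavour of the ioctl, as described above. "in_fd" is an existing
 * sync_file fd and is an assumption for the example; error handling is
 * elided.
 *
 *	eb.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 *	eb.rsvd2 = (__u32)in_fd;		// lower 32 bits: in-fence
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb);
 *	int out_fd = (int)(eb.rsvd2 >> 32);	// upper 32 bits: out-fence
 *	...
 *	close(out_fd);				// ownership is ours
 */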
1069fec0445cSChris Wilson 
10701a71cf2fSChris Wilson /*
10711a71cf2fSChris Wilson  * Traditionally the execbuf ioctl has only considered the final element in
10721a71cf2fSChris Wilson  * the execobject[] to be the executable batch. Often though, the client
10731a71cf2fSChris Wilson  * will know the batch object prior to construction and being able to place
10741a71cf2fSChris Wilson  * it into the execobject[] array first can simplify the relocation tracking.
10751a71cf2fSChris Wilson  * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
10761a71cf2fSChris Wilson  * execobject[] as the batch instead (the default is to use the last
10771a71cf2fSChris Wilson  * element).
10781a71cf2fSChris Wilson  */
10791a71cf2fSChris Wilson #define I915_EXEC_BATCH_FIRST		(1<<18)
1080cf6e7bacSJason Ekstrand 
1081cf6e7bacSJason Ekstrand /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1082cf6e7bacSJason Ekstrand  * define an array of drm_i915_gem_exec_fence structures which specify a set of
1083cf6e7bacSJason Ekstrand  * dma fences to wait upon or signal.
1084cf6e7bacSJason Ekstrand  */
1085cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_ARRAY   (1<<19)
1086cf6e7bacSJason Ekstrand 
1087cf6e7bacSJason Ekstrand #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
1088ed5982e6SDaniel Vetter 
1089718dceddSDavid Howells #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1090718dceddSDavid Howells #define i915_execbuffer2_set_context_id(eb2, context) \
1091718dceddSDavid Howells 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1092718dceddSDavid Howells #define i915_execbuffer2_get_context_id(eb2) \
1093718dceddSDavid Howells 	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
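
/*
 * Illustrative sketch (not part of the uAPI): directing a submission at a
 * context previously returned by DRM_IOCTL_I915_GEM_CONTEXT_CREATE. "eb"
 * and "ctx_id" are assumptions for the example.
 *
 *	i915_execbuffer2_set_context_id(eb, ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 */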
1094718dceddSDavid Howells 
1095718dceddSDavid Howells struct drm_i915_gem_pin {
1096718dceddSDavid Howells 	/** Handle of the buffer to be pinned. */
1097718dceddSDavid Howells 	__u32 handle;
1098718dceddSDavid Howells 	__u32 pad;
1099718dceddSDavid Howells 
1100718dceddSDavid Howells 	/** alignment required within the aperture */
1101718dceddSDavid Howells 	__u64 alignment;
1102718dceddSDavid Howells 
1103718dceddSDavid Howells 	/** Returned GTT offset of the buffer. */
1104718dceddSDavid Howells 	__u64 offset;
1105718dceddSDavid Howells };
1106718dceddSDavid Howells 
1107718dceddSDavid Howells struct drm_i915_gem_unpin {
1108718dceddSDavid Howells 	/** Handle of the buffer to be unpinned. */
1109718dceddSDavid Howells 	__u32 handle;
1110718dceddSDavid Howells 	__u32 pad;
1111718dceddSDavid Howells };
1112718dceddSDavid Howells 
1113718dceddSDavid Howells struct drm_i915_gem_busy {
1114718dceddSDavid Howells 	/** Handle of the buffer to check for busy */
1115718dceddSDavid Howells 	__u32 handle;
1116718dceddSDavid Howells 
1117426960beSChris Wilson 	/** Return busy status
1118426960beSChris Wilson 	 *
1119426960beSChris Wilson 	 * A return of 0 implies that the object is idle (after
1120426960beSChris Wilson 	 * having flushed any pending activity), and a non-zero return that
1121426960beSChris Wilson 	 * the object is still in-flight on the GPU. (The GPU has not yet
1122426960beSChris Wilson 	 * signaled completion for all pending requests that reference the
11231255501dSChris Wilson 	 * object.) An object is guaranteed to become idle eventually (so
11241255501dSChris Wilson 	 * long as no new GPU commands are executed upon it). Due to the
11251255501dSChris Wilson 	 * asynchronous nature of the hardware, an object reported
11261255501dSChris Wilson 	 * as busy may become idle before the ioctl is completed.
11271255501dSChris Wilson 	 *
11281255501dSChris Wilson 	 * Furthermore, if the object is busy, which engine is busy is only
11291255501dSChris Wilson 	 * provided as a guide. There are race conditions which prevent the
11301255501dSChris Wilson 	 * report of which engines are busy from always being accurate.
11311255501dSChris Wilson 	 * However, the converse is not true. If the object is idle, the
11321255501dSChris Wilson 	 * result of the ioctl, that all engines are idle, is accurate.
1133426960beSChris Wilson 	 *
1134426960beSChris Wilson 	 * The returned dword is split into two fields to indicate both
1135426960beSChris Wilson 	 * the engines on which the object is being read, and the
1136426960beSChris Wilson 	 * engine on which it is currently being written (if any).
1137426960beSChris Wilson 	 *
1138426960beSChris Wilson 	 * The low word (bits 0:15) indicates if the object is being written
1139426960beSChris Wilson 	 * to by any engine (there can only be one, as the GEM implicit
1140426960beSChris Wilson 	 * synchronisation rules force writes to be serialised). Only the
1141426960beSChris Wilson 	 * engine for the last write is reported.
1142426960beSChris Wilson 	 *
1143426960beSChris Wilson 	 * The high word (bits 16:31) is a bitmask of which engines are
1144426960beSChris Wilson 	 * currently reading from the object. Multiple engines may be
1145426960beSChris Wilson 	 * reading from the object simultaneously.
1146426960beSChris Wilson 	 *
1147426960beSChris Wilson 	 * The value of each engine is the same as specified in the
1148426960beSChris Wilson 	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
1149426960beSChris Wilson 	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
1150426960beSChris Wilson 	 * the I915_EXEC_RENDER engine for execution, and so it is never
1151426960beSChris Wilson 	 * reported as active itself. Some hardware may have parallel
1152426960beSChris Wilson 	 * execution engines, e.g. multiple media engines, which are
1153426960beSChris Wilson 	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
1154426960beSChris Wilson 	 * so are not separately reported for busyness.
11551255501dSChris Wilson 	 *
11561255501dSChris Wilson 	 * Caveat emptor:
11571255501dSChris Wilson 	 * Only the boolean result of this query is reliable; that is whether
11581255501dSChris Wilson 	 * the object is idle or busy. The report of which engines are busy
11591255501dSChris Wilson 	 * should be only used as a heuristic.
1160718dceddSDavid Howells 	 */
1161718dceddSDavid Howells 	__u32 busy;
1162718dceddSDavid Howells };
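
/*
 * Illustrative sketch (not part of the uAPI): decoding the busy word as
 * documented above. Error handling is elided.
 *
 *	struct drm_i915_gem_busy busy = { .handle = bo_handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy) {
 *		__u32 write_engine = busy.busy & 0xffff;	// bits 0:15
 *		__u32 read_engines = busy.busy >> 16;		// bits 16:31
 *	}
 */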
1163718dceddSDavid Howells 
116435c7ab42SDaniel Vetter /**
116535c7ab42SDaniel Vetter  * I915_CACHING_NONE
116635c7ab42SDaniel Vetter  *
116735c7ab42SDaniel Vetter  * GPU access is not coherent with cpu caches. Default for machines without an
116835c7ab42SDaniel Vetter  * LLC.
116935c7ab42SDaniel Vetter  */
1170718dceddSDavid Howells #define I915_CACHING_NONE		0
117135c7ab42SDaniel Vetter /**
117235c7ab42SDaniel Vetter  * I915_CACHING_CACHED
117335c7ab42SDaniel Vetter  *
117435c7ab42SDaniel Vetter  * GPU access is coherent with cpu caches and furthermore the data is cached in
117535c7ab42SDaniel Vetter  * last-level caches shared between cpu cores and the gpu GT. Default on
117635c7ab42SDaniel Vetter  * machines with HAS_LLC.
117735c7ab42SDaniel Vetter  */
1178718dceddSDavid Howells #define I915_CACHING_CACHED		1
117935c7ab42SDaniel Vetter /**
118035c7ab42SDaniel Vetter  * I915_CACHING_DISPLAY
118135c7ab42SDaniel Vetter  *
118235c7ab42SDaniel Vetter  * Special GPU caching mode which is coherent with the scanout engines.
118335c7ab42SDaniel Vetter  * Transparently falls back to I915_CACHING_NONE on platforms where no special
118435c7ab42SDaniel Vetter  * cache mode (like write-through or gfdt flushing) is available. The kernel
118535c7ab42SDaniel Vetter  * automatically sets this mode when using a buffer as a scanout target.
118635c7ab42SDaniel Vetter  * Userspace can manually set this mode to avoid a costly stall and clflush in
118735c7ab42SDaniel Vetter  * the hotpath of drawing the first frame.
118835c7ab42SDaniel Vetter  */
118935c7ab42SDaniel Vetter #define I915_CACHING_DISPLAY		2
1190718dceddSDavid Howells 
1191718dceddSDavid Howells struct drm_i915_gem_caching {
1192718dceddSDavid Howells 	/**
1193718dceddSDavid Howells 	 * Handle of the buffer to set/get the caching level of. */
1194718dceddSDavid Howells 	__u32 handle;
1195718dceddSDavid Howells 
1196718dceddSDavid Howells 	/**
1197718dceddSDavid Howells 	 * Caching level to apply or return value
1198718dceddSDavid Howells 	 *
1199718dceddSDavid Howells 	 * bits 0-15 are for generic caching control (i.e. the above defined
1200718dceddSDavid Howells 	 * values). bits 16-31 are reserved for platform-specific variations
1201718dceddSDavid Howells 	 * (e.g. l3$ caching on gen7). */
1202718dceddSDavid Howells 	__u32 caching;
1203718dceddSDavid Howells };
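
/*
 * Illustrative sketch (not part of the uAPI): requesting LLC caching for a
 * buffer. Error handling is elided.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */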
1204718dceddSDavid Howells 
1205718dceddSDavid Howells #define I915_TILING_NONE	0
1206718dceddSDavid Howells #define I915_TILING_X		1
1207718dceddSDavid Howells #define I915_TILING_Y		2
1208deeb1519SChris Wilson #define I915_TILING_LAST	I915_TILING_Y
1209718dceddSDavid Howells 
1210718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_NONE		0
1211718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9		1
1212718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10		2
1213718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_11		3
1214718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_11	4
1215718dceddSDavid Howells /* Not seen by userland */
1216718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_UNKNOWN	5
1217718dceddSDavid Howells /* Seen by userland. */
1218718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_17		6
1219718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_17	7
1220718dceddSDavid Howells 
1221718dceddSDavid Howells struct drm_i915_gem_set_tiling {
1222718dceddSDavid Howells 	/** Handle of the buffer to have its tiling state updated */
1223718dceddSDavid Howells 	__u32 handle;
1224718dceddSDavid Howells 
1225718dceddSDavid Howells 	/**
1226718dceddSDavid Howells 	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1227718dceddSDavid Howells 	 * I915_TILING_Y).
1228718dceddSDavid Howells 	 *
1229718dceddSDavid Howells 	 * This value is to be set on request, and will be updated by the
1230718dceddSDavid Howells 	 * kernel on successful return with the actual chosen tiling layout.
1231718dceddSDavid Howells 	 *
1232718dceddSDavid Howells 	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1233718dceddSDavid Howells 	 * has bit 6 swizzling that can't be managed correctly by GEM.
1234718dceddSDavid Howells 	 *
1235718dceddSDavid Howells 	 * Buffer contents become undefined when changing tiling_mode.
1236718dceddSDavid Howells 	 */
1237718dceddSDavid Howells 	__u32 tiling_mode;
1238718dceddSDavid Howells 
1239718dceddSDavid Howells 	/**
1240718dceddSDavid Howells 	 * Stride in bytes for the object when in I915_TILING_X or
1241718dceddSDavid Howells 	 * I915_TILING_Y.
1242718dceddSDavid Howells 	 */
1243718dceddSDavid Howells 	__u32 stride;
1244718dceddSDavid Howells 
1245718dceddSDavid Howells 	/**
1246718dceddSDavid Howells 	 * Returned address bit 6 swizzling required for CPU access through
1247718dceddSDavid Howells 	 * mmap mapping.
1248718dceddSDavid Howells 	 */
1249718dceddSDavid Howells 	__u32 swizzle_mode;
1250718dceddSDavid Howells };
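
/*
 * Illustrative sketch (not part of the uAPI): requesting X tiling and
 * checking whether the kernel demoted the request. The stride value and
 * handle_demotion() helper are assumptions for the example.
 *
 *	struct drm_i915_gem_set_tiling arg = {
 *		.handle = bo_handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 4096,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
 *	if (arg.tiling_mode != I915_TILING_X)
 *		handle_demotion();	// kernel fell back to linear
 */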
1251718dceddSDavid Howells 
1252718dceddSDavid Howells struct drm_i915_gem_get_tiling {
1253718dceddSDavid Howells 	/** Handle of the buffer to get tiling state for. */
1254718dceddSDavid Howells 	__u32 handle;
1255718dceddSDavid Howells 
1256718dceddSDavid Howells 	/**
1257718dceddSDavid Howells 	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1258718dceddSDavid Howells 	 * I915_TILING_Y).
1259718dceddSDavid Howells 	 */
1260718dceddSDavid Howells 	__u32 tiling_mode;
1261718dceddSDavid Howells 
1262718dceddSDavid Howells 	/**
1263718dceddSDavid Howells 	 * Returned address bit 6 swizzling required for CPU access through
1264718dceddSDavid Howells 	 * mmap mapping.
1265718dceddSDavid Howells 	 */
1266718dceddSDavid Howells 	__u32 swizzle_mode;
126770f2f5c7SChris Wilson 
126870f2f5c7SChris Wilson 	/**
126970f2f5c7SChris Wilson 	 * Returned address bit 6 swizzling required for CPU access through
127070f2f5c7SChris Wilson 	 * mmap mapping whilst bound.
127170f2f5c7SChris Wilson 	 */
127270f2f5c7SChris Wilson 	__u32 phys_swizzle_mode;
1273718dceddSDavid Howells };
1274718dceddSDavid Howells 
1275718dceddSDavid Howells struct drm_i915_gem_get_aperture {
1276718dceddSDavid Howells 	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1277718dceddSDavid Howells 	__u64 aper_size;
1278718dceddSDavid Howells 
1279718dceddSDavid Howells 	/**
1280718dceddSDavid Howells 	 * Available space in the aperture used by i915_gem_execbuffer, in
1281718dceddSDavid Howells 	 * bytes
1282718dceddSDavid Howells 	 */
1283718dceddSDavid Howells 	__u64 aper_available_size;
1284718dceddSDavid Howells };
1285718dceddSDavid Howells 
1286718dceddSDavid Howells struct drm_i915_get_pipe_from_crtc_id {
1287718dceddSDavid Howells 	/** ID of CRTC being requested */
1288718dceddSDavid Howells 	__u32 crtc_id;
1289718dceddSDavid Howells 
1290718dceddSDavid Howells 	/** pipe of requested CRTC */
1291718dceddSDavid Howells 	__u32 pipe;
1292718dceddSDavid Howells };
1293718dceddSDavid Howells 
1294718dceddSDavid Howells #define I915_MADV_WILLNEED 0
1295718dceddSDavid Howells #define I915_MADV_DONTNEED 1
1296718dceddSDavid Howells #define __I915_MADV_PURGED 2 /* internal state */
1297718dceddSDavid Howells 
1298718dceddSDavid Howells struct drm_i915_gem_madvise {
1299718dceddSDavid Howells 	/** Handle of the buffer to change the backing store advice */
1300718dceddSDavid Howells 	__u32 handle;
1301718dceddSDavid Howells 
1302718dceddSDavid Howells 	/* Advice: either the buffer will be needed again in the near future,
1303718dceddSDavid Howells 	 *         or won't be and could be discarded under memory pressure.
1304718dceddSDavid Howells 	 */
1305718dceddSDavid Howells 	__u32 madv;
1306718dceddSDavid Howells 
1307718dceddSDavid Howells 	/** Whether the backing store still exists. */
1308718dceddSDavid Howells 	__u32 retained;
1309718dceddSDavid Howells };
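
/*
 * Illustrative sketch (not part of the uAPI): reclaiming a buffer that was
 * previously marked I915_MADV_DONTNEED. recreate_buffer() is a hypothetical
 * helper; error handling is elided.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = I915_MADV_WILLNEED,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		recreate_buffer();	// backing store was purged
 */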
1310718dceddSDavid Howells 
1311718dceddSDavid Howells /* flags */
1312718dceddSDavid Howells #define I915_OVERLAY_TYPE_MASK 		0xff
1313718dceddSDavid Howells #define I915_OVERLAY_YUV_PLANAR 	0x01
1314718dceddSDavid Howells #define I915_OVERLAY_YUV_PACKED 	0x02
1315718dceddSDavid Howells #define I915_OVERLAY_RGB		0x03
1316718dceddSDavid Howells 
1317718dceddSDavid Howells #define I915_OVERLAY_DEPTH_MASK		0xff00
1318718dceddSDavid Howells #define I915_OVERLAY_RGB24		0x1000
1319718dceddSDavid Howells #define I915_OVERLAY_RGB16		0x2000
1320718dceddSDavid Howells #define I915_OVERLAY_RGB15		0x3000
1321718dceddSDavid Howells #define I915_OVERLAY_YUV422		0x0100
1322718dceddSDavid Howells #define I915_OVERLAY_YUV411		0x0200
1323718dceddSDavid Howells #define I915_OVERLAY_YUV420		0x0300
1324718dceddSDavid Howells #define I915_OVERLAY_YUV410		0x0400
1325718dceddSDavid Howells 
1326718dceddSDavid Howells #define I915_OVERLAY_SWAP_MASK		0xff0000
1327718dceddSDavid Howells #define I915_OVERLAY_NO_SWAP		0x000000
1328718dceddSDavid Howells #define I915_OVERLAY_UV_SWAP		0x010000
1329718dceddSDavid Howells #define I915_OVERLAY_Y_SWAP		0x020000
1330718dceddSDavid Howells #define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1331718dceddSDavid Howells 
1332718dceddSDavid Howells #define I915_OVERLAY_FLAGS_MASK		0xff000000
1333718dceddSDavid Howells #define I915_OVERLAY_ENABLE		0x01000000
1334718dceddSDavid Howells 
1335718dceddSDavid Howells struct drm_intel_overlay_put_image {
1336718dceddSDavid Howells 	/* various flags and src format description */
1337718dceddSDavid Howells 	__u32 flags;
1338718dceddSDavid Howells 	/* source picture description */
1339718dceddSDavid Howells 	__u32 bo_handle;
1340718dceddSDavid Howells 	/* stride values and offsets are in bytes, buffer relative */
1341718dceddSDavid Howells 	__u16 stride_Y; /* stride for packed formats */
1342718dceddSDavid Howells 	__u16 stride_UV;
1343718dceddSDavid Howells 	__u32 offset_Y; /* offset for packed formats */
1344718dceddSDavid Howells 	__u32 offset_U;
1345718dceddSDavid Howells 	__u32 offset_V;
1346718dceddSDavid Howells 	/* in pixels */
1347718dceddSDavid Howells 	__u16 src_width;
1348718dceddSDavid Howells 	__u16 src_height;
1349718dceddSDavid Howells 	/* to compensate for the scaling factors on partially covered surfaces */
1350718dceddSDavid Howells 	__u16 src_scan_width;
1351718dceddSDavid Howells 	__u16 src_scan_height;
1352718dceddSDavid Howells 	/* output crtc description */
1353718dceddSDavid Howells 	__u32 crtc_id;
1354718dceddSDavid Howells 	__u16 dst_x;
1355718dceddSDavid Howells 	__u16 dst_y;
1356718dceddSDavid Howells 	__u16 dst_width;
1357718dceddSDavid Howells 	__u16 dst_height;
1358718dceddSDavid Howells };
1359718dceddSDavid Howells 
1360718dceddSDavid Howells /* flags */
1361718dceddSDavid Howells #define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1362718dceddSDavid Howells #define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1363ea9da4e4SChris Wilson #define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1364718dceddSDavid Howells struct drm_intel_overlay_attrs {
1365718dceddSDavid Howells 	__u32 flags;
1366718dceddSDavid Howells 	__u32 color_key;
1367718dceddSDavid Howells 	__s32 brightness;
1368718dceddSDavid Howells 	__u32 contrast;
1369718dceddSDavid Howells 	__u32 saturation;
1370718dceddSDavid Howells 	__u32 gamma0;
1371718dceddSDavid Howells 	__u32 gamma1;
1372718dceddSDavid Howells 	__u32 gamma2;
1373718dceddSDavid Howells 	__u32 gamma3;
1374718dceddSDavid Howells 	__u32 gamma4;
1375718dceddSDavid Howells 	__u32 gamma5;
1376718dceddSDavid Howells };
1377718dceddSDavid Howells 
1378718dceddSDavid Howells /*
1379718dceddSDavid Howells  * Intel sprite handling
1380718dceddSDavid Howells  *
1381718dceddSDavid Howells  * Color keying works with a min/mask/max tuple.  Both source and destination
1382718dceddSDavid Howells  * color keying is allowed.
1383718dceddSDavid Howells  *
1384718dceddSDavid Howells  * Source keying:
1385718dceddSDavid Howells  * Sprite pixels within the min & max values, masked against the color channels
1386718dceddSDavid Howells  * specified in the mask field, will be transparent.  All other pixels will
1387718dceddSDavid Howells  * be displayed on top of the primary plane.  For RGB surfaces, only the min
1388718dceddSDavid Howells  * and mask fields will be used; ranged compares are not allowed.
1389718dceddSDavid Howells  *
1390718dceddSDavid Howells  * Destination keying:
1391718dceddSDavid Howells  * Primary plane pixels that match the min value, masked against the color
1392718dceddSDavid Howells  * channels specified in the mask field, will be replaced by corresponding
1393718dceddSDavid Howells  * pixels from the sprite plane.
1394718dceddSDavid Howells  *
1395718dceddSDavid Howells  * Note that source & destination keying are exclusive; only one can be
1396718dceddSDavid Howells  * active on a given plane.
1397718dceddSDavid Howells  */
1398718dceddSDavid Howells 
13996ec5bd34SVille Syrjälä #define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
14006ec5bd34SVille Syrjälä 						* flags==0 to disable colorkeying.
14016ec5bd34SVille Syrjälä 						*/
1402718dceddSDavid Howells #define I915_SET_COLORKEY_DESTINATION	(1<<1)
1403718dceddSDavid Howells #define I915_SET_COLORKEY_SOURCE	(1<<2)
1404718dceddSDavid Howells struct drm_intel_sprite_colorkey {
1405718dceddSDavid Howells 	__u32 plane_id;
1406718dceddSDavid Howells 	__u32 min_value;
1407718dceddSDavid Howells 	__u32 channel_mask;
1408718dceddSDavid Howells 	__u32 max_value;
1409718dceddSDavid Howells 	__u32 flags;
1410718dceddSDavid Howells };
1411718dceddSDavid Howells 
1412718dceddSDavid Howells struct drm_i915_gem_wait {
1413718dceddSDavid Howells 	/** Handle of BO we shall wait on */
1414718dceddSDavid Howells 	__u32 bo_handle;
1415718dceddSDavid Howells 	__u32 flags;
1416718dceddSDavid Howells 	/** Number of nanoseconds to wait. Returns the time remaining. */
1417718dceddSDavid Howells 	__s64 timeout_ns;
1418718dceddSDavid Howells };
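
/*
 * Illustrative sketch (not part of the uAPI): waiting up to 1ms for a
 * buffer to become idle. buffer_is_idle() is a hypothetical helper; error
 * handling is elided.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = bo_handle,
 *		.timeout_ns = 1000000,		// 1ms budget
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		buffer_is_idle();	// wait.timeout_ns holds time left
 */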
1419718dceddSDavid Howells 
1420718dceddSDavid Howells struct drm_i915_gem_context_create {
1421718dceddSDavid Howells 	/* output: id of new context */
1422718dceddSDavid Howells 	__u32 ctx_id;
1423718dceddSDavid Howells 	__u32 pad;
1424718dceddSDavid Howells };
1425718dceddSDavid Howells 
1426718dceddSDavid Howells struct drm_i915_gem_context_destroy {
1427718dceddSDavid Howells 	__u32 ctx_id;
1428718dceddSDavid Howells 	__u32 pad;
1429718dceddSDavid Howells };
1430718dceddSDavid Howells 
1431718dceddSDavid Howells struct drm_i915_reg_read {
14328697600bSVille Syrjälä 	/*
14338697600bSVille Syrjälä 	 * Register offset.
14348697600bSVille Syrjälä 	 * For 64-bit wide registers where the upper 32 bits don't immediately
14358697600bSVille Syrjälä 	 * follow the lower 32 bits, the offset of the lower 32 bits must
14368697600bSVille Syrjälä 	 * be specified.
14378697600bSVille Syrjälä 	 */
1438718dceddSDavid Howells 	__u64 offset;
1439822a4b67SJoonas Lahtinen #define I915_REG_READ_8B_WA (1ul << 0)
14403fd3a6ffSJoonas Lahtinen 
1441718dceddSDavid Howells 	__u64 val; /* Return value */
1442718dceddSDavid Howells };
1443648a9bc5SChris Wilson /* Known registers:
1444648a9bc5SChris Wilson  *
1445648a9bc5SChris Wilson  * Render engine timestamp - 0x2358 + 64bit - gen7+
1446648a9bc5SChris Wilson  * - Note this register returns an invalid value if using the default
14473fd3a6ffSJoonas Lahtinen  *   single-instruction 8-byte read; to work around that, pass the
14483fd3a6ffSJoonas Lahtinen  *   I915_REG_READ_8B_WA flag in the offset field.
1449648a9bc5SChris Wilson  *
1450648a9bc5SChris Wilson  */
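
/*
 * Illustrative sketch (not part of the uAPI): reading the gen7+ render
 * engine timestamp with the 8-byte workaround described above. Error
 * handling is elided.
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_REG_READ, &rr);	// rr.val holds the value
 */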
1451b6359918SMika Kuoppala 
1452b6359918SMika Kuoppala struct drm_i915_reset_stats {
1453b6359918SMika Kuoppala 	__u32 ctx_id;
1454b6359918SMika Kuoppala 	__u32 flags;
1455b6359918SMika Kuoppala 
1456b6359918SMika Kuoppala 	/* All resets since boot/module reload, for all contexts */
1457b6359918SMika Kuoppala 	__u32 reset_count;
1458b6359918SMika Kuoppala 
1459b6359918SMika Kuoppala 	/* Number of batches lost when active in GPU, for this context */
1460b6359918SMika Kuoppala 	__u32 batch_active;
1461b6359918SMika Kuoppala 
1462b6359918SMika Kuoppala 	/* Number of batches lost pending for execution, for this context */
1463b6359918SMika Kuoppala 	__u32 batch_pending;
1464b6359918SMika Kuoppala 
1465b6359918SMika Kuoppala 	__u32 pad;
1466b6359918SMika Kuoppala };
1467b6359918SMika Kuoppala 
14685cc9ed4bSChris Wilson struct drm_i915_gem_userptr {
14695cc9ed4bSChris Wilson 	__u64 user_ptr;
14705cc9ed4bSChris Wilson 	__u64 user_size;
14715cc9ed4bSChris Wilson 	__u32 flags;
14725cc9ed4bSChris Wilson #define I915_USERPTR_READ_ONLY 0x1
14735cc9ed4bSChris Wilson #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
14745cc9ed4bSChris Wilson 	/**
14755cc9ed4bSChris Wilson 	 * Returned handle for the object.
14765cc9ed4bSChris Wilson 	 *
14775cc9ed4bSChris Wilson 	 * Object handles are nonzero.
14785cc9ed4bSChris Wilson 	 */
14795cc9ed4bSChris Wilson 	__u32 handle;
14805cc9ed4bSChris Wilson };
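
/*
 * Illustrative sketch (not part of the uAPI): wrapping an existing
 * page-aligned allocation in a GEM object. "ptr" and "size" are
 * assumptions for the example; error handling is elided.
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,	// page-aligned
 *		.user_size = size,			// page-size multiple
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *	// on success, arg.handle names the new (nonzero) object
 */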
14815cc9ed4bSChris Wilson 
1482c9dc0f35SChris Wilson struct drm_i915_gem_context_param {
1483c9dc0f35SChris Wilson 	__u32 ctx_id;
1484c9dc0f35SChris Wilson 	__u32 size;
1485c9dc0f35SChris Wilson 	__u64 param;
1486c9dc0f35SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1487b1b38278SDavid Weinehall #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1488fa8848f2SChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1489bc3d6744SChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
149084102171SMika Kuoppala #define I915_CONTEXT_PARAM_BANNABLE	0x5
1491ac14fbd4SChris Wilson #define I915_CONTEXT_PARAM_PRIORITY	0x6
1492ac14fbd4SChris Wilson #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1493ac14fbd4SChris Wilson #define   I915_CONTEXT_DEFAULT_PRIORITY		0
1494ac14fbd4SChris Wilson #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1495e46c2e99STvrtko Ursulin 	/*
1496e46c2e99STvrtko Ursulin 	 * When using the following param, value should be a pointer to
1497e46c2e99STvrtko Ursulin 	 * drm_i915_gem_context_param_sseu.
1498e46c2e99STvrtko Ursulin 	 */
1499e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_SSEU		0x7
1500ba4fda62SChris Wilson 
1501ba4fda62SChris Wilson /*
1502ba4fda62SChris Wilson  * Not all clients may want to attempt automatic recovery of a context after
1503ba4fda62SChris Wilson  * a hang (for example, some clients may only submit very small incremental
1504ba4fda62SChris Wilson  * batches relying on known logical state of previous batches which will never
1505ba4fda62SChris Wilson  * recover correctly and each attempt will hang), and so would prefer that
1506ba4fda62SChris Wilson  * the context is forever banned instead.
1507ba4fda62SChris Wilson  *
1508ba4fda62SChris Wilson  * If set to false (0), after a reset, subsequent (and in flight) rendering
1509ba4fda62SChris Wilson  * from this context is discarded, and the client will need to create a new
1510ba4fda62SChris Wilson  * context to use instead.
1511ba4fda62SChris Wilson  *
1512ba4fda62SChris Wilson  * If set to true (1), the kernel will automatically attempt to recover the
1513ba4fda62SChris Wilson  * context by skipping the hanging batch and executing the next batch starting
1514ba4fda62SChris Wilson  * from the default context state (discarding the incomplete logical context
1515ba4fda62SChris Wilson  * state lost due to the reset).
1516ba4fda62SChris Wilson  *
1517ba4fda62SChris Wilson  * On creation, all new contexts are marked as recoverable.
1518ba4fda62SChris Wilson  */
1519ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
1520be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
1521c9dc0f35SChris Wilson 	__u64 value;
1522c9dc0f35SChris Wilson };
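
/*
 * Illustrative sketch (not part of the uAPI): raising a context's priority.
 * The value must lie within the MIN/MAX user range above; elevated priority
 * may additionally require privileges. Error handling is elided.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */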
1523c9dc0f35SChris Wilson 
1524e46c2e99STvrtko Ursulin /**
1525e46c2e99STvrtko Ursulin  * Context SSEU programming
1526e46c2e99STvrtko Ursulin  *
1527e46c2e99STvrtko Ursulin  * It may be necessary for either functional or performance reason to configure
1528e46c2e99STvrtko Ursulin  * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1529e46c2e99STvrtko Ursulin  * Sub-slice/EU).
1530e46c2e99STvrtko Ursulin  *
1531e46c2e99STvrtko Ursulin  * This is done by configuring SSEU configuration using the below
1532e46c2e99STvrtko Ursulin  * @struct drm_i915_gem_context_param_sseu for every supported engine which
1533e46c2e99STvrtko Ursulin  * userspace intends to use.
1534e46c2e99STvrtko Ursulin  *
1535e46c2e99STvrtko Ursulin  * Not all GPUs or engines support this functionality, in which case an error
1536e46c2e99STvrtko Ursulin  * code of -ENODEV will be returned.
1537e46c2e99STvrtko Ursulin  *
1538e46c2e99STvrtko Ursulin  * Also, the flexibility of possible SSEU configuration permutations varies
1539e46c2e99STvrtko Ursulin  * between GPU generations and software-imposed limitations. Requesting an
1540e46c2e99STvrtko Ursulin  * unsupported combination will return an error code of -EINVAL.
1541e46c2e99STvrtko Ursulin  *
1542e46c2e99STvrtko Ursulin  * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1543e46c2e99STvrtko Ursulin  * favour of a single global setting.
1544e46c2e99STvrtko Ursulin  */
1545e46c2e99STvrtko Ursulin struct drm_i915_gem_context_param_sseu {
1546e46c2e99STvrtko Ursulin 	/*
1547e46c2e99STvrtko Ursulin 	 * Engine class & instance to be configured or queried.
1548e46c2e99STvrtko Ursulin 	 */
1549e46c2e99STvrtko Ursulin 	__u16 engine_class;
1550e46c2e99STvrtko Ursulin 	__u16 engine_instance;
1551e46c2e99STvrtko Ursulin 
1552e46c2e99STvrtko Ursulin 	/*
1553e46c2e99STvrtko Ursulin 	 * Unused for now. Must be cleared to zero.
1554e46c2e99STvrtko Ursulin 	 */
1555e46c2e99STvrtko Ursulin 	__u32 flags;
1556e46c2e99STvrtko Ursulin 
1557e46c2e99STvrtko Ursulin 	/*
1558e46c2e99STvrtko Ursulin 	 * Mask of slices to enable for the context. Valid values are a subset
1559e46c2e99STvrtko Ursulin 	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1560e46c2e99STvrtko Ursulin 	 */
1561e46c2e99STvrtko Ursulin 	__u64 slice_mask;
1562e46c2e99STvrtko Ursulin 
1563e46c2e99STvrtko Ursulin 	/*
1564e46c2e99STvrtko Ursulin 	 * Mask of subslices to enable for the context. Valid values are a
1565e46c2e99STvrtko Ursulin 	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
1566e46c2e99STvrtko Ursulin 	 */
1567e46c2e99STvrtko Ursulin 	__u64 subslice_mask;
1568e46c2e99STvrtko Ursulin 
1569e46c2e99STvrtko Ursulin 	/*
1570e46c2e99STvrtko Ursulin 	 * Minimum/Maximum number of EUs to enable per subslice for the
1571e46c2e99STvrtko Ursulin 	 * context. min_eus_per_subslice must be less than or equal to
1572e46c2e99STvrtko Ursulin 	 * max_eus_per_subslice.
1573e46c2e99STvrtko Ursulin 	 */
1574e46c2e99STvrtko Ursulin 	__u16 min_eus_per_subslice;
1575e46c2e99STvrtko Ursulin 	__u16 max_eus_per_subslice;
1576e46c2e99STvrtko Ursulin 
1577e46c2e99STvrtko Ursulin 	/*
1578e46c2e99STvrtko Ursulin 	 * Unused for now. Must be cleared to zero.
1579e46c2e99STvrtko Ursulin 	 */
1580e46c2e99STvrtko Ursulin 	__u32 rsvd;
1581e46c2e99STvrtko Ursulin };
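
/*
 * Illustrative sketch (not part of the uAPI): querying the current SSEU
 * configuration of a context's first render-class engine. The engine class
 * value of 0 (render) is an assumption for the example; error handling is
 * elided.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine_class = 0,		// render
 *		.engine_instance = 0,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 */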
1582e46c2e99STvrtko Ursulin 
1583d7965152SRobert Bragg enum drm_i915_oa_format {
158419f81df2SRobert Bragg 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
158519f81df2SRobert Bragg 	I915_OA_FORMAT_A29,	    /* HSW only */
158619f81df2SRobert Bragg 	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
158719f81df2SRobert Bragg 	I915_OA_FORMAT_B4_C8,	    /* HSW only */
158819f81df2SRobert Bragg 	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
158919f81df2SRobert Bragg 	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
159019f81df2SRobert Bragg 	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
159119f81df2SRobert Bragg 
159219f81df2SRobert Bragg 	/* Gen8+ */
159319f81df2SRobert Bragg 	I915_OA_FORMAT_A12,
159419f81df2SRobert Bragg 	I915_OA_FORMAT_A12_B8_C8,
159519f81df2SRobert Bragg 	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1596d7965152SRobert Bragg 
1597d7965152SRobert Bragg 	I915_OA_FORMAT_MAX	    /* non-ABI */
1598d7965152SRobert Bragg };
1599d7965152SRobert Bragg 
1600eec688e1SRobert Bragg enum drm_i915_perf_property_id {
1601eec688e1SRobert Bragg 	/**
1602eec688e1SRobert Bragg 	 * Open the stream for a specific context handle (as used with
1603eec688e1SRobert Bragg 	 * execbuffer2). A stream opened for a specific context this way
1604eec688e1SRobert Bragg 	 * won't typically require root privileges.
1605eec688e1SRobert Bragg 	 */
1606eec688e1SRobert Bragg 	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1607eec688e1SRobert Bragg 
1608d7965152SRobert Bragg 	/**
1609d7965152SRobert Bragg 	 * A value of 1 requests the inclusion of raw OA unit reports as
1610d7965152SRobert Bragg 	 * part of stream samples.
1611d7965152SRobert Bragg 	 */
1612d7965152SRobert Bragg 	DRM_I915_PERF_PROP_SAMPLE_OA,
1613d7965152SRobert Bragg 
1614d7965152SRobert Bragg 	/**
1615d7965152SRobert Bragg 	 * The value specifies which set of OA unit metrics should be
1616d7965152SRobert Bragg 	 * configured, defining the contents of any OA unit reports.
1617d7965152SRobert Bragg 	 */
1618d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_METRICS_SET,
1619d7965152SRobert Bragg 
1620d7965152SRobert Bragg 	/**
1621d7965152SRobert Bragg 	 * The value specifies the size and layout of OA unit reports.
1622d7965152SRobert Bragg 	 */
1623d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_FORMAT,
1624d7965152SRobert Bragg 
1625d7965152SRobert Bragg 	/**
1626d7965152SRobert Bragg 	 * Specifying this property implicitly requests periodic OA unit
1627d7965152SRobert Bragg 	 * sampling and (at least on Haswell) the sampling frequency is derived
1628d7965152SRobert Bragg 	 * from this exponent as follows:
1629d7965152SRobert Bragg 	 *
1630d7965152SRobert Bragg 	 *   80ns * 2^(period_exponent + 1)
1631d7965152SRobert Bragg 	 */
1632d7965152SRobert Bragg 	DRM_I915_PERF_PROP_OA_EXPONENT,
1633d7965152SRobert Bragg 
1634eec688e1SRobert Bragg 	DRM_I915_PERF_PROP_MAX /* non-ABI */
1635eec688e1SRobert Bragg };
1636eec688e1SRobert Bragg 
1637eec688e1SRobert Bragg struct drm_i915_perf_open_param {
1638eec688e1SRobert Bragg 	__u32 flags;
1639eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
1640eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
1641eec688e1SRobert Bragg #define I915_PERF_FLAG_DISABLED		(1<<2)
1642eec688e1SRobert Bragg 
1643eec688e1SRobert Bragg 	/** The number of u64 (id, value) pairs */
1644eec688e1SRobert Bragg 	__u32 num_properties;
1645eec688e1SRobert Bragg 
1646eec688e1SRobert Bragg 	/**
1647eec688e1SRobert Bragg 	 * Pointer to array of u64 (id, value) pairs configuring the stream
1648eec688e1SRobert Bragg 	 * to open.
1649eec688e1SRobert Bragg 	 */
1650cd8bddc4SChris Wilson 	__u64 properties_ptr;
1651eec688e1SRobert Bragg };
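
/*
 * Illustrative sketch (not part of the uAPI): opening an OA stream. The
 * metrics set id and exponent are assumptions for the example; on success
 * the ioctl returns the new stream fd. Error handling is elided.
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */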
1652eec688e1SRobert Bragg 
1653d7965152SRobert Bragg /**
1654d7965152SRobert Bragg  * Enable data capture for a stream that was either opened in a disabled state
1655d7965152SRobert Bragg  * via I915_PERF_FLAG_DISABLED or was later disabled via
1656d7965152SRobert Bragg  * I915_PERF_IOCTL_DISABLE.
1657d7965152SRobert Bragg  *
1658d7965152SRobert Bragg  * It is intended to be cheaper to disable and enable a stream than it may be
1659d7965152SRobert Bragg  * to close and re-open a stream with the same configuration.
1660d7965152SRobert Bragg  *
1661d7965152SRobert Bragg  * It's undefined whether any pending data for the stream will be lost.
1662d7965152SRobert Bragg  */
1663eec688e1SRobert Bragg #define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
1664d7965152SRobert Bragg 
1665d7965152SRobert Bragg /**
1666d7965152SRobert Bragg  * Disable data capture for a stream.
1667d7965152SRobert Bragg  *
1668d7965152SRobert Bragg  * It is an error to try to read a stream that is disabled.
1669d7965152SRobert Bragg  */
1670eec688e1SRobert Bragg #define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
1671eec688e1SRobert Bragg 
1672eec688e1SRobert Bragg /**
1673eec688e1SRobert Bragg  * Common to all i915 perf records
1674eec688e1SRobert Bragg  */
1675eec688e1SRobert Bragg struct drm_i915_perf_record_header {
1676eec688e1SRobert Bragg 	__u32 type;
1677eec688e1SRobert Bragg 	__u16 pad;
1678eec688e1SRobert Bragg 	__u16 size;
1679eec688e1SRobert Bragg };
1680eec688e1SRobert Bragg 
1681eec688e1SRobert Bragg enum drm_i915_perf_record_type {
1682eec688e1SRobert Bragg 
1683eec688e1SRobert Bragg 	/**
1684eec688e1SRobert Bragg 	 * Samples are the workhorse record type whose contents are extensible
1685eec688e1SRobert Bragg 	 * and defined when opening an i915 perf stream based on the given
1686eec688e1SRobert Bragg 	 * properties.
1687eec688e1SRobert Bragg 	 *
1688eec688e1SRobert Bragg 	 * Boolean properties following the naming convention
1689eec688e1SRobert Bragg 	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1690eec688e1SRobert Bragg 	 * every sample.
1691eec688e1SRobert Bragg 	 *
1692eec688e1SRobert Bragg 	 * The order of these sample properties given by userspace has no
1693d7965152SRobert Bragg 	 * effect on the ordering of data within a sample. The order is
1694eec688e1SRobert Bragg 	 * documented here.
1695eec688e1SRobert Bragg 	 *
1696eec688e1SRobert Bragg 	 * struct {
1697eec688e1SRobert Bragg 	 *     struct drm_i915_perf_record_header header;
1698eec688e1SRobert Bragg 	 *
1699d7965152SRobert Bragg 	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1700eec688e1SRobert Bragg 	 * };
1701eec688e1SRobert Bragg 	 */
1702eec688e1SRobert Bragg 	DRM_I915_PERF_RECORD_SAMPLE = 1,
1703eec688e1SRobert Bragg 
1704d7965152SRobert Bragg 	/*
1705d7965152SRobert Bragg 	 * Indicates that one or more OA reports were not written by the
1706d7965152SRobert Bragg 	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1707d7965152SRobert Bragg 	 * command collides with periodic sampling - which would be more likely
1708d7965152SRobert Bragg 	 * at higher sampling frequencies.
1709d7965152SRobert Bragg 	 */
1710d7965152SRobert Bragg 	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1711d7965152SRobert Bragg 
1712d7965152SRobert Bragg 	/**
1713d7965152SRobert Bragg 	 * An error occurred that resulted in all pending OA reports being lost.
1714d7965152SRobert Bragg 	 */
1715d7965152SRobert Bragg 	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1716d7965152SRobert Bragg 
1717eec688e1SRobert Bragg 	DRM_I915_PERF_RECORD_MAX /* non-ABI */
1718eec688e1SRobert Bragg };
1719eec688e1SRobert Bragg 
1720f89823c2SLionel Landwerlin /**
1721f89823c2SLionel Landwerlin  * Structure to upload perf dynamic configuration into the kernel.
1722f89823c2SLionel Landwerlin  */
1723f89823c2SLionel Landwerlin struct drm_i915_perf_oa_config {
1724f89823c2SLionel Landwerlin 	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1725f89823c2SLionel Landwerlin 	char uuid[36];
1726f89823c2SLionel Landwerlin 
1727f89823c2SLionel Landwerlin 	__u32 n_mux_regs;
1728f89823c2SLionel Landwerlin 	__u32 n_boolean_regs;
1729f89823c2SLionel Landwerlin 	__u32 n_flex_regs;
1730f89823c2SLionel Landwerlin 
1731ee427e25SLionel Landwerlin 	/*
1732a446ae2cSLionel Landwerlin 	 * These fields are pointers to tuples of u32 values (register address,
1733a446ae2cSLionel Landwerlin  * value). For example, the expected length of the buffer pointed to by
1734a446ae2cSLionel Landwerlin 	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1735ee427e25SLionel Landwerlin 	 */
173617ad4fddSChris Wilson 	__u64 mux_regs_ptr;
173717ad4fddSChris Wilson 	__u64 boolean_regs_ptr;
173817ad4fddSChris Wilson 	__u64 flex_regs_ptr;
1739f89823c2SLionel Landwerlin };
1740f89823c2SLionel Landwerlin 
1741a446ae2cSLionel Landwerlin struct drm_i915_query_item {
1742a446ae2cSLionel Landwerlin 	__u64 query_id;
1743c822e059SLionel Landwerlin #define DRM_I915_QUERY_TOPOLOGY_INFO    1
1744be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
1745a446ae2cSLionel Landwerlin 
1746a446ae2cSLionel Landwerlin 	/*
1747a446ae2cSLionel Landwerlin 	 * When set to zero by userspace, this is filled with the size of the
1748a446ae2cSLionel Landwerlin 	 * data to be written at the data_ptr pointer. The kernel sets this
1749a446ae2cSLionel Landwerlin 	 * value to a negative value to signal an error on a particular query
1750a446ae2cSLionel Landwerlin 	 * item.
1751a446ae2cSLionel Landwerlin 	 */
1752a446ae2cSLionel Landwerlin 	__s32 length;
1753a446ae2cSLionel Landwerlin 
1754a446ae2cSLionel Landwerlin 	/*
1755a446ae2cSLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
1756a446ae2cSLionel Landwerlin 	 */
1757a446ae2cSLionel Landwerlin 	__u32 flags;
1758a446ae2cSLionel Landwerlin 
1759a446ae2cSLionel Landwerlin 	/*
1760a446ae2cSLionel Landwerlin 	 * Data will be written at the location pointed by data_ptr when the
1761a446ae2cSLionel Landwerlin 	 * value of length matches the length of the data to be written by the
1762a446ae2cSLionel Landwerlin 	 * kernel.
1763a446ae2cSLionel Landwerlin 	 */
1764a446ae2cSLionel Landwerlin 	__u64 data_ptr;
1765a446ae2cSLionel Landwerlin };
1766a446ae2cSLionel Landwerlin 
1767a446ae2cSLionel Landwerlin struct drm_i915_query {
1768a446ae2cSLionel Landwerlin 	__u32 num_items;
1769a446ae2cSLionel Landwerlin 
1770a446ae2cSLionel Landwerlin 	/*
1771a446ae2cSLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
1772a446ae2cSLionel Landwerlin 	 */
1773a446ae2cSLionel Landwerlin 	__u32 flags;
1774a446ae2cSLionel Landwerlin 
1775a446ae2cSLionel Landwerlin 	/*
1776a446ae2cSLionel Landwerlin 	 * This points to an array of num_items drm_i915_query_item structures.
1777a446ae2cSLionel Landwerlin 	 */
1778a446ae2cSLionel Landwerlin 	__u64 items_ptr;
1779a446ae2cSLionel Landwerlin };
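
/*
 * Illustrative sketch (not part of the uAPI): the usual two-pass pattern,
 * first asking the kernel for the required size and then for the data
 * itself. Error handling and the eventual free() are elided.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (__u64)(uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// fills item.length
 *	item.data_ptr = (__u64)(uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// fills the buffer
 */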
1780a446ae2cSLionel Landwerlin 
1781c822e059SLionel Landwerlin /*
1782c822e059SLionel Landwerlin  * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
1783c822e059SLionel Landwerlin  *
1784c822e059SLionel Landwerlin  * data: contains the 3 pieces of information:
1785c822e059SLionel Landwerlin  *
1786c822e059SLionel Landwerlin  * - the slice mask with one bit per slice telling whether a slice is
1787c822e059SLionel Landwerlin  *   available. The availability of slice X can be queried with the following
1788c822e059SLionel Landwerlin  *   formula:
1789c822e059SLionel Landwerlin  *
1790c822e059SLionel Landwerlin  *           (data[X / 8] >> (X % 8)) & 1
1791c822e059SLionel Landwerlin  *
1792c822e059SLionel Landwerlin  * - the subslice mask for each slice with one bit per subslice telling
1793c822e059SLionel Landwerlin  *   whether a subslice is available. The availability of subslice Y in slice
1794c822e059SLionel Landwerlin  *   X can be queried with the following formula:
1795c822e059SLionel Landwerlin  *
1796c822e059SLionel Landwerlin  *           (data[subslice_offset +
1797c822e059SLionel Landwerlin  *                 X * subslice_stride +
1798c822e059SLionel Landwerlin  *                 Y / 8] >> (Y % 8)) & 1
1799c822e059SLionel Landwerlin  *
1800c822e059SLionel Landwerlin  * - the EU mask for each subslice in each slice with one bit per EU telling
1801c822e059SLionel Landwerlin  *   whether an EU is available. The availability of EU Z in subslice Y in
1802c822e059SLionel Landwerlin  *   slice X can be queried with the following formula:
1803c822e059SLionel Landwerlin  *
1804c822e059SLionel Landwerlin  *           (data[eu_offset +
1805c822e059SLionel Landwerlin  *                 (X * max_subslices + Y) * eu_stride +
1806c822e059SLionel Landwerlin  *                 Z / 8] >> (Z % 8)) & 1
1807c822e059SLionel Landwerlin  */
1808c822e059SLionel Landwerlin struct drm_i915_query_topology_info {
1809c822e059SLionel Landwerlin 	/*
1810c822e059SLionel Landwerlin 	 * Unused for now. Must be cleared to zero.
1811c822e059SLionel Landwerlin 	 */
1812c822e059SLionel Landwerlin 	__u16 flags;
1813c822e059SLionel Landwerlin 
1814c822e059SLionel Landwerlin 	__u16 max_slices;
1815c822e059SLionel Landwerlin 	__u16 max_subslices;
1816c822e059SLionel Landwerlin 	__u16 max_eus_per_subslice;
1817c822e059SLionel Landwerlin 
1818c822e059SLionel Landwerlin 	/*
1819c822e059SLionel Landwerlin 	 * Offset in data[] at which the subslice masks are stored.
1820c822e059SLionel Landwerlin 	 */
1821c822e059SLionel Landwerlin 	__u16 subslice_offset;
1822c822e059SLionel Landwerlin 
1823c822e059SLionel Landwerlin 	/*
1824c822e059SLionel Landwerlin 	 * Stride at which each of the subslice masks for each slice are
1825c822e059SLionel Landwerlin 	 * stored.
1826c822e059SLionel Landwerlin 	 */
1827c822e059SLionel Landwerlin 	__u16 subslice_stride;
1828c822e059SLionel Landwerlin 
1829c822e059SLionel Landwerlin 	/*
1830c822e059SLionel Landwerlin 	 * Offset in data[] at which the EU masks are stored.
1831c822e059SLionel Landwerlin 	 */
1832c822e059SLionel Landwerlin 	__u16 eu_offset;
1833c822e059SLionel Landwerlin 
1834c822e059SLionel Landwerlin 	/*
1835c822e059SLionel Landwerlin 	 * Stride at which each of the EU masks for each subslice are stored.
1836c822e059SLionel Landwerlin 	 */
1837c822e059SLionel Landwerlin 	__u16 eu_stride;
1838c822e059SLionel Landwerlin 
1839c822e059SLionel Landwerlin 	__u8 data[];
1840c822e059SLionel Landwerlin };
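
/*
 * Illustrative sketch (not part of the uAPI): the three availability
 * formulas above expressed as helpers over a kernel-filled topology info
 * struct.
 *
 *	static int slice_available(const struct drm_i915_query_topology_info *i,
 *				   int s)
 *	{
 *		return (i->data[s / 8] >> (s % 8)) & 1;
 *	}
 *
 *	static int subslice_available(const struct drm_i915_query_topology_info *i,
 *				      int s, int ss)
 *	{
 *		return (i->data[i->subslice_offset + s * i->subslice_stride +
 *				ss / 8] >> (ss % 8)) & 1;
 *	}
 *
 *	static int eu_available(const struct drm_i915_query_topology_info *i,
 *				int s, int ss, int eu)
 *	{
 *		return (i->data[i->eu_offset +
 *				(s * i->max_subslices + ss) * i->eu_stride +
 *				eu / 8] >> (eu % 8)) & 1;
 *	}
 */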
1841c822e059SLionel Landwerlin 
1842b1c1f5c4SEmil Velikov #if defined(__cplusplus)
1843b1c1f5c4SEmil Velikov }
1844b1c1f5c4SEmil Velikov #endif
1845b1c1f5c4SEmil Velikov 
1846718dceddSDavid Howells #endif /* _UAPI_I915_DRM_H_ */
1847