/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The FIFO is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
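
/*
 * Illustrative sketch, not part of the UAPI: user-space would typically issue
 * DRM_VMW_GET_PARAM through libdrm's drmCommandWriteRead(), assuming "fd" is
 * an already opened vmwgfx DRM file descriptor:
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) == 0)
 *		printf("3D support: %llu\n", (unsigned long long) arg.value);
 */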

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for the DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
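
/*
 * Illustrative sketch, not part of the UAPI: creating a single-face legacy
 * surface with one mip level. The libdrm call and the format value 2
 * (assumed to be SVGA3D_A8R8G8B8) are assumptions; "fd" is an already opened
 * vmwgfx DRM file descriptor:
 *
 *	struct drm_vmw_size size = { .width = 256, .height = 256, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = 2;
 *	arg.req.mip_levels[0] = 1;
 *	arg.req.size_addr = (__u64)(unsigned long)&size;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg)) == 0)
 *		use arg.rep.sid in the command stream;
 */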

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clears a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that, when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
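
/*
 * Illustrative sketch, not part of the UAPI: submitting a command buffer and
 * getting a fence back. "cmd_buf" / "cmd_len" and the libdrm call are
 * assumptions; "fd" is an already opened vmwgfx DRM file descriptor:
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (__u64)(unsigned long)cmd_buf;
 *	arg.command_size = cmd_len;
 *	arg.fence_rep = (__u64)(unsigned long)&fence_rep;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence_rep.error == 0)
 *		keep fence_rep.handle for DRM_VMW_FENCE_WAIT / _UNREF;
 */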

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is also visible to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
	__u32 size;
	__u32 pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};
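
/*
 * Illustrative sketch, not part of the UAPI: allocating a DMA buffer and
 * mapping it, assuming libdrm and an already opened vmwgfx DRM file
 * descriptor "fd":
 *
 *	const __u32 size = 64 * 1024;
 *	union drm_vmw_alloc_dmabuf_arg arg;
 *	void *map;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = size;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg)) == 0)
 *		map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.rep.map_handle);
 */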

/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 *
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64.
 * @max_size: Maximum size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may time out,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is, restarted without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
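
/*
 * Illustrative sketch, not part of the UAPI: waiting for the fence returned
 * by DRM_VMW_EXECBUF and dropping the reference in the same call (assumes
 * libdrm and an already opened vmwgfx DRM file descriptor "fd"):
 *
 *	struct drm_vmw_fence_wait_arg wait;
 *
 *	memset(&wait, 0, sizeof(wait));
 *	wait.handle = fence_rep.handle;
 *	wait.lazy = 1;
 *	wait.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *	wait.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *
 *	(void) drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait, sizeof(wait));
 */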

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence object has signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: User-space data to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
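
/*
 * Illustrative sketch, not part of the UAPI: describing a two-output,
 * side-by-side layout (assumes libdrm and an already opened vmwgfx DRM file
 * descriptor "fd"):
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,    .y = 0, .w = 1024, .h = 768 },
 *		{ .x = 1024, .y = 0, .w = 1024, .h = 768 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(unsigned long)rects,
 *	};
 *
 *	(void) drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */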

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * the shader itself.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
 *                                      surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 *                                      given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags:      SVGA3d surface flags for the device.
 * @format:            SVGA3d format.
 * @mip_levels:        Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter:    Future use. Set to 0.
 * @buffer_handle:     Buffer handle of backup buffer. SVGA3D_INVALID_ID
 *                     if none.
 * @base_size:         Size of the base mip level for all faces.
 * @array_size:        Must be zero for non-DX hardware, and if non-zero
 *                     svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle:            Surface handle.
 * @backup_size:       Size of backup buffers for this surface.
 * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size:       Actual size of the buffer identified by
 *                     @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 *                     identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
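
/*
 * Illustrative sketch, not part of the UAPI: creating a guest-backed surface
 * and letting the kernel allocate the backup buffer. The format value 2
 * (assumed to be SVGA3D_A8R8G8B8) and the libdrm call are assumptions; "fd"
 * is an already opened vmwgfx DRM file descriptor:
 *
 *	union drm_vmw_gb_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = 2;
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE, &arg, sizeof(arg)) == 0)
 *		use arg.rep.handle and arg.rep.buffer_handle;
 */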

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * CPU sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only access.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op:     The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags:  Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
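
/*
 * Illustrative sketch, not part of the UAPI: bracketing CPU writes to a DMA
 * buffer with a synccpu grab / release pair (assumes libdrm and an already
 * opened vmwgfx DRM file descriptor "fd"; "handle" is a buffer handle from
 * DRM_VMW_ALLOC_DMABUF):
 *
 *	struct drm_vmw_synccpu_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.flags = drm_vmw_synccpu_write;
 *	arg.handle = handle;
 *
 *	if (drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg)) == 0) {
 *		write to the mapped buffer here;
 *		arg.op = drm_vmw_synccpu_release;
 *		(void) drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	}
 */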

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
 * The ioctl arguments therefore need to be identical in layout.
 *
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};


#if defined(__cplusplus)
}
#endif

#endif