/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 */
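
/*
 * Example: querying a device parameter (an editor's illustrative sketch,
 * not part of the UAPI). It assumes a file descriptor "fd" opened on the
 * vmwgfx drm device and the drmCommandWriteRead() helper from libdrm's
 * xf86drm.h; a raw ioctl() with request code
 * DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,
 * struct drm_vmw_getparam_arg) is equivalent:
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int have_3d = 0;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg,
 *				sizeof(arg)) == 0)
 *		have_3d = arg.value != 0;
 */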

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
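
/*
 * Example: creating a legacy surface (an editor's sketch; "fd" and
 * libdrm's drmCommandWriteRead() are assumed, and "format" stands in for
 * an SVGA3D format value from the device headers). Note how size_addr
 * carries a user-space pointer cast to an __u64, and how unused faces
 * keep mip_levels[] at zero:
 *
 *	struct drm_vmw_size size = { .width = 1024, .height = 768, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg;
 *	__s32 sid = -1;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = format;
 *	arg.req.mip_levels[0] = 1;
 *	arg.req.size_addr = (__u64)(unsigned long)&size;
 *	arg.req.scanout = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg,
 *				sizeof(arg)) == 0)
 *		sid = arg.rep.sid;
 */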

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};
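
/*
 * Example: submitting a command buffer and asking for a fence object back
 * (an editor's sketch; "fd", a buffer "cmd" holding "cmd_size" bytes of
 * SVGA commands, and libdrm's drmCommandWrite() are assumed; the fence
 * information is returned through the user-space @fence_rep pointer
 * rather than the argument itself):
 *
 *	struct drm_vmw_fence_rep fence;
 *	struct drm_vmw_execbuf_arg arg;
 *	int ret;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (__u64)(unsigned long)cmd;
 *	arg.command_size = cmd_size;
 *	arg.fence_rep = (__u64)(unsigned long)&fence;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.imported_fence_fd = -1;
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *
 * On success, fence.handle can be passed to the fence wait / signaled /
 * unref ioctls below.
 */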

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from
 * the interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
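
/*
 * Example: allocating and mapping a buffer object (an editor's sketch;
 * "fd" and libdrm's drmCommandWriteRead() are assumed). Since @req and
 * @rep overlay each other in the union, the requested size is kept in a
 * local variable; the returned map_handle is the offset to hand to mmap()
 * on the same device fd:
 *
 *	union drm_vmw_alloc_bo_arg arg;
 *	__u32 size = 65536;
 *	void *map = NULL;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = size;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg)) == 0)
 *		map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.rep.map_handle);
 */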

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead, they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
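
/*
 * Example: enabling an overlay stream (an editor's sketch; "fd", a
 * "stream_id" claimed with DRM_VMW_CLAIM_STREAM, a "bo_handle" from
 * DRM_VMW_ALLOC_BO holding the video frame of "frame_size" bytes, a
 * host-understood "format" value, and libdrm's drmCommandWrite() are all
 * assumed; the pitch shown fits a packed 16-bit-per-pixel format):
 *
 *	struct drm_vmw_control_stream_arg arg;
 *	int ret;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.stream_id = stream_id;
 *	arg.enabled = 1;
 *	arg.handle = bo_handle;
 *	arg.format = format;
 *	arg.width = 320;
 *	arg.height = 240;
 *	arg.size = frame_size;
 *	arg.pitch[0] = 320 * 2;
 *	arg.src.w = arg.dst.w = 320;
 *	arg.src.h = arg.dst.h = 240;
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
 */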

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS  (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
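
/*
 * Example: waiting for the EXEC flag on a fence returned by execbuf, and
 * dropping the fence object in the same call (an editor's sketch; "fd", a
 * "fence" struct drm_vmw_fence_rep filled in by DRM_VMW_EXECBUF, and
 * libdrm's drmCommandWriteRead() are assumed). The memset() leaves
 * @cookie_valid zeroed, as required on a first call:
 *
 *	struct drm_vmw_fence_wait_arg arg;
 *	int ret;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = fence.handle;
 *	arg.timeout_us = 10000000;
 *	arg.lazy = 1;
 *	arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *	arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *
 *	ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &arg, sizeof(arg));
 */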

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled at the time of the call.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
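
/*
 * Example: queueing an event on a fence and reaping it with read() on the
 * drm file descriptor (an editor's sketch; "fd", a "fence_handle" from
 * DRM_VMW_EXECBUF, and libdrm's drmCommandWriteRead() are assumed, and
 * "my_cookie" / handle_signaled() are placeholders. A real event loop
 * would read into a larger buffer and walk the contained drm_events):
 *
 *	struct drm_vmw_fence_event_arg arg;
 *	struct drm_vmw_event_fence ev;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = fence_handle;
 *	arg.flags = DRM_VMW_FE_FLAG_REQ_TIME;
 *	arg.user_data = my_cookie;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_FENCE_EVENT, &arg,
 *				sizeof(arg)) == 0 &&
 *	    read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
 *	    ev.base.type == DRM_VMW_EVENT_FENCE_SIGNALED)
 *		handle_signaled(ev.user_data, ev.tv_sec, ev.tv_usec);
 */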

/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is 0, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
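
/*
 * Example: advertising a two-output layout (an editor's sketch; "fd" and
 * libdrm's drmCommandWrite() are assumed). As elsewhere in this
 * interface, @rects carries a user-space pointer cast to an __u64:
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0, .y = 0, .w = 1920, .h = 1080 },
 *		{ .x = 1920, .y = 0, .w = 1280, .h = 1024 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(unsigned long)rects,
 *	};
 *	int ret;
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */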

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
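
/*
 * Example: creating a guest-backed surface and letting the driver
 * allocate the backup buffer (an editor's sketch; "fd" and libdrm's
 * drmCommandWriteRead() are assumed, and "svga3d_flags" / "format" stand
 * in for values from the device's SVGA3D headers, as does
 * SVGA3D_INVALID_ID, which is not defined in this file):
 *
 *	union drm_vmw_gb_surface_create_arg arg;
 *	__u32 handle = 0;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.svga3d_flags = svga3d_flags;
 *	arg.req.format = format;
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE, &arg,
 *				sizeof(arg)) == 0)
 *		handle = arg.rep.handle;
 */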

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
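
/*
 * Example: bracketing CPU writes to a mapped buffer with a synccpu
 * grab / release pair (an editor's sketch; "fd", a "bo_handle", its
 * mmap()ed address "map", a "data" buffer of "data_size" bytes, and
 * libdrm's drmCommandWrite() are assumed):
 *
 *	struct drm_vmw_synccpu_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.flags = drm_vmw_synccpu_write;
 *	arg.handle = bo_handle;
 *
 *	if (drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg)) == 0) {
 *		memcpy(map, data, data_size);
 *		arg.op = drm_vmw_synccpu_release;
 *		drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	}
 */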

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

#if defined(__cplusplus)
}
#endif

#endif