1 /************************************************************************** 2 * 3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS       4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	int32_t cid;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Unreference a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion.
 * The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an uint64_t for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	uint64_t size_addr;
	int32_t shareable;
	int32_t scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	int32_t sid;
	uint32_t pad64;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	uint32_t width;
	uint32_t height;
	uint32_t depth;
	uint32_t pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation,
 * a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 1

struct drm_vmw_execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t throttle_us;
	uint64_t fence_rep;
	uint32_t version;
	uint32_t flags;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK, The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	uint32_t handle;
	uint32_t mask;
	uint32_t seqno;
	uint32_t passed_seqno;
	uint32_t pad64;
	int32_t error;
};

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;
	uint32_t pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;
	uint32_t handle;
	uint32_t cur_gmr_id;
	uint32_t cur_gmr_offset;
	uint32_t pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 *
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units does not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. But instead only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	uint32_t stream_id;
	uint32_t enabled;

	uint32_t flags;
	uint32_t color_key;

	uint32_t handle;
	uint32_t offset;
	int32_t format;
	uint32_t size;
	uint32_t width;
	uint32_t height;
	uint32_t pitch[3];

	uint32_t pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;
	uint32_t crtc_id;
	int32_t xpos;
	int32_t ypos;
	int32_t xhot;
	int32_t yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	uint32_t stream_id;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	uint64_t buffer;
	uint32_t max_size;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream
 * have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands
 * in the buffer given to the EXECBUF ioctl returning the fence object handle
 * are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	uint32_t handle;
	int32_t cookie_valid;
	uint64_t kernel_cookie;
	uint64_t timeout_us;
	int32_t lazy;
	int32_t flags;
	int32_t wait_options;
	int32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest sequence passed so far. Can be used to signal
 * the EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	uint32_t handle;
	uint32_t flags;
	int32_t signaled;
	uint32_t passed_seqno;
	uint32_t signaled_flags;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	uint32_t handle;
	uint32_t pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	uint64_t user_data;
	uint32_t tv_sec;
	uint32_t tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	uint64_t fence_rep;
	uint64_t user_data;
	uint32_t handle;
	uint32_t flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	uint32_t fb_id;
	uint32_t sid;
	int32_t dest_x;
	int32_t dest_y;
	uint64_t clips_ptr;
	uint32_t num_clips;
	uint32_t pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	uint32_t fb_id;
	uint32_t num_clips;
	uint64_t clips_ptr;
	uint64_t fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	uint32_t num_outputs;
	uint32_t pad64;
	uint64_t rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
	drm_vmw_shader_type_gs
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	uint32_t size;
	uint32_t buffer_handle;
	uint32_t shader_handle;
	uint64_t offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	uint32_t svga3d_flags;
	uint32_t format;
	uint32_t mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	uint32_t multisample_count;
	uint32_t autogen_filter;
	uint32_t buffer_handle;
	uint32_t pad64;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that references the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};

#endif