/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "ttm_lock.h"
#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>


#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20210218"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 18
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

#define VMWGFX_PCI_ID_SVGA2 0x0405

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};

/**
 * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct rb_root res_tree;
	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct vmw_bo_dirty *dirty;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether to validate this buffer object as a MOB.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;


/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @used_prio: The eviction priority this resource is currently registered
 * with on its backup buffer object.
 * @backup_size: Backup buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
 * by resource reserved.
 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 * Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @backup: The backup buffer if any. Protected by resource reserved.
 * @backup_offset: Offset into the backup buffer if any. Protected by resource
 * reserved. Note that only a few resource types can have a @backup_offset
 * different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * backup buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex.
 * @dirty: Dirty-tracking structure for this resource, if dirty tracking
 * is enabled.
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long backup_size;
	u32 res_dirty : 1;
	u32 backup_dirty : 1;
	u32 coherent : 1;
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB surfaces, only the first
 * index is used.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For a cubemap
 * texture, number of faces * array_size. This should be 0 for pre-SM4
 * devices.
 * @num_sizes: Size of @sizes. For GB surfaces this should always be 1.
 * @buffer_byte_stride: Buffer byte stride.
 * @base_size: Surface dimensions.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest-backed surfaces.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 quality_level;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};
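
/*
 * Usage sketch (illustrative, not driver code): filling in a minimal
 * struct vmw_surface_metadata for a single-mip 2D guest-backed surface
 * and handing it to vmw_gb_surface_define(), declared further down in
 * this header. The format and dimensions are placeholder values.
 *
 *	struct vmw_surface_metadata metadata = {
 *		.format = SVGA3D_X8R8G8B8,
 *		.mip_levels[0] = 1,
 *		.num_sizes = 1,
 *		.base_size = { .width = 1024, .height = 768, .depth = 1 },
 *		.scanout = true,
 *	};
 *	struct vmw_surface *srf;
 *	int ret = vmw_gb_surface_define(dev_priv, 0, &metadata, &srf);
 */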

/**
 * struct vmw_surface - Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	bool dx;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @private: Pointer to validation-private data for this resource.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};

/**
 * enum vmw_dma_map_mode - Indicate how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used for this scatter/gather table.
 * @pages: Array of page pointers, depending on @mode.
 * @addrs: Array of DMA addresses, if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of pages in the table.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

struct vmw_validation_context;
struct vmw_ctx_validation_info;

/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @res_ht_initialized: Whether the hash table has been initialized
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel;
	struct vmw_fpriv *fp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_buffer_object *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_buffer_object *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the table is enabled on the device.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

/**
 * enum vmw_sm_type - Graphics context capability supported by device.
 * @VMW_SM_LEGACY: Pre-DX context.
 * @VMW_SM_4: Context supports up to SM4.
 * @VMW_SM_4_1: Context supports up to SM4_1.
 * @VMW_SM_5: Context supports up to SM5.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_MAX
};

struct vmw_private {
	struct drm_device drm;
	struct ttm_device bdev;

	struct vmw_fifo_state fifo;

	struct drm_vma_offset_manager vma_manager;
	u32 vmw_chipset;
	resource_size_t io_start;
	resource_size_t vram_start;
	resource_size_t vram_size;
	resource_size_t prim_bb_mem;
	u32 *fifo_mem;
	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool assume_16bpp;

	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	bool enable_fb;

	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	/* Validation memory reservation */
	struct vmw_validation_mem vvm;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
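
/*
 * Usage sketch: reading and writing an SVGA device register through the
 * index/value port pair above. The register names come from the SVGA
 * device headers and are illustrative here.
 *
 *	u32 id = vmw_read(dev_priv, SVGA_REG_ID);
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 */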

/**
 * has_sm4_context - Does the device support SM4 contexts.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM4 contexts, false otherwise.
 */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4);
}

/**
 * has_sm4_1_context - Does the device support SM4_1 contexts.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM4_1 contexts, false otherwise.
 */
static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4_1);
}

/**
 * has_sm5_context - Does the device support SM5 contexts.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM5 contexts, false otherwise.
 */
static inline bool has_sm5_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5);
}

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv *
				      converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_resource *old_mem,
				  struct ttm_resource *new_mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);
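
/*
 * Typical resource validation sequence (sketch, error handling
 * abbreviated): reserve the resource, validate it so its backup buffer
 * and hardware state are brought up to date, then unreserve it without
 * switching the backup buffer.
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_resource_validate(res, true, false);
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */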

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !RB_EMPTY_NODE(&res->mob_node);
}

/**
 * vmw_user_resource_noref_release - release a user resource pointer looked up
 * without reference
 */
static inline void vmw_user_resource_noref_release(void)
{
	ttm_base_object_noref_release();
}

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
				unsigned long size,
				struct ttm_placement *placement,
				struct ttm_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible, bool pin,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);

/**
 * vmw_user_bo_noref_release - release a buffer object pointer looked up
 * without reference
 */
static inline void vmw_user_bo_noref_release(void)
{
	ttm_base_object_noref_release();
}

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_buffer_object
 */
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			return;
		}
	}

	vbo->base.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest eviction priority
 * of any attached resource to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest eviction priority
 * of any remaining attached resource to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
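
/*
 * Sketch of how the priority counts stay balanced: a resource adds its
 * eviction priority to its backing buffer object while attached and
 * removes the same priority when detached, so vmw_bo_prio_adjust() can
 * always derive the highest priority in use.
 *
 *	vmw_bo_prio_add(vbo, res->used_prio);
 *	...
 *	vmw_bo_prio_del(vbo, res->used_prio);
 */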

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern __poll_t vmw_fops_poll(struct file *filp,
			      struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);

#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)			\
({									\
	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({		\
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",	\
			  __func__, (unsigned int) __bytes);		\
		NULL;							\
	});								\
})

#define VMW_CMD_RESERVE(__priv, __bytes)				\
	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
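
/*
 * Typical FIFO command emission (sketch): reserve fifo space, fill in
 * the command, then commit. The command structure is illustrative.
 *
 *	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in *cmd ...
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */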

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
					size_t gran);

/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
				      unsigned long bo_size,
				      struct ttm_buffer_object **bo_p);

extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
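
/*
 * Sketch of one possible iteration pattern over the DMA addresses of a
 * buffer object's scatter/gather table:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		... program addr into the device ...
 *	}
 */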

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);
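
/*
 * Waiter accounting sketch: callers bracket a wait with add/remove so
 * the IRQ mask stays enabled only while someone is actually waiting.
 * The flag and counter shown are the ones used for command buffer IRQs.
 *
 *	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 *			       &dev_priv->cmdbuf_waiters);
 *	... wait for the command buffer interrupt ...
 *	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 *				  &dev_priv->cmdbuf_waiters);
 */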

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
int vmw_enable_vblank(struct drm_crtc *crtc);
void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  uint32_t user_accounting_size,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Streamoutput management
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
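
/*
 * Command buffer emission through the command buffer manager follows the
 * same reserve/commit pattern as the FIFO (sketch; assumes the ERR_PTR
 * convention for vmw_cmdbuf_reserve() failures):
 *
 *	cmd = vmw_cmdbuf_reserve(man, size, ctx_id, true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... fill in the command ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */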

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	\
	.line = 0,				\
	.line_offset = 0,			\
	.rect = { .x1 = INT_MAX/2,		\
		  .y1 = INT_MAX/2,		\
		  .x2 = INT_MIN/2,		\
		  .y2 = INT_MIN/2 },		\
	.cpp = _cpp,				\
	.do_cpy = vmw_diff_memcpy,		\
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
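
/*
 * Sketch: a plain (non-diff) CPU blit between two buffer objects using
 * the full-copy initializer. Offsets, strides and dimensions are
 * placeholders.
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_INITIALIZER;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
 *			      src_bo, src_offset, src_stride,
 *			      width, height, &diff);
 */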

/* Host messaging - vmwgfx_msg.c */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
int vmw_host_log(const char *log);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...)				\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size);
#endif

/* Transparent hugepage support - vmwgfx_thp.c */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int vmw_thp_init(struct vmw_private *dev_priv);
void vmw_thp_fini(struct vmw_private *dev_priv);
#endif

/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...)					\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
	struct vmw_buffer_object *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		if (tmp_buf->base.pin_count > 0)
			ttm_bo_unpin(&tmp_buf->base);
		ttm_bo_put(&tmp_buf->base);
	}
}

static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	ttm_bo_get(&buf->base);
	return buf;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return &ttm_mem_glob;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
 *
 * @vmw: The device private.
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

/**
 * vmw_fifo_mem_write - Perform a MMIO write to the fifo memory
 *
 * @vmw: The device private.
 * @fifo_reg: The fifo register to write to.
 * @value: The value to write.
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
#endif /* _VMWGFX_DRV_H_ */