/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "ttm_lock.h"
#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>


#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20210218"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 18
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

#define VMWGFX_PCI_ID_SVGA2 0x0405

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};

/**
 * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct rb_root res_tree;
	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct vmw_bo_dirty *dirty;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether to validate this buffer as a MOB.
 *
 * This structure also contains driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
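
/*
 * Illustrative sketch (not a declaration from this header): since
 * struct vmw_buffer_object embeds its TTM object as @base, code holding
 * a struct ttm_buffer_object pointer known to be driver-owned can
 * recover the wrapper with container_of(). The helper name below is
 * hypothetical; the same pattern is used later in this header by
 * vmw_res_to_srf() for surfaces:
 *
 *	static inline struct vmw_buffer_object *
 *	example_to_vmw_bo(struct ttm_buffer_object *bo)
 *	{
 *		return container_of(bo, struct vmw_buffer_object, base);
 *	}
 */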

/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @used_prio: The eviction priority currently used by this resource.
 * @backup_size: Backup buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
 * by resource reserved.
 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 * Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @backup: The backup buffer if any. Protected by resource reserved.
 * @backup_offset: Offset into the backup buffer if any. Protected by resource
 * reserved. Note that only a few resource types can have a @backup_offset
 * different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * backup buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @dirty: Structure for resource dirty-tracking.
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long backup_size;
	u32 res_dirty : 1;
	u32 backup_dirty : 1;
	u32 coherent : 1;
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB surfaces only the first
 * index is used.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For a cubemap
 * texture, number of faces * array_size. This should be 0 for pre-SM4
 * devices.
 * @buffer_byte_stride: Buffer byte stride.
 * @num_sizes: Size of @sizes. For GB surfaces this should always be 1.
 * @base_size: Surface dimensions.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest-backed surfaces.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 quality_level;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};
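
/*
 * Illustrative sketch: filling in metadata for a simple single-mip,
 * non-multisampled 2D guest-backed surface. The format value is an
 * assumption picked for the example; real callers derive it from the
 * request they are servicing (see vmw_gb_surface_define() later in
 * this header):
 *
 *	struct vmw_surface_metadata metadata = {0};
 *
 *	metadata.format = SVGA3D_R8G8B8A8_UNORM;
 *	metadata.mip_levels[0] = 1;	(GB surfaces use index 0 only)
 *	metadata.multisample_count = 1;	(no multisampling)
 *	metadata.array_size = 1;
 *	metadata.num_sizes = 1;		(always 1 for GB surfaces)
 *	metadata.base_size.width = 640;
 *	metadata.base_size.height = 480;
 *	metadata.base_size.depth = 1;
 *	metadata.scanout = true;
 */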

/**
 * struct vmw_surface - Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	bool dx;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @private: Pointer to private validation-time information.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used.
 * @pages: Array of page pointers, if used.
 * @addrs: DMA addresses of the pages, if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of pages.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

struct vmw_validation_context;
struct vmw_ctx_validation_info;

/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @res_ht_initialized: Whether the hash table has been initialized
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel;
	struct vmw_fpriv *fp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_buffer_object *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_buffer_object *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

/*
 * struct vmw_otable - Guest memory object (MOB) table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the otable is enabled.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

/**
 * enum vmw_sm_type - Graphics context capability supported by the device.
 * @VMW_SM_LEGACY: Pre-DX context.
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
 * @VMW_SM_5: Context support up to SM5.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_MAX
};

struct vmw_private {
	struct drm_device drm;
	struct ttm_device bdev;

	struct vmw_fifo_state fifo;

	struct drm_vma_offset_manager vma_manager;
	u32 vmw_chipset;
	resource_size_t io_start;
	resource_size_t vram_start;
	resource_size_t vram_size;
	resource_size_t prim_bb_mem;
	u32 *fifo_mem;
	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool assume_16bpp;

	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	bool enable_fb;

	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	/* Validation memory reservation */
	struct vmw_validation_mem vvm;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
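
/*
 * Illustrative sketch: reading and writing an SVGA device register
 * through the index/value port pair. The register indices and flag
 * values are assumed to come from the SVGA device headers (svga_reg.h);
 * the exact names used here are for illustration:
 *
 *	u32 id = vmw_read(dev_priv, SVGA_REG_ID);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
 *
 * Each call takes and releases @dev_priv->hw_lock internally, so no
 * extra locking is needed around individual accesses.
 */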
/**
 * has_sm4_context - Does the device support SM4 contexts.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM4 contexts, false otherwise.
 */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4);
}

/**
 * has_sm4_1_context - Does the device support SM4_1 contexts.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM4_1 contexts, false otherwise.
 */
static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4_1);
}

/**
 * has_sm5_context - Does the device support SM5 contexts.
 * @dev_priv: Device private.
 *
 * Return: true if the device supports SM5 contexts, false otherwise.
 */
static inline bool has_sm5_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5);
}
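
/*
 * Illustrative sketch: the helpers above are meant for feature gating.
 * Because the enum vmw_sm_type values are ordered, each helper also
 * covers all higher capability levels:
 *
 *	if (has_sm5_context(dev_priv))
 *		;	(SM5 paths; implies SM4.1 and SM4)
 *	else if (has_sm4_1_context(dev_priv))
 *		;	(SM4.1 paths)
 *	else if (has_sm4_context(dev_priv))
 *		;	(DX/SM4 paths)
 *	else
 *		;	(legacy, pre-DX contexts only)
 */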
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv *
				      converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_resource *old_mem,
				  struct ttm_resource *new_mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !RB_EMPTY_NODE(&res->mob_node);
}

/**
 * vmw_user_resource_noref_release - release a user resource pointer looked up
 * without reference
 */
static inline void vmw_user_resource_noref_release(void)
{
	ttm_base_object_noref_release();
}

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
				unsigned long size,
				struct ttm_placement *placement,
				struct ttm_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible, bool pin,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);

/**
 * vmw_user_bo_noref_release - release a buffer object pointer looked up
 * without reference
 */
static inline void vmw_user_bo_noref_release(void)
{
	ttm_base_object_noref_release();
}

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_buffer_object
 */
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			return;
		}
	}

	vbo->base.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction
 * priority to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction
 * priority to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
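
/*
 * Illustrative sketch: resource code pairs these helpers around
 * attach/detach so the buffer object always carries the highest
 * eviction priority of any attached resource:
 *
 *	vmw_bo_prio_add(vbo, res->used_prio);	(on attach)
 *	...
 *	vmw_bo_prio_del(vbo, res->used_prio);	(on detach)
 *
 * Note that vmw_bo_prio_adjust() is only invoked on the 0 <-> 1
 * transitions of the per-priority counters.
 */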
/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern __poll_t vmw_fops_poll(struct file *filp,
			      struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);

#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)			      \
({									      \
	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? :		      \
		({							      \
			DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
				  __func__, (unsigned int) __bytes);	      \
			NULL;						      \
		});							      \
})

#define VMW_CMD_RESERVE(__priv, __bytes)				      \
	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
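
/*
 * Illustrative sketch of the reserve/commit pattern these macros
 * support. The command body type is a placeholder, and the header id
 * assignment is elided:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdDXSomething body;	(hypothetical command)
 *	} *cmd;
 *
 *	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header.id = ...;
 *	cmd->header.size = sizeof(cmd->body);
 *	(fill in cmd->body)
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */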
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
					size_t gran);

/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
				      unsigned long bo_size,
				      struct ttm_buffer_object **bo_p);

extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
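
/*
 * Illustrative sketch: walking all pages of a bound buffer with the
 * page iterator, independently of whether the underlying storage is a
 * page array or a scatter-gather list. The iterator starts one step
 * before the first page, so vmw_piter_next() is called before each
 * access, including the first:
 *
 *	struct vmw_piter iter;
 *	dma_addr_t addr;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		addr = vmw_piter_dma_addr(&iter);
 *		(program one page at address addr)
 *	}
 */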
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);


/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
int vmw_enable_vblank(struct drm_crtc *crtc);
void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
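
/*
 * Illustrative sketch of a MOB lifecycle as suggested by the
 * declarations above (error handling elided):
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_data_pages);
 *
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *	...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */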
/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  uint32_t user_accounting_size,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);
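
/*
 * Illustrative sketch: translating a user-space surface handle into a
 * struct vmw_surface, using the converter above together with
 * vmw_user_resource_lookup_handle() and vmw_res_to_srf() declared
 * earlier in this header:
 *
 *	struct vmw_resource *res;
 *	struct vmw_surface *srf;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter,
 *					      &res);
 *	if (ret)
 *		return ret;
 *
 *	srf = vmw_res_to_srf(res);
 *	(use srf, then drop the reference:)
 *	vmw_resource_unreference(&res);
 */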
/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Streamoutput management
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);
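
/*
 * Illustrative sketch: command buffer managed resources are staged on
 * a local list and then either committed or reverted, depending on
 * whether command submission succeeded:
 *
 *	LIST_HEAD(staged);
 *
 *	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, user_key,
 *				 res, &staged);
 *	...
 *	if (ret)
 *		vmw_cmdbuf_res_revert(&staged);
 *	else
 *		vmw_cmdbuf_res_commit(&staged);
 */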
/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	\
	.line = 0,				\
	.line_offset = 0,			\
	.rect = { .x1 = INT_MAX/2,		\
		  .y1 = INT_MAX/2,		\
		  .x2 = INT_MIN/2,		\
		  .y2 = INT_MIN/2 },		\
	.cpp = _cpp,				\
	.do_cpy = vmw_diff_memcpy,		\
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
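
/*
 * Illustrative sketch: a difference-tracking CPU blit at 4 bytes per
 * pixel. On return, diff.rect bounds the region that actually changed,
 * which callers can use to minimize the damage area:
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
 *			      src_bo, src_offset, src_stride,
 *			      width, height, &diff);
 */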
/* Host messaging - vmwgfx_msg.c */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
int vmw_host_log(const char *log);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...)					\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size);
#endif

/* Transparent hugepage support - vmwgfx_thp.c */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int vmw_thp_init(struct vmw_private *dev_priv);
void vmw_thp_fini(struct vmw_private *dev_priv);
#endif

/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...)						\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
	struct vmw_buffer_object *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL)
		ttm_bo_put(&tmp_buf->base);
}

static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	ttm_bo_get(&buf->base);
	return buf;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return &ttm_mem_glob;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
 *
 * @vmw: The device private.
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

/**
 * vmw_fifo_mem_write - Perform a MMIO write to volatile memory
 *
 * @vmw: The device private.
 * @fifo_reg: The fifo register to write to
 * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
#endif