/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20170221"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 12
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether the buffer must be validated as a MOB.
 *
 * This structure also carries driver-private validation info on top of
 * the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;
	struct mutex global_kms_state_mutex;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
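
/*
 * Usage sketch (illustrative only, not part of the driver): a register
 * read-modify-write built on the two helpers above. Each helper takes and
 * releases hw_lock internally, so the caller needs no extra locking.
 * SVGA_REG_TRACES stands in here for any register index from the SVGA
 * device headers.
 *
 *	u32 traces = vmw_read(dev_priv, SVGA_REG_TRACES);
 *
 *	if (traces != dev_priv->traces_state)
 *		vmw_write(dev_priv, SVGA_REG_TRACES, dev_priv->traces_state);
 */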

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
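
/*
 * Typical use of the reserve/commit pair above (illustrative sketch; the
 * command layout is a placeholder, real commands come from the SVGA device
 * headers). vmw_fifo_reserve() returns NULL on failure, and a successful
 * reserve is expected to be paired with a commit (or commit_flush) of at
 * most the reserved size:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	... fill in *cmd ...
 *
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */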

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
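
/*
 * Iteration sketch (illustrative only, not part of the driver): walking the
 * DMA addresses of a bound buffer regardless of whether the backing
 * vmw_sg_table uses page/address arrays or a scatter-gather list. The
 * iterator is assumed to start just before the first page, so
 * vmw_piter_next() is called once before each page is consumed:
 *
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		... hand addr to the device ...
 *	}
 */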

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages,
			int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
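
/*
 * Reference handling sketch (illustrative only): vmw_dmabuf_reference()
 * takes a new reference and returns the buffer, while
 * vmw_dmabuf_unreference() drops a reference and clears the caller's
 * pointer so it cannot be reused by mistake. The surface pair above
 * follows the same convention.
 *
 *	struct vmw_dma_buffer *buf = vmw_dmabuf_reference(res->backup);
 *
 *	... use buf ...
 *
 *	vmw_dmabuf_unreference(&buf);	(buf is now NULL)
 */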

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform an MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform an MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}

/**
 * Host logging - vmwgfx_msg.c
 */
extern int vmw_host_log(const char *log);

#endif