1 /************************************************************************** 2 * 3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 #ifndef _VMWGFX_DRV_H_ 29 #define _VMWGFX_DRV_H_ 30 31 #include "vmwgfx_reg.h" 32 #include <drm/drmP.h> 33 #include <drm/vmwgfx_drm.h> 34 #include <drm/drm_hashtab.h> 35 #include <linux/suspend.h> 36 #include <drm/ttm/ttm_bo_driver.h> 37 #include <drm/ttm/ttm_object.h> 38 #include <drm/ttm/ttm_lock.h> 39 #include <drm/ttm/ttm_execbuf_util.h> 40 #include <drm/ttm/ttm_module.h> 41 #include "vmwgfx_fence.h" 42 43 #define VMWGFX_DRIVER_DATE "20120209" 44 #define VMWGFX_DRIVER_MAJOR 2 45 #define VMWGFX_DRIVER_MINOR 4 46 #define VMWGFX_DRIVER_PATCHLEVEL 0 47 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 48 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 49 #define VMWGFX_MAX_RELOCATIONS 2048 50 #define VMWGFX_MAX_VALIDATIONS 2048 51 #define VMWGFX_MAX_DISPLAYS 16 52 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 53 54 #define VMW_PL_GMR TTM_PL_PRIV0 55 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 56 57 #define VMW_RES_CONTEXT ttm_driver_type0 58 #define VMW_RES_SURFACE ttm_driver_type1 59 #define VMW_RES_STREAM ttm_driver_type2 60 #define VMW_RES_FENCE ttm_driver_type3 61 62 struct vmw_fpriv { 63 struct drm_master *locked_master; 64 struct ttm_object_file *tfile; 65 struct list_head fence_events; 66 }; 67 68 struct vmw_dma_buffer { 69 struct ttm_buffer_object base; 70 struct list_head res_list; 71 }; 72 73 /** 74 * struct vmw_validate_buffer - Carries validation info about buffers. 75 * 76 * @base: Validation info for TTM. 77 * @hash: Hash entry for quick lookup of the TTM buffer object. 78 * 79 * This structure contains also driver private validation info 80 * on top of the info needed by TTM. 81 */ 82 struct vmw_validate_buffer { 83 struct ttm_validate_buffer base; 84 struct drm_hash_item hash; 85 }; 86 87 struct vmw_res_func; 88 struct vmw_resource { 89 struct kref kref; 90 struct vmw_private *dev_priv; 91 int id; 92 bool avail; 93 unsigned long backup_size; 94 bool res_dirty; /* Protected by backup buffer reserved */ 95 bool backup_dirty; /* Protected by backup buffer reserved */ 96 struct vmw_dma_buffer *backup; 97 unsigned long backup_offset; 98 const struct vmw_res_func *func; 99 struct list_head lru_head; /* Protected by the resource lock */ 100 struct list_head mob_head; /* Protected by @backup reserved */ 101 void (*res_free) (struct vmw_resource *res); 102 void (*hw_destroy) (struct vmw_resource *res); 103 }; 104 105 enum vmw_res_type { 106 vmw_res_context, 107 vmw_res_surface, 108 vmw_res_stream, 109 vmw_res_max 110 }; 111 112 struct vmw_cursor_snooper { 113 struct drm_crtc *crtc; 114 size_t age; 115 uint32_t *image; 116 }; 117 118 struct vmw_framebuffer; 119 struct vmw_surface_offset; 120 121 struct vmw_surface { 122 struct vmw_resource res; 123 uint32_t flags; 124 uint32_t format; 125 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 126 struct drm_vmw_size base_size; 127 struct drm_vmw_size *sizes; 128 uint32_t num_sizes; 129 bool scanout; 130 /* TODO so far just a extra pointer */ 131 struct vmw_cursor_snooper snooper; 132 struct vmw_surface_offset *offsets; 133 SVGA3dTextureFilter autogen_filter; 134 uint32_t multisample_count; 135 }; 136 137 struct vmw_marker_queue { 138 struct list_head head; 139 struct timespec lag; 140 struct timespec lag_time; 141 spinlock_t lock; 142 }; 143 144 struct vmw_fifo_state { 145 unsigned long reserved_size; 146 __le32 *dynamic_buffer; 147 __le32 *static_buffer; 148 unsigned long static_buffer_size; 149 bool using_bounce_buffer; 150 uint32_t capabilities; 151 struct mutex fifo_mutex; 152 struct rw_semaphore rwsem; 153 struct vmw_marker_queue marker_queue; 154 }; 155 156 struct vmw_relocation { 157 SVGAGuestPtr *location; 158 uint32_t index; 159 }; 160 161 /** 162 * struct vmw_res_cache_entry - resource information cache entry 163 * 164 * @valid: Whether the entry is valid, which also implies that the execbuf 165 * code holds a reference to the resource, and it's placed on the 166 * validation list. 167 * @handle: User-space handle of a resource. 168 * @res: Non-ref-counted pointer to the resource. 169 * 170 * Used to avoid frequent repeated user-space handle lookups of the 171 * same resource. 172 */ 173 struct vmw_res_cache_entry { 174 bool valid; 175 uint32_t handle; 176 struct vmw_resource *res; 177 struct vmw_resource_val_node *node; 178 }; 179 180 /** 181 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings. 182 */ 183 enum vmw_dma_map_mode { 184 vmw_dma_phys, /* Use physical page addresses */ 185 vmw_dma_alloc_coherent, /* Use TTM coherent pages */ 186 vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */ 187 vmw_dma_map_bind, /* Unmap from DMA just before unbind */ 188 vmw_dma_map_max 189 }; 190 191 /** 192 * struct vmw_sg_table - Scatter/gather table for binding, with additional 193 * device-specific information. 194 * 195 * @sgt: Pointer to a struct sg_table with binding information 196 * @num_regions: Number of regions with device-address contigous pages 197 */ 198 struct vmw_sg_table { 199 enum vmw_dma_map_mode mode; 200 struct page **pages; 201 const dma_addr_t *addrs; 202 struct sg_table *sgt; 203 unsigned long num_regions; 204 unsigned long num_pages; 205 }; 206 207 /** 208 * struct vmw_piter - Page iterator that iterates over a list of pages 209 * and DMA addresses that could be either a scatter-gather list or 210 * arrays 211 * 212 * @pages: Array of page pointers to the pages. 213 * @addrs: DMA addresses to the pages if coherent pages are used. 214 * @iter: Scatter-gather page iterator. Current position in SG list. 215 * @i: Current position in arrays. 216 * @num_pages: Number of pages total. 217 * @next: Function to advance the iterator. Returns false if past the list 218 * of pages, true otherwise. 219 * @dma_address: Function to return the DMA address of the current page. 220 */ 221 struct vmw_piter { 222 struct page **pages; 223 const dma_addr_t *addrs; 224 struct sg_page_iter iter; 225 unsigned long i; 226 unsigned long num_pages; 227 bool (*next)(struct vmw_piter *); 228 dma_addr_t (*dma_address)(struct vmw_piter *); 229 struct page *(*page)(struct vmw_piter *); 230 }; 231 232 struct vmw_sw_context{ 233 struct drm_open_hash res_ht; 234 bool res_ht_initialized; 235 bool kernel; /**< is the called made from the kernel */ 236 struct ttm_object_file *tfile; 237 struct list_head validate_nodes; 238 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; 239 uint32_t cur_reloc; 240 struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; 241 uint32_t cur_val_buf; 242 uint32_t *cmd_bounce; 243 uint32_t cmd_bounce_size; 244 struct list_head resource_list; 245 uint32_t fence_flags; 246 struct ttm_buffer_object *cur_query_bo; 247 struct list_head res_relocations; 248 uint32_t *buf_start; 249 struct vmw_res_cache_entry res_cache[vmw_res_max]; 250 struct vmw_resource *last_query_ctx; 251 bool needs_post_query_barrier; 252 struct vmw_resource *error_resource; 253 }; 254 255 struct vmw_legacy_display; 256 struct vmw_overlay; 257 258 struct vmw_master { 259 struct ttm_lock lock; 260 struct mutex fb_surf_mutex; 261 struct list_head fb_surf; 262 }; 263 264 struct vmw_vga_topology_state { 265 uint32_t width; 266 uint32_t height; 267 uint32_t primary; 268 uint32_t pos_x; 269 uint32_t pos_y; 270 }; 271 272 struct vmw_private { 273 struct ttm_bo_device bdev; 274 struct ttm_bo_global_ref bo_global_ref; 275 struct drm_global_reference mem_global_ref; 276 277 struct vmw_fifo_state fifo; 278 279 struct drm_device *dev; 280 unsigned long vmw_chipset; 281 unsigned int io_start; 282 uint32_t vram_start; 283 uint32_t vram_size; 284 uint32_t mmio_start; 285 uint32_t mmio_size; 286 uint32_t fb_max_width; 287 uint32_t fb_max_height; 288 uint32_t initial_width; 289 uint32_t initial_height; 290 __le32 __iomem *mmio_virt; 291 int mmio_mtrr; 292 uint32_t capabilities; 293 uint32_t max_gmr_descriptors; 294 uint32_t max_gmr_ids; 295 uint32_t max_gmr_pages; 296 uint32_t memory_size; 297 bool has_gmr; 298 struct mutex hw_mutex; 299 300 /* 301 * VGA registers. 302 */ 303 304 struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; 305 uint32_t vga_width; 306 uint32_t vga_height; 307 uint32_t vga_bpp; 308 uint32_t vga_bpl; 309 uint32_t vga_pitchlock; 310 311 uint32_t num_displays; 312 313 /* 314 * Framebuffer info. 315 */ 316 317 void *fb_info; 318 struct vmw_legacy_display *ldu_priv; 319 struct vmw_screen_object_display *sou_priv; 320 struct vmw_overlay *overlay_priv; 321 322 /* 323 * Context and surface management. 324 */ 325 326 rwlock_t resource_lock; 327 struct idr res_idr[vmw_res_max]; 328 /* 329 * Block lastclose from racing with firstopen. 330 */ 331 332 struct mutex init_mutex; 333 334 /* 335 * A resource manager for kernel-only surfaces and 336 * contexts. 337 */ 338 339 struct ttm_object_device *tdev; 340 341 /* 342 * Fencing and IRQs. 343 */ 344 345 atomic_t marker_seq; 346 wait_queue_head_t fence_queue; 347 wait_queue_head_t fifo_queue; 348 int fence_queue_waiters; /* Protected by hw_mutex */ 349 int goal_queue_waiters; /* Protected by hw_mutex */ 350 atomic_t fifo_queue_waiters; 351 uint32_t last_read_seqno; 352 spinlock_t irq_lock; 353 struct vmw_fence_manager *fman; 354 uint32_t irq_mask; 355 356 /* 357 * Device state 358 */ 359 360 uint32_t traces_state; 361 uint32_t enable_state; 362 uint32_t config_done_state; 363 364 /** 365 * Execbuf 366 */ 367 /** 368 * Protected by the cmdbuf mutex. 369 */ 370 371 struct vmw_sw_context ctx; 372 struct mutex cmdbuf_mutex; 373 374 /** 375 * Operating mode. 376 */ 377 378 bool stealth; 379 bool enable_fb; 380 381 /** 382 * Master management. 383 */ 384 385 struct vmw_master *active_master; 386 struct vmw_master fbdev_master; 387 struct notifier_block pm_nb; 388 bool suspended; 389 390 struct mutex release_mutex; 391 uint32_t num_3d_resources; 392 393 /* 394 * Query processing. These members 395 * are protected by the cmdbuf mutex. 396 */ 397 398 struct ttm_buffer_object *dummy_query_bo; 399 struct ttm_buffer_object *pinned_bo; 400 uint32_t query_cid; 401 uint32_t query_cid_valid; 402 bool dummy_query_bo_pinned; 403 404 /* 405 * Surface swapping. The "surface_lru" list is protected by the 406 * resource lock in order to be able to destroy a surface and take 407 * it off the lru atomically. "used_memory_size" is currently 408 * protected by the cmdbuf mutex for simplicity. 409 */ 410 411 struct list_head res_lru[vmw_res_max]; 412 uint32_t used_memory_size; 413 414 /* 415 * DMA mapping stuff. 416 */ 417 enum vmw_dma_map_mode map_mode; 418 }; 419 420 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 421 { 422 return container_of(res, struct vmw_surface, res); 423 } 424 425 static inline struct vmw_private *vmw_priv(struct drm_device *dev) 426 { 427 return (struct vmw_private *)dev->dev_private; 428 } 429 430 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) 431 { 432 return (struct vmw_fpriv *)file_priv->driver_priv; 433 } 434 435 static inline struct vmw_master *vmw_master(struct drm_master *master) 436 { 437 return (struct vmw_master *) master->driver_priv; 438 } 439 440 static inline void vmw_write(struct vmw_private *dev_priv, 441 unsigned int offset, uint32_t value) 442 { 443 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 444 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); 445 } 446 447 static inline uint32_t vmw_read(struct vmw_private *dev_priv, 448 unsigned int offset) 449 { 450 uint32_t val; 451 452 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 453 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 454 return val; 455 } 456 457 int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga); 458 void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga); 459 460 /** 461 * GMR utilities - vmwgfx_gmr.c 462 */ 463 464 extern int vmw_gmr_bind(struct vmw_private *dev_priv, 465 const struct vmw_sg_table *vsgt, 466 unsigned long num_pages, 467 int gmr_id); 468 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); 469 470 /** 471 * Resource utilities - vmwgfx_resource.c 472 */ 473 struct vmw_user_resource_conv; 474 extern const struct vmw_user_resource_conv *user_surface_converter; 475 extern const struct vmw_user_resource_conv *user_context_converter; 476 477 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); 478 extern void vmw_resource_unreference(struct vmw_resource **p_res); 479 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); 480 extern int vmw_resource_validate(struct vmw_resource *res); 481 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); 482 extern bool vmw_resource_needs_backup(const struct vmw_resource *res); 483 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 484 struct drm_file *file_priv); 485 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 486 struct drm_file *file_priv); 487 extern int vmw_context_check(struct vmw_private *dev_priv, 488 struct ttm_object_file *tfile, 489 int id, 490 struct vmw_resource **p_res); 491 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, 492 struct ttm_object_file *tfile, 493 uint32_t handle, 494 struct vmw_surface **out_surf, 495 struct vmw_dma_buffer **out_buf); 496 extern int vmw_user_resource_lookup_handle( 497 struct vmw_private *dev_priv, 498 struct ttm_object_file *tfile, 499 uint32_t handle, 500 const struct vmw_user_resource_conv *converter, 501 struct vmw_resource **p_res); 502 extern void vmw_surface_res_free(struct vmw_resource *res); 503 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 504 struct drm_file *file_priv); 505 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 506 struct drm_file *file_priv); 507 extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, 508 struct drm_file *file_priv); 509 extern int vmw_surface_check(struct vmw_private *dev_priv, 510 struct ttm_object_file *tfile, 511 uint32_t handle, int *id); 512 extern int vmw_surface_validate(struct vmw_private *dev_priv, 513 struct vmw_surface *srf); 514 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); 515 extern int vmw_dmabuf_init(struct vmw_private *dev_priv, 516 struct vmw_dma_buffer *vmw_bo, 517 size_t size, struct ttm_placement *placement, 518 bool interuptable, 519 void (*bo_free) (struct ttm_buffer_object *bo)); 520 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, 521 struct ttm_object_file *tfile); 522 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 523 struct drm_file *file_priv); 524 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, 525 struct drm_file *file_priv); 526 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, 527 uint32_t cur_validate_node); 528 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); 529 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 530 uint32_t id, struct vmw_dma_buffer **out); 531 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 532 struct drm_file *file_priv); 533 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, 534 struct drm_file *file_priv); 535 extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, 536 struct ttm_object_file *tfile, 537 uint32_t *inout_id, 538 struct vmw_resource **out); 539 extern void vmw_resource_unreserve(struct vmw_resource *res, 540 struct vmw_dma_buffer *new_backup, 541 unsigned long new_backup_offset); 542 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, 543 struct ttm_mem_reg *mem); 544 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, 545 struct vmw_fence_obj *fence); 546 extern void vmw_resource_evict_all(struct vmw_private *dev_priv); 547 548 /** 549 * DMA buffer helper routines - vmwgfx_dmabuf.c 550 */ 551 extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv, 552 struct vmw_dma_buffer *bo, 553 struct ttm_placement *placement, 554 bool interruptible); 555 extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, 556 struct vmw_dma_buffer *buf, 557 bool pin, bool interruptible); 558 extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, 559 struct vmw_dma_buffer *buf, 560 bool pin, bool interruptible); 561 extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, 562 struct vmw_dma_buffer *bo, 563 bool pin, bool interruptible); 564 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv, 565 struct vmw_dma_buffer *bo, 566 bool interruptible); 567 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, 568 SVGAGuestPtr *ptr); 569 extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin); 570 571 /** 572 * Misc Ioctl functionality - vmwgfx_ioctl.c 573 */ 574 575 extern int vmw_getparam_ioctl(struct drm_device *dev, void *data, 576 struct drm_file *file_priv); 577 extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, 578 struct drm_file *file_priv); 579 extern int vmw_present_ioctl(struct drm_device *dev, void *data, 580 struct drm_file *file_priv); 581 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data, 582 struct drm_file *file_priv); 583 extern unsigned int vmw_fops_poll(struct file *filp, 584 struct poll_table_struct *wait); 585 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer, 586 size_t count, loff_t *offset); 587 588 /** 589 * Fifo utilities - vmwgfx_fifo.c 590 */ 591 592 extern int vmw_fifo_init(struct vmw_private *dev_priv, 593 struct vmw_fifo_state *fifo); 594 extern void vmw_fifo_release(struct vmw_private *dev_priv, 595 struct vmw_fifo_state *fifo); 596 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); 597 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); 598 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, 599 uint32_t *seqno); 600 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); 601 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); 602 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); 603 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, 604 uint32_t cid); 605 606 /** 607 * TTM glue - vmwgfx_ttm_glue.c 608 */ 609 610 extern int vmw_ttm_global_init(struct vmw_private *dev_priv); 611 extern void vmw_ttm_global_release(struct vmw_private *dev_priv); 612 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); 613 614 /** 615 * TTM buffer object driver - vmwgfx_buffer.c 616 */ 617 618 extern const size_t vmw_tt_size; 619 extern struct ttm_placement vmw_vram_placement; 620 extern struct ttm_placement vmw_vram_ne_placement; 621 extern struct ttm_placement vmw_vram_sys_placement; 622 extern struct ttm_placement vmw_vram_gmr_placement; 623 extern struct ttm_placement vmw_vram_gmr_ne_placement; 624 extern struct ttm_placement vmw_sys_placement; 625 extern struct ttm_placement vmw_evictable_placement; 626 extern struct ttm_placement vmw_srf_placement; 627 extern struct ttm_bo_driver vmw_bo_driver; 628 extern int vmw_dma_quiescent(struct drm_device *dev); 629 extern void vmw_piter_start(struct vmw_piter *viter, 630 const struct vmw_sg_table *vsgt, 631 unsigned long p_offs); 632 633 /** 634 * vmw_piter_next - Advance the iterator one page. 635 * 636 * @viter: Pointer to the iterator to advance. 637 * 638 * Returns false if past the list of pages, true otherwise. 639 */ 640 static inline bool vmw_piter_next(struct vmw_piter *viter) 641 { 642 return viter->next(viter); 643 } 644 645 /** 646 * vmw_piter_dma_addr - Return the DMA address of the current page. 647 * 648 * @viter: Pointer to the iterator 649 * 650 * Returns the DMA address of the page pointed to by @viter. 651 */ 652 static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter) 653 { 654 return viter->dma_address(viter); 655 } 656 657 /** 658 * vmw_piter_page - Return a pointer to the current page. 659 * 660 * @viter: Pointer to the iterator 661 * 662 * Returns the DMA address of the page pointed to by @viter. 663 */ 664 static inline struct page *vmw_piter_page(struct vmw_piter *viter) 665 { 666 return viter->page(viter); 667 } 668 669 /** 670 * Command submission - vmwgfx_execbuf.c 671 */ 672 673 extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, 674 struct drm_file *file_priv); 675 extern int vmw_execbuf_process(struct drm_file *file_priv, 676 struct vmw_private *dev_priv, 677 void __user *user_commands, 678 void *kernel_commands, 679 uint32_t command_size, 680 uint64_t throttle_us, 681 struct drm_vmw_fence_rep __user 682 *user_fence_rep, 683 struct vmw_fence_obj **out_fence); 684 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 685 struct vmw_fence_obj *fence); 686 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv); 687 688 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, 689 struct vmw_private *dev_priv, 690 struct vmw_fence_obj **p_fence, 691 uint32_t *p_handle); 692 extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, 693 struct vmw_fpriv *vmw_fp, 694 int ret, 695 struct drm_vmw_fence_rep __user 696 *user_fence_rep, 697 struct vmw_fence_obj *fence, 698 uint32_t fence_handle); 699 700 /** 701 * IRQs and wating - vmwgfx_irq.c 702 */ 703 704 extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); 705 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, 706 uint32_t seqno, bool interruptible, 707 unsigned long timeout); 708 extern void vmw_irq_preinstall(struct drm_device *dev); 709 extern int vmw_irq_postinstall(struct drm_device *dev); 710 extern void vmw_irq_uninstall(struct drm_device *dev); 711 extern bool vmw_seqno_passed(struct vmw_private *dev_priv, 712 uint32_t seqno); 713 extern int vmw_fallback_wait(struct vmw_private *dev_priv, 714 bool lazy, 715 bool fifo_idle, 716 uint32_t seqno, 717 bool interruptible, 718 unsigned long timeout); 719 extern void vmw_update_seqno(struct vmw_private *dev_priv, 720 struct vmw_fifo_state *fifo_state); 721 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); 722 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); 723 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); 724 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv); 725 726 /** 727 * Rudimentary fence-like objects currently used only for throttling - 728 * vmwgfx_marker.c 729 */ 730 731 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); 732 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); 733 extern int vmw_marker_push(struct vmw_marker_queue *queue, 734 uint32_t seqno); 735 extern int vmw_marker_pull(struct vmw_marker_queue *queue, 736 uint32_t signaled_seqno); 737 extern int vmw_wait_lag(struct vmw_private *dev_priv, 738 struct vmw_marker_queue *queue, uint32_t us); 739 740 /** 741 * Kernel framebuffer - vmwgfx_fb.c 742 */ 743 744 int vmw_fb_init(struct vmw_private *vmw_priv); 745 int vmw_fb_close(struct vmw_private *dev_priv); 746 int vmw_fb_off(struct vmw_private *vmw_priv); 747 int vmw_fb_on(struct vmw_private *vmw_priv); 748 749 /** 750 * Kernel modesetting - vmwgfx_kms.c 751 */ 752 753 int vmw_kms_init(struct vmw_private *dev_priv); 754 int vmw_kms_close(struct vmw_private *dev_priv); 755 int vmw_kms_save_vga(struct vmw_private *vmw_priv); 756 int vmw_kms_restore_vga(struct vmw_private *vmw_priv); 757 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, 758 struct drm_file *file_priv); 759 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv); 760 void vmw_kms_cursor_snoop(struct vmw_surface *srf, 761 struct ttm_object_file *tfile, 762 struct ttm_buffer_object *bo, 763 SVGA3dCmdHeader *header); 764 int vmw_kms_write_svga(struct vmw_private *vmw_priv, 765 unsigned width, unsigned height, unsigned pitch, 766 unsigned bpp, unsigned depth); 767 void vmw_kms_idle_workqueues(struct vmw_master *vmaster); 768 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, 769 uint32_t pitch, 770 uint32_t height); 771 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); 772 int vmw_enable_vblank(struct drm_device *dev, int crtc); 773 void vmw_disable_vblank(struct drm_device *dev, int crtc); 774 int vmw_kms_present(struct vmw_private *dev_priv, 775 struct drm_file *file_priv, 776 struct vmw_framebuffer *vfb, 777 struct vmw_surface *surface, 778 uint32_t sid, int32_t destX, int32_t destY, 779 struct drm_vmw_rect *clips, 780 uint32_t num_clips); 781 int vmw_kms_readback(struct vmw_private *dev_priv, 782 struct drm_file *file_priv, 783 struct vmw_framebuffer *vfb, 784 struct drm_vmw_fence_rep __user *user_fence_rep, 785 struct drm_vmw_rect *clips, 786 uint32_t num_clips); 787 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 788 struct drm_file *file_priv); 789 790 int vmw_dumb_create(struct drm_file *file_priv, 791 struct drm_device *dev, 792 struct drm_mode_create_dumb *args); 793 794 int vmw_dumb_map_offset(struct drm_file *file_priv, 795 struct drm_device *dev, uint32_t handle, 796 uint64_t *offset); 797 int vmw_dumb_destroy(struct drm_file *file_priv, 798 struct drm_device *dev, 799 uint32_t handle); 800 /** 801 * Overlay control - vmwgfx_overlay.c 802 */ 803 804 int vmw_overlay_init(struct vmw_private *dev_priv); 805 int vmw_overlay_close(struct vmw_private *dev_priv); 806 int vmw_overlay_ioctl(struct drm_device *dev, void *data, 807 struct drm_file *file_priv); 808 int vmw_overlay_stop_all(struct vmw_private *dev_priv); 809 int vmw_overlay_resume_all(struct vmw_private *dev_priv); 810 int vmw_overlay_pause_all(struct vmw_private *dev_priv); 811 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out); 812 int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id); 813 int vmw_overlay_num_overlays(struct vmw_private *dev_priv); 814 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); 815 816 /** 817 * GMR Id manager 818 */ 819 820 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; 821 822 /** 823 * Prime - vmwgfx_prime.c 824 */ 825 826 extern const struct dma_buf_ops vmw_prime_dmabuf_ops; 827 extern int vmw_prime_fd_to_handle(struct drm_device *dev, 828 struct drm_file *file_priv, 829 int fd, u32 *handle); 830 extern int vmw_prime_handle_to_fd(struct drm_device *dev, 831 struct drm_file *file_priv, 832 uint32_t handle, uint32_t flags, 833 int *prime_fd); 834 835 836 /** 837 * Inline helper functions 838 */ 839 840 static inline void vmw_surface_unreference(struct vmw_surface **srf) 841 { 842 struct vmw_surface *tmp_srf = *srf; 843 struct vmw_resource *res = &tmp_srf->res; 844 *srf = NULL; 845 846 vmw_resource_unreference(&res); 847 } 848 849 static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) 850 { 851 (void) vmw_resource_reference(&srf->res); 852 return srf; 853 } 854 855 static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) 856 { 857 struct vmw_dma_buffer *tmp_buf = *buf; 858 859 *buf = NULL; 860 if (tmp_buf != NULL) { 861 struct ttm_buffer_object *bo = &tmp_buf->base; 862 863 ttm_bo_unref(&bo); 864 } 865 } 866 867 static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) 868 { 869 if (ttm_bo_reference(&buf->base)) 870 return buf; 871 return NULL; 872 } 873 874 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) 875 { 876 return (struct ttm_mem_global *) dev_priv->mem_global_ref.object; 877 } 878 #endif 879