/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_api.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
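
/*
 * Worked example of the arithmetic above (illustrative only; the exact
 * numbers depend on sizeof(SVGACBHeader) and PAGE_SIZE): with a device
 * header of at most 64 bytes, ALIGN(sizeof(SVGACBHeader),
 * VMW_CMDBUF_INLINE_ALIGN) is 64 and VMW_CMDBUF_INLINE_SIZE is 960.
 * struct vmw_cmdbuf_dheader below is then exactly 1024 bytes, so with
 * 4096-byte pages the dheader DMA pool packs four allocations per page
 * with no wasted space.
 */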

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};
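
/*
 * Lock ordering as implied by the functions below (summary added for
 * clarity; not an authoritative lockdep annotation):
 *
 *	man->cur_mutex -> man->space_mutex -> man->lock
 *
 * vmw_cmdbuf_reserve_cur() takes @cur_mutex and may call down into
 * vmw_cmdbuf_alloc_space(), which takes @space_mutex and then the @lock
 * spinlock. @lock is also taken from the irq bottom half, which is why
 * process context uses the spin_lock_bh() variants.
 */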

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);


/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 *
 * Returns 0 on success, or -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with @man->lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	BUG_ON(!spin_is_locked(&man->lock));

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 *
 * Returns the command buffer status as reported back by the device.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	if (sizeof(header->handle) > 4)
		val = (header->handle >> 32);
	else
		val = 0;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = (header->handle & 0xFFFFFFFFULL);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers that are
 * not submitted to hardware when this function returns.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically this means freeing them, but on preemption or error,
 * appropriate action is taken. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}
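
/*
 * Summary of the status handling above (illustrative):
 *
 *	SVGA_CB_STATUS_NONE            still owned by the device; stop
 *	                               scanning hw_submitted here.
 *	SVGA_CB_STATUS_COMPLETED       done; free the header.
 *	SVGA_CB_STATUS_COMMAND_ERROR,
 *	SVGA_CB_STATUS_CB_HEADER_ERROR move to man->error and defer to
 *	                               vmw_cmdbuf_work_func().
 *	SVGA_CB_STATUS_PREEMPTED       park on ctx->preempted.
 */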

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context.
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when taking @cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and the allocated mm
 * node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true on success,
 * in which case @info->done is set and @info->node holds the allocated range.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
					 0, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node_generic(&man->mm, info->node,
						 info->page_size, 0, 0,
						 DRM_MM_SEARCH_DEFAULT,
						 DRM_MM_CREATE_DEFAULT);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}
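
/*
 * Note (added for clarity): vmw_cmdbuf_try_alloc() doubles as the
 * wait_event() predicate in vmw_cmdbuf_alloc_space() below, so a call may
 * have the side effect of performing the allocation. The @info->done flag
 * keeps the predicate idempotent, since wait_event() can evaluate it
 * multiple times.
 */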

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if no space
 * is currently available, it turns on IRQ handling and sleeps waiting for
 * space to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
					   &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	memset(cb_hdr, 0, sizeof(*cb_hdr));
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
				 &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	memset(dheader, 0, sizeof(*dheader));
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}
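
/*
 * Note (added for clarity): the inline layout above works because
 * dma_pool_alloc() hands back one coherent allocation; @header->handle is
 * the DMA address of the whole struct vmw_cmdbuf_dheader, so the
 * device-visible address of the inline space is simply that handle plus
 * offsetof(struct vmw_cmdbuf_dheader, cmd).
 */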

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
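
/*
 * Minimal usage sketch for the reserve/commit pair above (illustrative
 * only; "cmd_size" and the command contents are hypothetical, and error
 * handling is elided):
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... write at most cmd_size bytes of commands to cmd ...
 *	vmw_cmdbuf_commit(man, cmd_size, NULL, false);
 *
 * With @header == NULL the manager's current small-submission buffer is
 * used; a dedicated buffer from vmw_cmdbuf_alloc() would be passed as
 * @header to both calls instead.
 */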

/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes.
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory. */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success, or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}
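
/*
 * Expected manager lifecycle, pieced together from the functions in this
 * file (illustrative; the actual call sites live elsewhere in the driver):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);    inline submissions only
 *	vmw_cmdbuf_set_pool_size(man, size, default_size);
 *	...
 *	vmw_cmdbuf_remove_pool(man);       back to inline submissions only
 *	vmw_cmdbuf_man_destroy(man);
 */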