/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_api.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};
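/*
 * A command buffer typically migrates through the queues above in order:
 * vmw_cmdbuf_ctx_add() puts its header on @submitted,
 * vmw_cmdbuf_ctx_submit() moves it to @hw_submitted once the device has
 * accepted it, and vmw_cmdbuf_ctx_process() then, depending on the status
 * the device reports, frees it (completed), moves it to @preempted, or
 * moves it to the manager-wide error list.
 */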
/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
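/*
 * For illustration: if sizeof(SVGACBHeader) were 32 bytes (a hypothetical
 * value), ALIGN(32, 64) = 64 and VMW_CMDBUF_INLINE_SIZE = 1024 - 64 = 960,
 * making sizeof(struct vmw_cmdbuf_dheader) exactly 1024 bytes, so four
 * dheaders pack into a 4 KiB page with no waste.
 */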
/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	BUG_ON(!spin_is_locked(&man->lock));

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}
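/*
 * Typical lifetime of a header allocated with vmw_cmdbuf_alloc() (a
 * sketch; error handling elided):
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *	... fill in commands at cmd ...
 *	vmw_cmdbuf_commit(man, size, header, true);
 *
 * After commit, the manager owns the header and frees it once the device
 * is done with it. vmw_cmdbuf_header_free() is for headers that are not
 * handed over this way, for example on error paths or after a manual
 * submit (see vmw_cmdbuf_send_device_command()).
 */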
/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	if (sizeof(header->handle) > 4)
		val = (header->handle >> 32);
	else
		val = 0;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = (header->handle & 0xFFFFFFFFULL);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}
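/*
 * Example with a hypothetical 64-bit handle 0x123459000 submitted on
 * context 0: the sequence above writes 0x00000001 to
 * SVGA_REG_COMMAND_HIGH and (0x23459000 | SVGA_CB_CONTEXT_0) to
 * SVGA_REG_COMMAND_LOW. The context id can share the low register
 * because cb_headers come from DMA pools with 64-byte alignment, keeping
 * the address bits covered by SVGA_CB_CONTEXT_MASK zero.
 */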
/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers queued
 * for submission after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically they are freed, but on preemption or error the
 * appropriate action is taken instead. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off. This
 * function may return -EAGAIN to indicate that it should be rerun due to
 * possibly missed IRQs if IRQs have just been turned on.
 */
static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty = 0;
	struct vmw_cmdbuf_context *ctx;
	int i;

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		return -EAGAIN;
	}

	return 0;
}
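/*
 * Callers handle the -EAGAIN above by simply calling
 * vmw_cmdbuf_man_process() a second time:
 *
 *	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
 *		(void) vmw_cmdbuf_man_process(man);
 *
 * One rerun is enough to close the race where a command buffer completed
 * after the queues were scanned but before the IRQ was turned on; with
 * the IRQ now enabled, any later completion raises an interrupt of its
 * own.
 */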
/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
		vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
		(void) vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}
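/*
 * The functions below implement the "current" command buffer, @cur:
 * small kernel submissions are appended at @cur_pos until the buffer
 * fills up, a different DX context is requested, or an explicit flush
 * hands the whole batch to the device as a single command buffer (see
 * vmw_cmdbuf_reserve_cur() and vmw_cmdbuf_commit_cur()).
 */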
/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the lock.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
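/*
 * A caller waiting for the device to drain might do (a sketch, with a
 * hypothetical timeout):
 *
 *	int ret = vmw_cmdbuf_idle(man, true, msecs_to_jiffies(3000));
 *	if (ret == -EBUSY)
 *		... the manager failed to go idle in time ...
 *
 * as done with a 10*HZ timeout in vmw_cmdbuf_remove_pool() and
 * vmw_cmdbuf_man_destroy() below.
 */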
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and the allocated
 * mm node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
					 0, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret) {
		(void) vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node_generic(&man->mm, info->node,
						 info->page_size, 0, 0,
						 DRM_MM_SEARCH_DEFAULT,
						 DRM_MM_CREATE_DEFAULT);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}
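/*
 * Main-pool allocations are made in whole pages: with 4 KiB pages, for
 * example, a 5000-byte request above yields info.page_size = 2, and
 * vmw_cmdbuf_space_pool() below converts the resulting node back to
 * bytes with the corresponding shifts.
 */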
/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
					   &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	memset(cb_hdr, 0, sizeof(*cb_hdr));
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
				 &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	memset(dheader, 0, sizeof(*dheader));
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
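/*
 * Note that requests of up to VMW_CMDBUF_INLINE_SIZE bytes are served
 * from the dheader DMA pool and never touch the main pool, so they work
 * before vmw_cmdbuf_set_pool_size() has been called. This is what allows
 * vmw_cmdbuf_startstop() to enable device context 0 while the manager is
 * still being created.
 */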
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
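/*
 * Typical pattern for a small kernel command using the current buffer
 * (a sketch; SVGA3dCmdFoo and its size are hypothetical):
 *
 *	SVGA3dCmdFoo *cmd;
 *
 *	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
 *				 true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	... fill in *cmd ...
 *	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
 *
 * Committing a size of zero releases the reservation without submitting
 * anything.
 */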
/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;
	man->default_size = default_size;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}
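/*
 * Whichever backing store vmw_cmdbuf_set_pool_size() ends up with, the
 * result is the same from the allocator's point of view: @map is a
 * CPU-visible pointer to @size bytes of command buffer space, and only
 * the header setup in vmw_cmdbuf_space_pool() needs to know whether the
 * space lives in a MOB or in coherent DMA memory.
 */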
/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE or smaller only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}
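/*
 * Lifecycle sketch for the manager as a whole (error handling elided;
 * the pool and default sizes are hypothetical):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	ret = vmw_cmdbuf_set_pool_size(man, 2*1024*1024, 32*1024);
 *
 *	... command submission ...
 *
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */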
/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}