/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Chosen so that the page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

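/*
 * Sizing example (illustrative only; it assumes that
 * ALIGN(sizeof(SVGACBHeader), 64) evaluates to 64 on the target build):
 * VMW_CMDBUF_INLINE_SIZE would then be 1024 - 64 = 960 bytes, each
 * struct vmw_cmdbuf_dheader would occupy exactly 1024 bytes, and a
 * 4096-byte page would hold four dheaders with no wasted space.
 */
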
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};

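/*
 * A command buffer moves through the context queues as follows (a summary
 * of the processing implemented further down in this file):
 *
 *	submitted ---> hw_submitted ---> freed (SVGA_CB_STATUS_COMPLETED)
 *	                   |
 *	                   +--> ctx->preempted (SVGA_CB_STATUS_PREEMPTED)
 *	                   +--> man->error (command / header errors; the
 *	                        error work function takes over)
 */
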
/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))

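/*
 * Usage sketch for for_each_cmdbuf_ctx() (illustrative; this is the same
 * pattern the init and process paths below use):
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	int i;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */
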
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);


/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted while waiting for the
 * mutex.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called with the manager lock held.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 *
 * Returns the status field of the device command buffer header, read back
 * after the submission register writes.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	      !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Counter that is incremented if the context still has command
 * buffers queued after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Finished buffers are typically freed, but on preemption or error
 * they are moved to the appropriate queue for later handling. Wake up
 * waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when sleeping.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

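/*
 * Usage sketch (illustrative; mirrors the pattern used by the teardown
 * paths later in this file):
 *
 *	if (vmw_cmdbuf_idle(man, false, 10 * HZ))
 *		DRM_ERROR("Timed out waiting for command buffer idle.\n");
 */
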
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and the allocated mm
 * node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true on success,
 * in which case @info->node holds the allocated node and @info->done is set
 * to true.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if no space
 * is currently available, it turns on IRQ handling and sleeps waiting for
 * it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
					   &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	memset(cb_hdr, 0, sizeof(*cb_hdr));
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
				 &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	memset(dheader, 0, sizeof(*dheader));
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}

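/*
 * Usage sketch for a dedicated command buffer (illustrative only; my_cmds
 * and my_size are hypothetical):
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *buf, *cmd;
 *
 *	buf = vmw_cmdbuf_alloc(man, my_size, true, &header);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	cmd = vmw_cmdbuf_reserve(man, my_size, SVGA3D_INVALID_ID, true,
 *				 header);
 *	memcpy(cmd, my_cmds, my_size);
 *	vmw_cmdbuf_commit(man, my_size, header, false);
 */
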
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

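/*
 * Usage sketch for the manager's current command buffer (illustrative
 * only; this is the pattern behind small kernel command submissions):
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, my_size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	// Write my_size bytes of commands to cmd, then:
 *	vmw_cmdbuf_commit(man, my_size, NULL, false);
 */
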
/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}

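/*
 * Manager lifecycle sketch (illustrative only; error handling and the
 * actual pool sizes are omitted):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);  // inline buffers only
 *	vmw_cmdbuf_set_pool_size(man, size, default_size); // large buffers
 *	...
 *	vmw_cmdbuf_remove_pool(man);	// before MOB teardown
 *	vmw_cmdbuf_man_destroy(man);
 */
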
/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}