// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
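
/*
 * Informational note: assuming a 4 KiB PAGE_SIZE, each page table page holds
 * PAGE_SIZE / VMW_PPN_SIZE entries, i.e. 512 eight-byte PPNs on 64-bit
 * configurations and 1024 four-byte PPNs on 32-bit ones.
 */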

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo:           Buffer object holding the page table pages.
 * @num_pages:       Number of pages that make up the page table.
 * @pt_level:        The indirection level of the page table. 0-2.
 * @pt_root_page:    DMA address of the level 0 page of the page table.
 * @id:              Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);


static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
}


/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable_bo:      Buffer object holding the object table data
 * @offset:         Start of table offset into dev_priv::otable_bo
 * @otable:         Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support a depth-2 page table here, but the
	 * otable size is determined at compile-time, so this BUG shouldn't
	 * trigger randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable:         Pointer to otable metadata for the table being taken down
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}


static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	SVGAOTableType i;
	int ret;

	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
	if (unlikely(ret != 0))
		return ret;

	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	vmw_bo_unpin_unlocked(batch->otable_bo);
	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (has_sm4_context(dev_priv)) {
		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
				   GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
			       struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:  Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}
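
/*
 * Worked example (informational, assuming a 4 KiB PAGE_SIZE and the 64-bit
 * 8-byte PPN format): a 1 GiB MOB has 262144 data pages. The first loop
 * iteration accounts for 262144 * 8 bytes = 512 pages of level-1 page table,
 * the second for 512 * 8 bytes = 1 page of level-2 page table, after which
 * data_size no longer exceeds PAGE_SIZE. The function then returns
 * 512 + 1 = 513 page table pages.
 */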

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:  Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(!mob))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}
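
/*
 * Typical mob lifecycle (a sketch based only on the functions in this file;
 * the actual call sites live in the TTM backend code):
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_data_pages);
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *	...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */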

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv:    Pointer to a device private structure
 * @mob:         Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient, and the allocation may cause TTM
 * buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	BUG_ON(mob->pt_bo != NULL);

	return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo);
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif
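
/*
 * For example (informational): with 4 KiB pages, a data page at DMA address
 * 0x123456000 is stored as the page number 0x123456. In the 64-bit format the
 * value occupies two consecutive u32 slots of the page table page, which is
 * why *addr is advanced by 2 above.
 */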

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Page table iterator pointing at the DMA addresses of the
 *                  underlying buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Page table iterator pointing at the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Page table iterator pointing at the DMA addresses of the
 *                  buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively builds each page table level until the top level fits in a
 * single page, which becomes the page table root.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}
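
/*
 * Continuing the 1 GiB example above (informational, 64-bit PPN format): the
 * first loop iteration writes the 262144 data-page PPNs into 512 level-1 page
 * table pages, the second writes those 512 PPNs into a single level-2 page,
 * and the loop stops with mob->pt_level == 2 and pt_root_page pointing at
 * that level-2 page.
 */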

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		vmw_bo_unpin_unlocked(mob->pt_bo);
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (cmd) {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 *                populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the data pages
 *                  of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_no_cmd_space;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up) {
		vmw_bo_unpin_unlocked(mob->pt_bo);
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}