// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

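/**
 * struct vmw_ttm_tt - vmwgfx per buffer-object TTM backend state.
 *
 * Member descriptions below are inferred from how the fields are used in
 * this file.
 *
 * @dma_ttm: The base DMA-aware TTM struct.
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id: GMR or MOB id the pages are bound to, valid while bound.
 * @mob: Backing MOB, allocated on first bind to VMW_PL_MOB.
 * @mem_type: Memory type the pages are currently bound to.
 * @sgt: Scatter-gather table used for DMA mapping.
 * @vsgt: Device-facing view of the pages and their DMA addresses.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether a DMA mapping has been set up.
 */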
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	/*
	 * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
	 * needs revision. See
	 * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
	 */
	return sg_page_iter_dma_address(
		container_of(&viter->iter, struct sg_dma_page_iter, base));
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset in @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

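/*
 * Typical iterator usage (this is the pattern used by vmw_ttm_map_dma()
 * below):
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 */
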
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free DMA mappings previously set up by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

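	/*
	 * Count the number of contiguous DMA address regions: a new region
	 * starts wherever a page's DMA address is not adjacent to that of
	 * the previous page.
	 */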
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down the device DMA mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}


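/**
 * vmw_ttm_bind - TTM backend bind callback
 *
 * @ttm: Pointer to the struct ttm_tt backing the buffer object
 * @bo_mem: The memory region to bind to
 *
 * Makes sure the pages are DMA-mapped and then binds them to either a
 * GMR or a MOB, depending on the memory type of @bo_mem.
 */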
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

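/**
 * vmw_ttm_unbind - TTM backend unbind callback
 *
 * @ttm: Pointer to the struct ttm_tt to unbind
 *
 * Unbinds the pages from the GMR or MOB they were bound to, and tears
 * down the DMA mapping in the vmw_dma_map_bind mode, where the mapping
 * only lives for the duration of the bind.
 */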
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}


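/**
 * vmw_ttm_destroy - TTM backend destroy callback
 *
 * @ttm: Pointer to the struct ttm_tt to destroy
 *
 * Tears down any remaining DMA mappings, finalizes the TTM struct with
 * the variant matching how it was initialized, destroys any backing MOB
 * and frees the backend itself.
 */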
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


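/**
 * vmw_ttm_populate - TTM populate callback
 *
 * @ttm: Pointer to the struct ttm_tt to populate
 * @ctx: TTM operation context
 *
 * Allocates backing pages. In the coherent mapping mode the DMA address
 * array is accounted against the TTM memory global before populating
 * from the coherent page pool.
 */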
static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
					ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

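/**
 * vmw_ttm_unpopulate - TTM unpopulate callback
 *
 * @ttm: Pointer to the struct ttm_tt to unpopulate
 *
 * Destroys any backing MOB, tears down the DMA mapping and releases the
 * backing pages, undoing the accounting done by vmw_ttm_populate().
 */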
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

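/**
 * vmw_ttm_tt_create - TTM ttm_tt_create callback
 *
 * @bo: The buffer object to create a TTM struct for
 * @page_flags: TTM page flags
 *
 * Allocates and initializes a struct vmw_ttm_tt, using the DMA-aware
 * init variant when the device runs in the coherent mapping mode.
 * Returns NULL on failure.
 */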
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

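/**
 * vmw_init_mem_type - Set up a TTM memory type manager
 *
 * @bdev: The TTM bo device
 * @type: The memory type to initialize
 * @man: The manager to initialize
 *
 * System and VRAM use the generic TTM managers; the GMR and MOB id
 * spaces use the vmwgfx-specific id manager.
 */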
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on the number
		 * of slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

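/**
 * vmw_evict_flags - TTM evict_flags callback
 *
 * @bo: The buffer object about to be evicted
 * @placement: Returns the placement to evict to
 *
 * All evictions go to system memory.
 */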
static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

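/**
 * vmw_verify_access - TTM verify_access callback
 *
 * @bo: The buffer object to check access for
 * @filp: The file whose opener's access is checked
 *
 * Delegates to vmw_user_bo_verify_access() with the caller's
 * ttm_object_file.
 */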
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

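/**
 * vmw_ttm_io_mem_reserve - TTM io_mem_reserve callback
 *
 * @bdev: The TTM bo device
 * @mem: The memory region to set up io memory info for
 *
 * Only VRAM is iomem; it is mapped at @mem's page offset from the start
 * of the VRAM aperture. System, GMR and MOB memory need no io setup.
 */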
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

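/* Nothing to do on fault-time reservation. */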
static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether this move is an eviction. Unused by this callback.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};