// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

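/*
 * For each placement below, TTM tries the entries in @placement first and
 * falls back to the @busy_placement list when the preferred placements
 * cannot be satisfied, e.g. under memory pressure. vmw_vram_gmr_placement,
 * for instance, prefers VRAM or a GMR but accepts only a GMR when busy.
 */
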
struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

/**
 * struct vmw_ttm_tt - TTM backend state for a vmwgfx buffer object.
 *
 * @dma_ttm: The underlying struct ttm_dma_tt holding the pages and, in the
 * coherent mapping mode, their DMA addresses.
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id: Id of the GMR or MOB this TT is currently bound to.
 * @mob: Pointer to the MOB backing this TT, or NULL if none is set up.
 * @mem_type: Memory type this TT is currently bound to.
 * @sgt: Scatter-gather table used in the DMA mapping modes.
 * @vsgt: Mapping-mode independent view of the pages and device addresses.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether device addresses have been set up.
 * @bound: Whether this TT is currently bound to a GMR or MOB.
 */
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper function to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * This function returns a pointer to the page currently
 * pointed to by @viter.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
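
/*
 * Example (sketch): walking all device addresses of a mapped
 * struct vmw_sg_table. Following the __sg_page_iter_start() convention,
 * the iterator must be advanced once before the first page is valid:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		... hand addr to the device, e.g. when building
 *		... GMR or MOB page tables
 *	}
 */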

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
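
/*
 * Illustration of the rule above (a sketch only; this file never needs
 * it because of the constraint described in the comment): if the CPU did
 * have to write to the pages while they are mapped, the access would
 * need to be bracketed by DMA API sync calls:
 *
 *	dma_sync_sgtable_for_cpu(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL);
 *	... CPU writes to the backing pages ...
 *	dma_sync_sgtable_for_device(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL);
 */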

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for the chosen mapping mode and make sure
 * the TTM pages are visible to the device. Allocate storage for the
 * device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

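	/*
	 * Count the number of contiguous DMA regions: each address that
	 * does not directly follow its predecessor starts a new region.
	 * For example, page addresses 0x1000, 0x2000, 0x3000, 0x8000
	 * (with 4 KiB pages) form two regions.
	 */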
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
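
/*
 * Example (sketch): retrieving device addresses for a buffer object.
 * The reservation is what keeps the returned table valid:
 *
 *	const struct vmw_sg_table *vsgt;
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vsgt = vmw_bo_sg_table(bo);
 *	... iterate with a struct vmw_piter as shown above ...
 *	ttm_bo_unreserve(bo);
 */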

/**
 * vmw_ttm_bind - Bind the backing pages of a TT to a GMR or MOB
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to bind.
 * @bo_mem: The memory region to bind to; its mem_type selects between a
 * GMR binding and a MOB binding, and its start provides the id.
 *
 * Sets up the DMA mappings if necessary and binds the backing pages to
 * the device. Binding an already bound TT is a NOP.
 */
static int vmw_ttm_bind(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else {
		ret = ttm_pool_populate(ttm, ctx);
	}

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else {
		ttm_pool_unpopulate(ttm);
	}
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
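		/*
		 * VRAM is accessed through its PCI aperture. Worked example
		 * (illustrative values only): with 4 KiB pages,
		 * mem->start == 0x10 and vram_start == 0xe8000000, the
		 * resulting bus offset is 0xe8000000 + (0x10 << 12) ==
		 * 0xe8010000.
		 */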
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_bind = &vmw_ttm_bind,
	.ttm_tt_unbind = &vmw_ttm_unbind,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

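/**
 * vmw_bo_create_and_populate - Create a pinned, populated buffer object.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bo_size: Size of the buffer object to create, in bytes.
 * @bo_p: On success, assigned the newly created buffer object.
 *
 * Creates a buffer object pinned in system memory, populates its pages
 * and sets up their device mappings, so that vmw_bo_sg_table() can be
 * used on it immediately.
 *
 * Usage sketch (the error handling around the call is an illustrative
 * assumption, not taken from this file):
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *
 *	if (ret)
 *		return ret;
 *	... use vmw_bo_sg_table(bo) ...
 *	ttm_bo_put(bo);
 */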
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}