// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
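
/*
 * Illustrative sketch, not part of the driver: the intended calling
 * convention for the iterator above. Because vmw_piter_start() follows
 * __sg_page_iter_start() semantics, the iterator must be advanced once
 * before the first address is read. vmw_piter_next() and
 * vmw_piter_dma_addr() are assumed to be the inline wrappers from
 * vmwgfx_drv.h that dispatch through viter->next and viter->dma_address.
 */
static void __maybe_unused example_walk_sg_table(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&viter);

		(void)addr; /* ... hand addr to the device here ... */
	}
}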

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mode and make
 * sure the TTM pages are visible to the device. Allocate storage for the
 * device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	int ret = 0;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = NULL;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
			vsgt->sgt = vmw_tt->dma_ttm.sg;
		} else {
			vsgt->sgt = &vmw_tt->sgt;
			ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
				vsgt->pages, vsgt->num_pages, 0,
				(unsigned long)vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->drm.dev),
				GFP_KERNEL);
			if (ret)
				goto out_sg_alloc_fail;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	drm_warn(&dev_priv->drm, "VSG table map failed!");
	sg_free_table(vsgt->sgt);
	vsgt->sgt = NULL;
out_sg_alloc_fail:
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
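
/*
 * Illustrative sketch, not part of the driver: vmw_ttm_map_dma() is
 * idempotent via vmw_tt->mapped, so bind-time callers may invoke it
 * unconditionally and pair it with vmw_ttm_unmap_dma() on teardown.
 * The helper name below is hypothetical.
 */
static int __maybe_unused example_map_cycle(struct vmw_ttm_tt *vmw_tt)
{
	int ret = vmw_ttm_map_dma(vmw_tt);	/* no-op if already mapped */

	if (ret)
		return ret;
	/* ... program the device with addresses from vmw_tt->vsgt ... */
	vmw_ttm_unmap_dma(vmw_tt);		/* no-op if nothing mapped */
	return 0;
}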

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}
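
/*
 * Illustrative sketch, not part of the driver: a consumer of
 * vmw_bo_sg_table(), modeled on the MOB/GMR bind paths. The buffer
 * object is kept reserved while the addresses are in use, and the pages
 * are then walked with a vmw_piter as in example_walk_sg_table() above.
 */
static int __maybe_unused example_use_bo_sg_table(struct ttm_buffer_object *bo)
{
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;
	vsgt = vmw_bo_sg_table(bo);
	example_walk_sg_table(vsgt);
	ttm_bo_unreserve(bo);
	return 0;
}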


static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
		break;
	case VMW_PL_SYSTEM:
		/* Nothing to be done for a system bind */
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	case VMW_PL_SYSTEM:
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unmap_dma(vmw_be);
	ttm_tt_fini(ttm);
	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (external && ttm->sg)
		return drm_prime_sg_to_dma_addr_array(ttm->sg,
						      ttm->dma_address,
						      ttm->num_pages);

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);
	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (external)
		return;

	vmw_ttm_unbind(bdev, ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	ttm_pool_free(&bdev->pool, ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;
	bool external = bo->type == ttm_bo_type_sg;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
	vmw_be->mob = NULL;

	if (external)
		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached, 0);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

static bool vmw_memtype_is_system(uint32_t mem_type)
{
	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}

static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *new_man;
	struct ttm_resource_manager *old_man = NULL;
	int ret = 0;

	new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	if (bo->resource)
		old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);

	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
			      bo->ttm == NULL)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man && old_man->use_tt && new_man->use_tt) {
		if (vmw_memtype_is_system(bo->resource->mem_type)) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
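
/*
 * Illustrative sketch, not part of this file: how a ttm_device_funcs
 * table like the one above is typically registered during device init
 * (the real call lives in vmwgfx_drv.c). The field names and parameter
 * choices here are assumptions based on the ttm_device_init() API.
 */
static int __maybe_unused example_ttm_device_setup(struct vmw_private *dev_priv)
{
	return ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			       dev_priv->drm.dev,
			       dev_priv->drm.anon_inode->i_mapping,
			       dev_priv->drm.vma_offset_manager,
			       dev_priv->map_mode == vmw_dma_alloc_coherent,
			       false);
}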

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size, u32 domain,
			       struct vmw_bo **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct vmw_bo *vbo;
	int ret;
	struct vmw_bo_params bo_params = {
		.domain = domain,
		.busy_domain = domain,
		.bo_type = ttm_bo_type_kernel,
		.size = bo_size,
		.pin = true,
		.keep_resv = true,
	};

	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(&vbo->tbo);

	if (likely(ret == 0))
		*bo_p = vbo;
	return ret;
}
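
/*
 * Illustrative sketch, not part of the driver: a kernel-internal user of
 * vmw_bo_create_and_populate(), in the style of the MOB/otable setup
 * code. The domain constant is assumed to come from vmwgfx_bo.h.
 */
static int __maybe_unused example_create_table_bo(struct vmw_private *dev_priv,
						  struct vmw_bo **bo_out)
{
	/* One pinned, populated and DMA-mapped system-memory page. */
	return vmw_bo_create_and_populate(dev_priv, PAGE_SIZE,
					  VMW_BO_DOMAIN_SYS, bo_out);
}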