// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

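/*
 * dma_buf_map_attachment_unlocked() and friends are exported in the DMA_BUF
 * symbol namespace, so it has to be imported explicitly.
 */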
MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static struct lock_class_key prime_bo_lock_class_key;
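
/*
 * Three backing-store implementations follow: "prime" for dma-buf imports,
 * "shmem" for userspace-created BOs and "internal" for driver-internal
 * allocations. Each fills in the same ivpu_bo_ops vtable, so the rest of
 * the code can allocate, map and tear down pages without caring about the
 * BO type.
 */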
static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

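/*
 * Pages whose kernel mapping was switched to WC or UC above must be flipped
 * back to write-back before they are handed back to shmem, otherwise the
 * linear map would keep the conflicting attribute.
 */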
static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	/* Release the scatterlist drm_prime_pages_to_sg() allocated, not just the table struct */
	sg_free_table(sgt);
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to the VPU.
 *
 * This function pins the physical memory pages, maps them into the IOMMU
 * address space and finally updates the VPU MMU page tables so that the
 * VPU can translate VPU addresses into IOMMU (DMA) addresses.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

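/*
 * Example (a sketch, not code from this driver): a submission path that
 * needs the BO resident before the VPU touches it would do roughly:
 *
 *	ret = ivpu_bo_pin(bo);
 *	if (ret)
 *		return ret;
 *	queue_job(vdev, bo->vpu_addr);	// hypothetical consumer
 */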
static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
			range = &vdev->hw->ranges.shave;
		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
			range = &vdev->hw->ranges.dma;
		else
			range = &vdev->hw->ranges.user;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
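
/*
 * Mappings use VM_PFNMAP and start out empty: ivpu_vm_fault() populates the
 * VMA one page at a time on first touch, allocating and mapping the backing
 * pages on demand if they do not exist yet.
 */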

static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	/* Log before dropping the reference: if handle creation failed, the put below frees the BO */
	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);

	drm_gem_object_put(&bo->base);

	return ret;
}

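/*
 * Userspace sketch (hypothetical, assumes libdrm-style wrappers): creating a
 * mappable BO and reading back the VPU address the driver picked:
 *
 *	struct drm_ivpu_bo_create create = {
 *		.size = 4096,
 *		.flags = DRM_IVPU_BO_MAPPABLE,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &create) == 0)
 *		printf("handle %u at 0x%llx\n", create.handle, create.vpu_addr);
 */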
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

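/*
 * Imported buffers keep their pages under the exporter's control: prime_ops
 * only maps and unmaps the attachment. bo->lock also gets a dedicated
 * lockdep class here, presumably so nesting under the exporter's locks does
 * not raise false positive lockdep reports.
 */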
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

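/*
 * An imported BO is not bound to a context at import time; the first
 * DRM_IVPU_BO_INFO call binds it to the calling file's context and assigns
 * the vpu_addr that is reported back to userspace.
 */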
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}