xref: /openbmc/linux/drivers/gpu/drm/lima/lima_gem.c (revision 51c7b447)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
3 
4 #include <linux/mm.h>
5 #include <linux/sync_file.h>
6 #include <linux/pfn_t.h>
7 
8 #include <drm/drm_file.h>
9 #include <drm/drm_syncobj.h>
10 #include <drm/drm_utils.h>
11 
12 #include <drm/lima_drm.h>
13 
14 #include "lima_drv.h"
15 #include "lima_gem.h"
16 #include "lima_gem_prime.h"
17 #include "lima_vm.h"
18 #include "lima_object.h"
19 
20 int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
21 			   u32 size, u32 flags, u32 *handle)
22 {
23 	int err;
24 	struct lima_bo *bo;
25 	struct lima_device *ldev = to_lima_dev(dev);
26 
27 	bo = lima_bo_create(ldev, size, flags, NULL, NULL);
28 	if (IS_ERR(bo))
29 		return PTR_ERR(bo);
30 
31 	err = drm_gem_handle_create(file, &bo->gem, handle);
32 
33 	/* drop reference from allocate - handle holds it now */
34 	drm_gem_object_put_unlocked(&bo->gem);
35 
36 	return err;
37 }
38 
39 void lima_gem_free_object(struct drm_gem_object *obj)
40 {
41 	struct lima_bo *bo = to_lima_bo(obj);
42 
43 	if (!list_empty(&bo->va))
44 		dev_err(obj->dev->dev, "lima gem free bo still has va\n");
45 
46 	lima_bo_destroy(bo);
47 }
48 
49 int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
50 {
51 	struct lima_bo *bo = to_lima_bo(obj);
52 	struct lima_drm_priv *priv = to_lima_drm_priv(file);
53 	struct lima_vm *vm = priv->vm;
54 
55 	return lima_vm_bo_add(vm, bo, true);
56 }
57 
58 void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
59 {
60 	struct lima_bo *bo = to_lima_bo(obj);
61 	struct lima_drm_priv *priv = to_lima_drm_priv(file);
62 	struct lima_vm *vm = priv->vm;
63 
64 	lima_vm_bo_del(vm, bo);
65 }
66 
67 int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
68 {
69 	struct drm_gem_object *obj;
70 	struct lima_bo *bo;
71 	struct lima_drm_priv *priv = to_lima_drm_priv(file);
72 	struct lima_vm *vm = priv->vm;
73 	int err;
74 
75 	obj = drm_gem_object_lookup(file, handle);
76 	if (!obj)
77 		return -ENOENT;
78 
79 	bo = to_lima_bo(obj);
80 
81 	*va = lima_vm_get_va(vm, bo);
82 
83 	err = drm_gem_create_mmap_offset(obj);
84 	if (!err)
85 		*offset = drm_vma_node_offset_addr(&obj->vma_node);
86 
87 	drm_gem_object_put_unlocked(obj);
88 	return err;
89 }
90 
/*
 * Page-fault handler for userspace mmaps of a lima BO: inserts the single
 * backing page for the faulting address into the VMA.
 */
static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct lima_bo *bo = to_lima_bo(obj);
	pfn_t pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);

	/* VMA is VM_MIXEDMAP (see lima_set_vma_flags), hence mixed insert. */
	return vmf_insert_mixed(vma, vmf->address, pfn);
}
105 
/* VM operations for lima BO mappings; the generic drm_gem open/close
 * callbacks maintain the GEM object reference across VMA split/dup. */
const struct vm_operations_struct lima_gem_vm_ops = {
	.fault = lima_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
111 
/*
 * Configure a VMA for mapping lima BO memory: pages are faulted in
 * individually (VM_MIXEDMAP, not a pure PFN map) and the mapping is
 * write-combined.
 */
void lima_set_vma_flags(struct vm_area_struct *vma)
{
	/* Base protection is derived from the flags *before* they are
	 * modified below — keep this ordering. */
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_page_prot = pgprot_writecombine(prot);
}
120 
int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Generic GEM mmap first (offset lookup, VMA setup)... */
	int err = drm_gem_mmap(filp, vma);

	if (err)
		return err;

	/* ...then adjust flags/prot for lima's fault-based WC mapping. */
	lima_set_vma_flags(vma);
	return 0;
}
132 
/*
 * Attach @bo's reservation-object dependencies to @task before queueing.
 *
 * @write:    true when the task will write @bo (LIMA_SUBMIT_BO_WRITE).
 * @explicit: true for explicit-fencing submits; implicit resv fences are
 *            then skipped (user-supplied deps are added separately in
 *            lima_gem_add_deps()).
 *
 * NOTE(review): on success lima_sched_task_add_dep() appears to take over
 * the fence reference (no put on the success paths below) — verify against
 * lima_sched.c.
 *
 * Returns 0 on success or a negative error code.
 */
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	/* A reader will later publish a shared fence (lima_gem_submit());
	 * reserve the slot now, while failure is still recoverable. */
	if (!write) {
		err = reservation_object_reserve_shared(bo->gem.resv, 1);
		if (err)
			return err;
	}

	/* explicit sync use user passed dep fence */
	if (explicit)
		return 0;

	/* implicit sync use bo fence in resv obj */
	if (write) {
		/* A writer must wait for every current fence, shared and
		 * exclusive alike. */
		unsigned nr_fences;
		struct dma_fence **fences;
		int i;

		err = reservation_object_get_fences_rcu(
			bo->gem.resv, NULL, &nr_fences, &fences);
		if (err || !nr_fences)
			return err;

		for (i = 0; i < nr_fences; i++) {
			err = lima_sched_task_add_dep(task, fences[i]);
			if (err)
				break;
		}

		/* for error case free remaining fences */
		for ( ; i < nr_fences; i++)
			dma_fence_put(fences[i]);

		kfree(fences);
	} else {
		/* A reader only needs to wait for the current writer
		 * (the exclusive fence). */
		struct dma_fence *fence;

		fence = reservation_object_get_excl_rcu(bo->gem.resv);
		if (fence) {
			err = lima_sched_task_add_dep(task, fence);
			if (err)
				dma_fence_put(fence);
		}
	}

	return err;
}
183 
/*
 * Lock the reservation objects of all @nr_bos BOs with the wound/wait
 * mutex protocol so the whole set is acquired deadlock-free.
 *
 * On -EDEADLK we back off (drop everything held), take the contended lock
 * via the slow path, and retry the full set; the slow-locked index is
 * skipped on the retry pass because it is already held.
 *
 * Returns 0 with all locks held and the acquire context done, or a
 * negative error with nothing held and the context finished.
 */
static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
			     struct ww_acquire_ctx *ctx)
{
	int i, ret = 0, contended, slow_locked = -1;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (i = 0; i < nr_bos; i++) {
		if (i == slow_locked) {
			/* Already held via the slow path from a previous
			 * backoff; clear the marker so the error path
			 * below cannot unlock it twice. */
			slow_locked = -1;
			continue;
		}

		ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
		if (ret < 0) {
			contended = i;
			goto err;
		}
	}

	ww_acquire_done(ctx);
	return 0;

err:
	/* Release everything locked in this pass: indices 0..i-1. */
	for (i--; i >= 0; i--)
		ww_mutex_unlock(&bos[i]->gem.resv->lock);

	/* A still-pending slow-locked BO sits past the failure index, so
	 * the loop above did not cover it. */
	if (slow_locked >= 0)
		ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);

	if (ret == -EDEADLK) {
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(
			&bos[contended]->gem.resv->lock, ctx);
		if (!ret) {
			slow_locked = contended;
			goto retry;
		}
	}
	ww_acquire_fini(ctx);

	return ret;
}
228 
229 static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
230 				struct ww_acquire_ctx *ctx)
231 {
232 	int i;
233 
234 	for (i = 0; i < nr_bos; i++)
235 		ww_mutex_unlock(&bos[i]->gem.resv->lock);
236 	ww_acquire_fini(ctx);
237 }
238 
239 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
240 {
241 	int i, err;
242 
243 	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
244 		struct dma_fence *fence = NULL;
245 
246 		if (!submit->in_sync[i])
247 			continue;
248 
249 		err = drm_syncobj_find_fence(file, submit->in_sync[i],
250 					     0, 0, &fence);
251 		if (err)
252 			return err;
253 
254 		err = lima_sched_task_add_dep(submit->task, fence);
255 		if (err) {
256 			dma_fence_put(fence);
257 			return err;
258 		}
259 	}
260 
261 	return 0;
262 }
263 
/*
 * Submit one task to the scheduler pipe selected by @submit->pipe.
 *
 * Per-BO steps: look up each handle, pin its GPU VA mapping
 * (lima_vm_bo_add() with create=false raises the map refcount, dropped
 * again when the task is done — see the comment below), lock all
 * reservation objects, hook up dependencies (explicit syncobjs and/or
 * implicit resv fences), queue the task and publish its fence on every
 * BO, then unlock and drop the handle references.
 *
 * On success, the optional out-syncobj carries the task fence.
 * Returns 0 or a negative error; on error all VA pins and handle
 * references taken so far are released.
 */
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	/* Resolve the output syncobj first so a bad handle fails early. */
	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase refcnt of gpu va map to prevent unmapped when executing,
		 * will be decreased when task done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put_unlocked(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	/* Implicit resv-fence deps, plus shared-slot reservation for readers. */
	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	/* Point of no return: the task is queued and owned by the scheduler,
	 * so only success-path cleanup follows. */
	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	/* Publish the task fence on each BO while its resv is still locked. */
	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
		else
			reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
	}

	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);

	/* NOTE(review): handle refs are dropped here, presumably because
	 * lima_sched_task_init() made the task hold the BOs — verify. */
	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put_unlocked(&bos[i]->gem);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
err_out0:
	/* Undo the VA pin and handle ref for every BO resolved so far.
	 * NOTE(review): relies on submit->lbos being zero-initialized by
	 * the caller so the first NULL marks the end — confirm. */
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put_unlocked(&bos[i]->gem);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}
366 
367 int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
368 {
369 	bool write = op & LIMA_GEM_WAIT_WRITE;
370 	long ret, timeout;
371 
372 	if (!op)
373 		return 0;
374 
375 	timeout = drm_timeout_abs_to_jiffies(timeout_ns);
376 
377 	ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
378 	if (ret == 0)
379 		ret = timeout ? -ETIMEDOUT : -EBUSY;
380 
381 	return ret;
382 }
383