// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

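/*
 * Tracks a single buffer known to this driver. The union reflects the two
 * roles a buffer can play: 'exp' state for buffers exported from grant
 * references, 'imp' state for foreign dma-bufs imported into grants.
 */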
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* Protects the buffer and wait lists above. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

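/*
 * Allocate a wait object and add it to the wait list, dropping the
 * caller's reference to gntdev_dmabuf so that the buffer's final release
 * (which signals the completion) is able to fire.
 */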
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

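/*
 * Wake up anyone waiting for the buffer to be released. Called via
 * dmabuf_exp_release() with priv->lock held, which protects the walk of
 * the wait list.
 */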
static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the export list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the file descriptor provided
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and we now hold a reference to it, so
	 * prepare to wait: allocate a wait object and add it to the wait list
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* Implementation of DMA buffer export. */

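/*
 * Build a scatter-gather table covering the buffer's pages. Physically
 * contiguous pages may be merged into a single entry by
 * sg_alloc_table_from_pages().
 */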
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
						   sgt->nents,
						   gntdev_dmabuf_attach->dir,
						   DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

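/*
 * Map the buffer's pages for DMA by the attached device. The resulting
 * sg table is cached per attachment and may only ever be mapped with a
 * single DMA direction; a second map with a different direction fails
 * with -EBUSY. The mapping is torn down at detach time, not at unmap.
 */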
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

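/*
 * Final release of an exported buffer: signal any waiters, unlink the
 * buffer from the export list and drop the file reference taken in
 * dmabuf_exp_from_pages(). Called via kref_put() with priv->lock held.
 */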
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	/* Not implemented. */
	return NULL;
}

static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{
	/* Not implemented. */
}

static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	/* Not implemented. */
	return 0;
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
	.map = dmabuf_exp_ops_kmap,
	.unmap = dmabuf_exp_ops_kunmap,
	.mmap = dmabuf_exp_ops_mmap,
};

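/* Everything needed to export a grant map as a dma-buf. */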
struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

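/*
 * Wrap already granted and mapped pages into a new dma-buf and install
 * a file descriptor for it. On success the gntdev file is pinned with
 * get_file() so the device context outlives external dma-buf users.
 */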
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Silence a spurious gcc warning on i386. */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

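/*
 * Grant @domid foreign access to each of the buffer's pages, filling
 * @refs with the resulting grant references. On failure any references
 * not yet claimed are returned to the grant table; references already
 * granted are ended by the caller via dmabuf_imp_end_foreign_access().
 */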
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

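/*
 * Import a dma-buf by file descriptor: attach and map it for @dev,
 * verify that the buffer size matches what user-space expects, convert
 * the sg table into an array of pages and grant @domid access to them.
 */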
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that the imported buffer has the expected number of pages. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer is %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check that the page is backed by a struct page: an invalid
		 * page can be seen here if we are given a page from VRAM or
		 * another resource that is not backed by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove
 * it from the list of imported buffers.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

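/*
 * A rough user-space sketch of the export path, assuming the ioctl and
 * struct definitions from include/uapi/xen/gntdev.h (variable names are
 * illustrative, error handling omitted):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op =
 *		malloc(sizeof(*op) + count * sizeof(op->refs[0]));
 *
 *	op->flags = 0;
 *	op->count = count;
 *	op->domid = domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		dmabuf_fd = op->fd;	 op->fd is filled in by the kernel
 */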
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

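/*
 * Allocate the per-file dma-buf context. @filp is the gntdev file itself:
 * exports take a reference on it (get_file()) so the context stays alive
 * while foreign users still hold exported buffers.
 */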
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}