/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"
/*
 * DMA-BUF/GEM object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM object.
 * It takes this reference in the handle_to_fd ioctl, when it first
 * calls .prime_export and stores the exporting GEM object in the
 * dma_buf priv. This reference is released when the dma_buf object
 * goes away, in the driver's .release function.
 *
 * On import the importing GEM object holds a reference to the dma_buf
 * (which in turn holds a reference to the exporting GEM object). It
 * takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is later
 * destroyed, we remove the attachment and drop the reference to the
 * dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the GEM object
 * from the dma-buf private. PRIME will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 */
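
/*
 * A minimal sketch of how a driver wires these lifetime rules up, using
 * the PRIME helpers below. The "foo" driver and the exact callback set
 * are illustrative assumptions, not requirements:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *	};
 *
 * With this setup the reference chain described above is maintained by
 * the helpers, including the self-import short-circuit.
 */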

/* Per-file bookkeeping: links a dma_buf to the handle it was exported or
 * imported with, indexed from both directions via two rbtree nodes.
 */
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

/* Cached mapping state for an attachment to a helper-exported dma-buf. */
struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * references to the &drm_device and the exported &drm_gem_object (stored in
 * exp_info->priv), which are released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_ref(dev);
	drm_gem_object_reference(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference that the exported fd held on the GEM object */
	drm_gem_object_unreference_unlocked(obj);

	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
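
/*
 * A driver that rolls its own &dma_buf_ops instead of using
 * &drm_gem_prime_dmabuf_ops must still plug drm_gem_dmabuf_release() in as
 * the release callback, so the references taken in drm_gem_dmabuf_export()
 * are dropped again. A minimal sketch, where the foo_* callbacks are
 * hypothetical driver code:
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.map_dma_buf	= foo_map_dma_buf,
 *		.unmap_dma_buf	= foo_unmap_dma_buf,
 *		.release	= drm_gem_dmabuf_release,
 *		.mmap		= foo_dmabuf_mmap,
 *		...
 *	};
 */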

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table: produce a GEM object from another driver's
 *    scatter/gather table
 */
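
/*
 * Put together, the glue for a driver using these helpers looks roughly
 * like the sketch below. The "foo" driver and its foo_gem_prime_*
 * callbacks are assumptions made for illustration only:
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.gem_prime_pin		   = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *		.gem_prime_mmap		   = foo_gem_prime_mmap,
 *	};
 */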

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export callback for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then dropping the ref is the caller's
		 * responsibility
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the gem_prime_export
 * driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import callback for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* If the driver attached, we rely on the free-object path to
	 * detach, which is fine.
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}


/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for
 * use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
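
/*
 * A hedged sketch of a @gem_prime_get_sg_table implementation built on this
 * helper. "struct foo_gem_object" with its pages/num_pages pair is a
 * hypothetical page-backed object, not an API of this file:
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *	}
 */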

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
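
/*
 * Usage sketch for the array conversion, in the style of the TTM-based
 * drivers this helper exists for. The foo_bo structure and its fields are
 * assumptions for illustration; note that @max_pages must cover the whole
 * buffer or the helper WARNs and returns -1:
 *
 *	static int foo_bo_populate_from_sg(struct foo_bo *bo, struct sg_table *sgt)
 *	{
 *		return drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *							bo->dma_addrs,
 *							bo->num_pages);
 *	}
 */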

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
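
/*
 * A sketch of the matching driver free path; everything named foo_* here is
 * hypothetical. Only objects that actually came in through
 * drm_gem_prime_import() carry an import_attach, so the call must be
 * conditional:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		else
 *			foo_gem_release_pages(bo);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */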

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}