xref: /openbmc/linux/drivers/gpu/drm/drm_prime.c (revision afb46f79)
1 /*
2  * Copyright © 2012 Red Hat
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Dave Airlie <airlied@redhat.com>
25  *      Rob Clark <rob.clark@linaro.org>
26  *
27  */
28 
29 #include <linux/export.h>
30 #include <linux/dma-buf.h>
31 #include <drm/drmP.h>
32 
33 /*
34  * DMA-BUF/GEM Object references and lifetime overview:
35  *
36  * On export, the dma_buf holds a reference to the exporting GEM
37  * object. It takes this reference in handle_to_fd_ioctl, when it
38  * first calls .prime_export and stores the exporting GEM object in
39  * the dma_buf priv. This reference is released when the dma_buf
40  * object goes away in the driver .release function.
41  *
42  * On import, the importing GEM object holds a reference to the
43  * dma_buf (which in turn holds a ref to the exporting GEM object).
44  * It takes that reference in the fd_to_handle ioctl.
45  * It calls dma_buf_get, creates an attachment to it and stores the
46  * attachment in the GEM object. When the imported object is
47  * destroyed, we remove the attachment and drop the reference
48  * to the dma_buf.
49  *
50  * Thus the chain of references always flows in one direction
51  * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
52  *
53  * Self-importing: if userspace is using PRIME as a replacement for flink
54  * then it will get a fd->handle request for a GEM object that it created.
55  * Drivers should detect this situation and return the GEM object
56  * from the dma-buf private.  Prime will do this automatically for drivers that
57  * use the drm_gem_prime_{import,export} helpers.
58  */
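
/*
 * For reference, the userspace side of all this is just two ioctls. A
 * minimal, error-handling-free round trip might look like the sketch below,
 * where card_fd/other_card_fd are open DRM device fds and gem_handle came
 * from some driver-specific buffer allocation ioctl:
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,
 *		.flags  = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(card_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	  (args.fd now refers to the exported dma-buf and can be passed
 *	   to another process or another device)
 *
 *	struct drm_prime_handle import = { .fd = args.fd };
 *	ioctl(other_card_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import);
 *	  (import.handle is a GEM handle valid on other_card_fd)
 */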
59 
60 struct drm_prime_member {
61 	struct list_head entry;
62 	struct dma_buf *dma_buf;
63 	uint32_t handle;
64 };
65 
66 struct drm_prime_attachment {
67 	struct sg_table *sgt;
68 	enum dma_data_direction dir;
69 };
70 
71 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
72 				    struct dma_buf *dma_buf, uint32_t handle)
73 {
74 	struct drm_prime_member *member;
75 
76 	member = kmalloc(sizeof(*member), GFP_KERNEL);
77 	if (!member)
78 		return -ENOMEM;
79 
80 	get_dma_buf(dma_buf);
81 	member->dma_buf = dma_buf;
82 	member->handle = handle;
83 	list_add(&member->entry, &prime_fpriv->head);
84 	return 0;
85 }
86 
87 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
88 						      uint32_t handle)
89 {
90 	struct drm_prime_member *member;
91 
92 	list_for_each_entry(member, &prime_fpriv->head, entry) {
93 		if (member->handle == handle)
94 			return member->dma_buf;
95 	}
96 
97 	return NULL;
98 }
99 
100 static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
101 				       struct dma_buf *dma_buf,
102 				       uint32_t *handle)
103 {
104 	struct drm_prime_member *member;
105 
106 	list_for_each_entry(member, &prime_fpriv->head, entry) {
107 		if (member->dma_buf == dma_buf) {
108 			*handle = member->handle;
109 			return 0;
110 		}
111 	}
112 	return -ENOENT;
113 }
114 
115 static int drm_gem_map_attach(struct dma_buf *dma_buf,
116 			      struct device *target_dev,
117 			      struct dma_buf_attachment *attach)
118 {
119 	struct drm_prime_attachment *prime_attach;
120 	struct drm_gem_object *obj = dma_buf->priv;
121 	struct drm_device *dev = obj->dev;
122 
123 	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
124 	if (!prime_attach)
125 		return -ENOMEM;
126 
127 	prime_attach->dir = DMA_NONE;
128 	attach->priv = prime_attach;
129 
130 	if (!dev->driver->gem_prime_pin)
131 		return 0;
132 
133 	return dev->driver->gem_prime_pin(obj);
134 }
135 
136 static void drm_gem_map_detach(struct dma_buf *dma_buf,
137 			       struct dma_buf_attachment *attach)
138 {
139 	struct drm_prime_attachment *prime_attach = attach->priv;
140 	struct drm_gem_object *obj = dma_buf->priv;
141 	struct drm_device *dev = obj->dev;
142 	struct sg_table *sgt;
143 
144 	if (dev->driver->gem_prime_unpin)
145 		dev->driver->gem_prime_unpin(obj);
146 
147 	if (!prime_attach)
148 		return;
149 
150 	sgt = prime_attach->sgt;
151 	if (sgt) {
152 		if (prime_attach->dir != DMA_NONE)
153 			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
154 					prime_attach->dir);
155 		sg_free_table(sgt);
156 	}
157 
158 	kfree(sgt);
159 	kfree(prime_attach);
160 	attach->priv = NULL;
161 }
162 
163 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
164 					struct dma_buf *dma_buf)
165 {
166 	struct drm_prime_member *member, *safe;
167 
168 	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
169 		if (member->dma_buf == dma_buf) {
170 			dma_buf_put(dma_buf);
171 			list_del(&member->entry);
172 			kfree(member);
173 		}
174 	}
175 }
176 
177 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
178 					    enum dma_data_direction dir)
179 {
180 	struct drm_prime_attachment *prime_attach = attach->priv;
181 	struct drm_gem_object *obj = attach->dmabuf->priv;
182 	struct sg_table *sgt;
183 
184 	if (WARN_ON(dir == DMA_NONE || !prime_attach))
185 		return ERR_PTR(-EINVAL);
186 
187 	/* return the cached mapping when possible */
188 	if (prime_attach->dir == dir)
189 		return prime_attach->sgt;
190 
191 	/*
192 	 * two mappings with different directions for the same attachment are
193 	 * not allowed
194 	 */
195 	if (WARN_ON(prime_attach->dir != DMA_NONE))
196 		return ERR_PTR(-EBUSY);
197 
198 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
199 
200 	if (!IS_ERR(sgt)) {
201 		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
202 			sg_free_table(sgt);
203 			kfree(sgt);
204 			sgt = ERR_PTR(-ENOMEM);
205 		} else {
206 			prime_attach->sgt = sgt;
207 			prime_attach->dir = dir;
208 		}
209 	}
210 
211 	return sgt;
212 }
213 
214 static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
215 				  struct sg_table *sgt,
216 				  enum dma_data_direction dir)
217 {
218 	/* nothing to be done here */
219 }
220 
221 /**
222  * drm_gem_dmabuf_release - dma_buf release implementation for GEM
223  * @dma_buf: buffer to be released
224  *
225  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
226  * must use this in their dma_buf ops structure as the release callback.
227  */
228 void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
229 {
230 	struct drm_gem_object *obj = dma_buf->priv;
231 
232 	/* drop the reference the exported dma-buf holds on the GEM object */
233 	drm_gem_object_unreference_unlocked(obj);
234 }
235 EXPORT_SYMBOL(drm_gem_dmabuf_release);
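
/*
 * A driver that rolls its own &dma_buf_ops instead of using
 * drm_gem_prime_dmabuf_ops below is still expected to plug this in as its
 * release callback, so that the GEM reference taken at export time gets
 * dropped. A hypothetical sketch (the my_* callbacks are driver-specific,
 * and the remaining mandatory dma_buf_ops members are omitted here):
 *
 *	static const struct dma_buf_ops my_dmabuf_ops = {
 *		.map_dma_buf	= my_gem_map_dma_buf,
 *		.unmap_dma_buf	= my_gem_unmap_dma_buf,
 *		.release	= drm_gem_dmabuf_release,
 *		.mmap		= my_gem_dmabuf_mmap,
 *	};
 */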
236 
237 static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
238 {
239 	struct drm_gem_object *obj = dma_buf->priv;
240 	struct drm_device *dev = obj->dev;
241 
242 	return dev->driver->gem_prime_vmap(obj);
243 }
244 
245 static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
246 {
247 	struct drm_gem_object *obj = dma_buf->priv;
248 	struct drm_device *dev = obj->dev;
249 
250 	dev->driver->gem_prime_vunmap(obj, vaddr);
251 }
252 
253 static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
254 					unsigned long page_num)
255 {
256 	return NULL;
257 }
258 
259 static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
260 					 unsigned long page_num, void *addr)
261 {
262 
263 }

264 static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
265 				 unsigned long page_num)
266 {
267 	return NULL;
268 }
269 
270 static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
271 				  unsigned long page_num, void *addr)
272 {
273 
274 }
275 
276 static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
277 			       struct vm_area_struct *vma)
278 {
279 	struct drm_gem_object *obj = dma_buf->priv;
280 	struct drm_device *dev = obj->dev;
281 
282 	if (!dev->driver->gem_prime_mmap)
283 		return -ENOSYS;
284 
285 	return dev->driver->gem_prime_mmap(obj, vma);
286 }
287 
288 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
289 	.attach = drm_gem_map_attach,
290 	.detach = drm_gem_map_detach,
291 	.map_dma_buf = drm_gem_map_dma_buf,
292 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
293 	.release = drm_gem_dmabuf_release,
294 	.kmap = drm_gem_dmabuf_kmap,
295 	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
296 	.kunmap = drm_gem_dmabuf_kunmap,
297 	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
298 	.mmap = drm_gem_dmabuf_mmap,
299 	.vmap = drm_gem_dmabuf_vmap,
300 	.vunmap = drm_gem_dmabuf_vunmap,
301 };
302 
303 /**
304  * DOC: PRIME Helpers
305  *
306  * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
307  * simpler APIs by using the helper functions @drm_gem_prime_export and
308  * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
309  * five lower-level driver callbacks:
310  *
311  * Export callbacks:
312  *
313  *  - @gem_prime_pin (optional): prepare a GEM object for exporting
314  *
315  *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
316  *
317  *  - @gem_prime_vmap: vmap a buffer exported by your driver
318  *
319  *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
320  *
321  * Import callback:
322  *
323  *  - @gem_prime_import_sg_table (import): produce a GEM object from another
324  *    driver's scatter/gather table
325  */
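
/*
 * Putting this together, a driver using the helpers typically only wires up
 * the hooks below and implements the handful of gem_prime_* callbacks; the
 * rest of this file is shared. This is a sketch only, the my_* functions are
 * hypothetical driver code (gem_prime_pin/unpin and gem_prime_mmap are
 * optional):
 *
 *	static struct drm_driver my_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = my_gem_prime_pin,
 *		.gem_prime_unpin	   = my_gem_prime_unpin,
 *		.gem_prime_get_sg_table	   = my_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = my_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = my_gem_prime_vmap,
 *		.gem_prime_vunmap	   = my_gem_prime_vunmap,
 *		.gem_prime_mmap		   = my_gem_prime_mmap,
 *	};
 */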
326 
327 /**
328  * drm_gem_prime_export - helper library implementation of the export callback
329  * @dev: drm_device to export from
330  * @obj: GEM object to export
331  * @flags: flags like DRM_CLOEXEC
332  *
333  * This is the implementation of the gem_prime_export functions for GEM drivers
334  * using the PRIME helpers.
335  */
336 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
337 				     struct drm_gem_object *obj, int flags)
338 {
339 	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
340 }
341 EXPORT_SYMBOL(drm_gem_prime_export);
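
/*
 * Drivers that need extra work at export time (pinning backing storage,
 * migrating it to a DMA-able domain, etc.) can wrap this helper instead of
 * reimplementing it. A hypothetical sketch, with my_pin_for_export standing
 * in for whatever the driver actually has to do:
 *
 *	struct dma_buf *my_gem_prime_export(struct drm_device *dev,
 *					    struct drm_gem_object *obj,
 *					    int flags)
 *	{
 *		int ret = my_pin_for_export(obj);
 *
 *		if (ret)
 *			return ERR_PTR(ret);
 *		return drm_gem_prime_export(dev, obj, flags);
 *	}
 */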
342 
343 static struct dma_buf *export_and_register_object(struct drm_device *dev,
344 						  struct drm_gem_object *obj,
345 						  uint32_t flags)
346 {
347 	struct dma_buf *dmabuf;
348 
349 	/* prevent races with concurrent gem_close. */
350 	if (obj->handle_count == 0) {
351 		dmabuf = ERR_PTR(-ENOENT);
352 		return dmabuf;
353 	}
354 
355 	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
356 	if (IS_ERR(dmabuf)) {
357 		/* export failed: no dma-buf took ownership of a reference, so
358 		 * just propagate the error; the caller drops its own GEM ref
359 		 */
360 		return dmabuf;
361 	}
362 
363 	/*
364 	 * Note that callers do not need to clean up the export cache
365 	 * since the check for obj->handle_count guarantees that someone
366 	 * will clean it up.
367 	 */
368 	obj->dma_buf = dmabuf;
369 	get_dma_buf(obj->dma_buf);
370 	/* Grab a new ref since the dma-buf now holds a reference to the object */
371 	drm_gem_object_reference(obj);
372 
373 	return dmabuf;
374 }
375 
376 /**
377  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
378  * @dev: dev to export the buffer from
379  * @file_priv: drm file-private structure
380  * @handle: buffer handle to export
381  * @flags: flags like DRM_CLOEXEC
382  * @prime_fd: pointer to storage for the fd id of the created dma-buf
383  *
384  * This is the PRIME export function which GEM drivers must use to ensure
385  * correct lifetime management of the underlying GEM object. The actual
386  * exporting from the GEM object to a dma-buf is done through the
387  * gem_prime_export driver callback.
388  */
389 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
390 			       struct drm_file *file_priv, uint32_t handle,
391 			       uint32_t flags,
392 			       int *prime_fd)
393 {
394 	struct drm_gem_object *obj;
395 	int ret = 0;
396 	struct dma_buf *dmabuf;
397 
398 	mutex_lock(&file_priv->prime.lock);
399 	obj = drm_gem_object_lookup(dev, file_priv, handle);
400 	if (!obj)  {
401 		ret = -ENOENT;
402 		goto out_unlock;
403 	}
404 
405 	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
406 	if (dmabuf) {
407 		get_dma_buf(dmabuf);
408 		goto out_have_handle;
409 	}
410 
411 	mutex_lock(&dev->object_name_lock);
412 	/* re-export the original imported object */
413 	if (obj->import_attach) {
414 		dmabuf = obj->import_attach->dmabuf;
415 		get_dma_buf(dmabuf);
416 		goto out_have_obj;
417 	}
418 
419 	if (obj->dma_buf) {
420 		get_dma_buf(obj->dma_buf);
421 		dmabuf = obj->dma_buf;
422 		goto out_have_obj;
423 	}
424 
425 	dmabuf = export_and_register_object(dev, obj, flags);
426 	if (IS_ERR(dmabuf)) {
427 		/* normally the created dma-buf takes ownership of the ref,
428 		 * but if that fails then drop the ref
429 		 */
430 		ret = PTR_ERR(dmabuf);
431 		mutex_unlock(&dev->object_name_lock);
432 		goto out;
433 	}
434 
435 out_have_obj:
436 	/*
437 	 * If we've exported this buffer then cheat and add it to the import list
438 	 * so we get the correct handle back. We must do this under the
439 	 * protection of dev->object_name_lock to ensure that a racing gem close
440 	 * ioctl doesn't fail to remove this buffer handle from the cache.
441 	 */
442 	ret = drm_prime_add_buf_handle(&file_priv->prime,
443 				       dmabuf, handle);
444 	mutex_unlock(&dev->object_name_lock);
445 	if (ret)
446 		goto fail_put_dmabuf;
447 
448 out_have_handle:
449 	ret = dma_buf_fd(dmabuf, flags);
450 	/*
451 	 * We must _not_ remove the buffer from the handle cache since the newly
452 	 * created dma buf is already linked in the global obj->dma_buf pointer,
453 	 * and that is invariant as long as a userspace gem handle exists.
454 	 * Closing the handle will clean out the cache anyway, so we don't leak.
455 	 */
456 	if (ret < 0) {
457 		goto fail_put_dmabuf;
458 	} else {
459 		*prime_fd = ret;
460 		ret = 0;
461 	}
462 
463 	goto out;
464 
465 fail_put_dmabuf:
466 	dma_buf_put(dmabuf);
467 out:
468 	drm_gem_object_unreference_unlocked(obj);
469 out_unlock:
470 	mutex_unlock(&file_priv->prime.lock);
471 
472 	return ret;
473 }
474 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
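
/*
 * Because of the obj->dma_buf caching above, exporting the same handle twice
 * hands out two different fds that refer to the same underlying dma-buf:
 *
 *	drm_gem_prime_handle_to_fd(dev, file_priv, handle, 0, &fd1);
 *	drm_gem_prime_handle_to_fd(dev, file_priv, handle, 0, &fd2);
 *	  (fd1 != fd2, but both wrap the same struct dma_buf)
 */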
475 
476 /**
477  * drm_gem_prime_import - helper library implementation of the import callback
478  * @dev: drm_device to import into
479  * @dma_buf: dma-buf object to import
480  *
481  * This is the implementation of the gem_prime_import functions for GEM drivers
482  * using the PRIME helpers.
483  */
484 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
485 					    struct dma_buf *dma_buf)
486 {
487 	struct dma_buf_attachment *attach;
488 	struct sg_table *sgt;
489 	struct drm_gem_object *obj;
490 	int ret;
491 
492 	if (!dev->driver->gem_prime_import_sg_table)
493 		return ERR_PTR(-EINVAL);
494 
495 	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
496 		obj = dma_buf->priv;
497 		if (obj->dev == dev) {
498 			/*
499 			 * Importing a dmabuf exported from our own GEM increases
500 			 * the refcount on the GEM itself instead of the dmabuf's f_count.
501 			 */
502 			drm_gem_object_reference(obj);
503 			return obj;
504 		}
505 	}
506 
507 	attach = dma_buf_attach(dma_buf, dev->dev);
508 	if (IS_ERR(attach))
509 		return ERR_CAST(attach);
510 
511 	get_dma_buf(dma_buf);
512 
513 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
514 	if (IS_ERR(sgt)) {
515 		ret = PTR_ERR(sgt);
516 		goto fail_detach;
517 	}
518 
519 	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
520 	if (IS_ERR(obj)) {
521 		ret = PTR_ERR(obj);
522 		goto fail_unmap;
523 	}
524 
525 	obj->import_attach = attach;
526 
527 	return obj;
528 
529 fail_unmap:
530 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
531 fail_detach:
532 	dma_buf_detach(dma_buf, attach);
533 	dma_buf_put(dma_buf);
534 
535 	return ERR_PTR(ret);
536 }
537 EXPORT_SYMBOL(drm_gem_prime_import);
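
/*
 * With this helper in place the only import-side driver code is the
 * @gem_prime_import_sg_table callback. A hedged sketch, where struct my_bo
 * and my_bo_create_from_sg are hypothetical stand-ins for however the driver
 * wraps a buffer object around an existing sg table:
 *
 *	struct drm_gem_object *
 *	my_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
 *				     struct sg_table *sgt)
 *	{
 *		struct my_bo *bo = my_bo_create_from_sg(dev, size, sgt);
 *
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *		return &bo->base;
 *	}
 */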
538 
539 /**
540  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
541  * @dev: dev to import the buffer into
542  * @file_priv: drm file-private structure
543  * @prime_fd: fd id of the dma-buf which should be imported
544  * @handle: pointer to storage for the handle of the imported buffer object
545  *
546  * This is the PRIME import function which GEM drivers must use to ensure
547  * correct lifetime management of the underlying GEM object. The actual
548  * importing of the GEM object from the dma-buf is done through the
549  * gem_prime_import driver callback.
550  */
551 int drm_gem_prime_fd_to_handle(struct drm_device *dev,
552 			       struct drm_file *file_priv, int prime_fd,
553 			       uint32_t *handle)
554 {
555 	struct dma_buf *dma_buf;
556 	struct drm_gem_object *obj;
557 	int ret;
558 
559 	dma_buf = dma_buf_get(prime_fd);
560 	if (IS_ERR(dma_buf))
561 		return PTR_ERR(dma_buf);
562 
563 	mutex_lock(&file_priv->prime.lock);
564 
565 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
566 			dma_buf, handle);
567 	if (ret == 0)
568 		goto out_put;
569 
570 	/* never seen this one, need to import */
571 	mutex_lock(&dev->object_name_lock);
572 	obj = dev->driver->gem_prime_import(dev, dma_buf);
573 	if (IS_ERR(obj)) {
574 		ret = PTR_ERR(obj);
575 		goto out_unlock;
576 	}
577 
578 	if (obj->dma_buf) {
579 		WARN_ON(obj->dma_buf != dma_buf);
580 	} else {
581 		obj->dma_buf = dma_buf;
582 		get_dma_buf(dma_buf);
583 	}
584 
585 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
586 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
587 	drm_gem_object_unreference_unlocked(obj);
588 	if (ret)
589 		goto out_put;
590 
591 	ret = drm_prime_add_buf_handle(&file_priv->prime,
592 			dma_buf, *handle);
593 	if (ret)
594 		goto fail;
595 
596 	mutex_unlock(&file_priv->prime.lock);
597 
598 	dma_buf_put(dma_buf);
599 
600 	return 0;
601 
602 fail:
603 	/* hmm, if driver attached, we are relying on the free-object path
604 	 * to detach.. which seems ok..
605 	 */
606 	drm_gem_handle_delete(file_priv, *handle);
607 out_unlock:
608 	mutex_unlock(&dev->object_name_lock);
609 out_put:
610 	dma_buf_put(dma_buf);
611 	mutex_unlock(&file_priv->prime.lock);
612 	return ret;
613 }
614 EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
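
/*
 * Note that the per-file handle cache means importing the same dma-buf twice
 * through the same drm_file yields the same handle rather than a new one:
 *
 *	drm_gem_prime_fd_to_handle(dev, file_priv, fd, &h1);
 *	drm_gem_prime_fd_to_handle(dev, file_priv, fd, &h2);
 *	  (h1 == h2, and only a single cache entry is kept)
 */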
615 
616 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
617 				 struct drm_file *file_priv)
618 {
619 	struct drm_prime_handle *args = data;
620 	uint32_t flags;
621 
622 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
623 		return -EINVAL;
624 
625 	if (!dev->driver->prime_handle_to_fd)
626 		return -ENOSYS;
627 
628 	/* check flags are valid */
629 	if (args->flags & ~DRM_CLOEXEC)
630 		return -EINVAL;
631 
632 	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
633 	flags = args->flags & DRM_CLOEXEC;
634 
635 	return dev->driver->prime_handle_to_fd(dev, file_priv,
636 			args->handle, flags, &args->fd);
637 }
638 
639 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
640 				 struct drm_file *file_priv)
641 {
642 	struct drm_prime_handle *args = data;
643 
644 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
645 		return -EINVAL;
646 
647 	if (!dev->driver->prime_fd_to_handle)
648 		return -ENOSYS;
649 
650 	return dev->driver->prime_fd_to_handle(dev, file_priv,
651 			args->fd, &args->handle);
652 }
653 
654 /**
655  * drm_prime_pages_to_sg - converts a page array into an sg list
656  * @pages: pointer to the array of page pointers to convert
657  * @nr_pages: length of the page vector
658  *
659  * This helper creates an sg table object from a set of pages; the driver
660  * is responsible for mapping the pages into the importer's address space
661  * for use with dma_buf itself.
662  */
663 struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
664 {
665 	struct sg_table *sg = NULL;
666 	int ret;
667 
668 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
669 	if (!sg) {
670 		ret = -ENOMEM;
671 		goto out;
672 	}
673 
674 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
675 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
676 	if (ret)
677 		goto out;
678 
679 	return sg;
680 out:
681 	kfree(sg);
682 	return ERR_PTR(ret);
683 }
684 EXPORT_SYMBOL(drm_prime_pages_to_sg);
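
/*
 * For drivers that already track their backing storage as a page array this
 * is usually the whole @gem_prime_get_sg_table implementation. A sketch,
 * assuming a hypothetical struct my_bo with a pages array and a page count:
 *
 *	struct sg_table *my_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct my_bo *bo = to_my_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *	}
 */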
685 
686 /**
687  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
688  * @sgt: scatter-gather table to convert
689  * @pages: array of page pointers to store the page array in
690  * @addrs: optional array to store the dma bus address of each page
691  * @max_pages: size of both the passed-in arrays
692  *
693  * Exports an sg table into an array of pages and addresses. This is currently
694  * required by the TTM driver in order to do correct fault handling.
695  */
696 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
697 				     dma_addr_t *addrs, int max_pages)
698 {
699 	unsigned count;
700 	struct scatterlist *sg;
701 	struct page *page;
702 	u32 len;
703 	int pg_index;
704 	dma_addr_t addr;
705 
706 	pg_index = 0;
707 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
708 		len = sg->length;
709 		page = sg_page(sg);
710 		addr = sg_dma_address(sg);
711 
712 		while (len > 0) {
713 			if (WARN_ON(pg_index >= max_pages))
714 				return -1;
715 			pages[pg_index] = page;
716 			if (addrs)
717 				addrs[pg_index] = addr;
718 
719 			page++;
720 			addr += PAGE_SIZE;
721 			len -= PAGE_SIZE;
722 			pg_index++;
723 		}
724 	}
725 	return 0;
726 }
727 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
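
/*
 * A typical import path sizes both arrays from the dma-buf and lets this
 * helper fill them in. A sketch only, with hypothetical surrounding code
 * (size is the page-aligned dma-buf size):
 *
 *	int npages = size >> PAGE_SHIFT;
 *	struct page **pages = drm_malloc_ab(npages, sizeof(struct page *));
 *	dma_addr_t *addrs = drm_malloc_ab(npages, sizeof(dma_addr_t));
 *	int ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
 */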
728 
729 /**
730  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
731  * @obj: GEM object which was created from a dma-buf
732  * @sg: the sg-table which was pinned at import time
733  *
734  * This is the cleanup function which GEM drivers need to call when they use
735  * @drm_gem_prime_import to import dma-bufs.
736  */
737 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
738 {
739 	struct dma_buf_attachment *attach;
740 	struct dma_buf *dma_buf;

741 	attach = obj->import_attach;
742 	if (sg)
743 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
744 	dma_buf = attach->dmabuf;
745 	dma_buf_detach(attach->dmabuf, attach);
746 	/* remove the reference */
747 	dma_buf_put(dma_buf);
748 }
749 EXPORT_SYMBOL(drm_prime_gem_destroy);
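
/*
 * The usual caller is the driver's free-object path, which only has to do
 * this for imported objects. A sketch, with struct my_bo and to_my_bo being
 * hypothetical driver code that remembered the sg table from import time:
 *
 *	void my_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct my_bo *bo = to_my_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		else
 *			my_free_backing_storage(bo);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */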
750 
751 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
752 {
753 	INIT_LIST_HEAD(&prime_fpriv->head);
754 	mutex_init(&prime_fpriv->lock);
755 }
756 
757 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
758 {
759 	/* by now drm_gem_release should've made sure the list is empty */
760 	WARN_ON(!list_empty(&prime_fpriv->head));
761 }
762