// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
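 *
 * For a driver that only needs dumb buffers and PRIME import backed by shmem,
 * wiring the helpers up can be as small as the following sketch (the
 * hypothetical example_driver and example_fops, e.g. from DEFINE_DRM_GEM_FOPS(),
 * are assumed):
 *
 * .. code-block:: c
 *
 *	static const struct drm_driver example_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops			= &example_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 * The DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h> fills
 * the dumb-buffer and PRIME related &drm_driver callbacks with shmem-backed
 * defaults.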
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
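
/*
 * A minimal usage sketch (the surrounding driver code, and where "dev" and
 * "size" come from, are assumed): allocate a buffer, use it through
 * shmem->base, and drop the initial reference when done with it.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	// ... use shmem->base like any other GEM object ...
 *
 *	drm_gem_object_put(&shmem->base);
 */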

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
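
/*
 * A minimal sketch of pairing drm_gem_shmem_vmap() and drm_gem_shmem_vunmap()
 * (the surrounding caller code and "shmem" are assumed); the mapping is handed
 * back through a struct iosys_map:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	// CPU access goes through map.vaddr; imports that would map to I/O
 *	// memory are rejected above, so this is always system memory here
 *	memset(map.vaddr, 0, shmem->base.size);
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */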

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
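
/*
 * A sketch of how a driver shrinker might use the madvise/purge helpers
 * (the driver-specific madvise ioctl, its "args->madv" value and the shrinker
 * plumbing are assumed): userspace first marks a buffer it can live without,
 * and the shrinker later drops the backing store of purgeable buffers.
 *
 *	// in the driver's madvise ioctl handler:
 *	drm_gem_shmem_madvise(shmem, args->madv);
 *
 *	// in the shrinker scan callback:
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 */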

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
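
/*
 * Drivers normally do not call drm_gem_shmem_mmap() directly: the core
 * reaches it through the drm_gem_shmem_object_mmap() wrapper installed in
 * &drm_gem_object_funcs.mmap above, with the file mmap entry point usually
 * coming from DEFINE_DRM_GEM_FOPS() (a sketch; the "example_fops" name is
 * assumed):
 *
 *	DEFINE_DRM_GEM_FOPS(example_fops);
 */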

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
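
/*
 * A minimal sketch of fetching the device-mapped scatter/gather table before
 * programming hardware (the calling driver code and "shmem" are assumed):
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	// sgt is cached in shmem->sgt and stays mapped until the object is
 *	// purged or freed, so there is no matching "put" for this call
 */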

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");