// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM object callbacks in struct &drm_gem_object_funcs, see the likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
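
/*
 * A minimal wiring sketch (driver names are hypothetical; this assumes the
 * DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h>): drivers
 * typically plug the dumb-buffer and PRIME paths of these helpers into their
 * &drm_driver in one go:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */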

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
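
/*
 * A minimal usage sketch (hypothetical caller, not taken from a real driver):
 * allocate a buffer and drop the local reference once a handle or internal
 * pointer holds it:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem->base like any other GEM object ...
 *
 *	drm_gem_object_put(&shmem->base);
 */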

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
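
/*
 * A minimal usage sketch (hypothetical caller): map the buffer for CPU access
 * and always balance the mapping with drm_gem_shmem_vunmap():
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	memcpy(map.vaddr, src, len);	// on success vaddr is system memory, not I/O memory
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */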

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID for the object in the DRM file's handle IDR; the
	 * handle is the ID that userspace uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/*
 * Update the madvise status. Returns true if the object has not been purged,
 * false if it has.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
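
/*
 * A minimal sketch of how a driver's madvise ioctl and shrinker might use
 * these helpers (hypothetical caller; this assumes the convention, used by
 * drm_gem_shmem_is_purgeable(), that madv > 0 means "don't need"):
 *
 *	drm_gem_shmem_madvise(shmem, args->madv);
 *
 *	...
 *
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 */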

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
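
/*
 * A worked example of the computation above (illustrative values): width = 300
 * and bpp = 16 give min_pitch = DIV_ROUND_UP(300 * 16, 8) = 600 bytes; with
 * height = 200 the object size becomes PAGE_ALIGN(600 * 200) = 122880 bytes on
 * 4 KiB pages. A driver without extra pitch constraints can plug the helper in
 * directly (hypothetical driver struct):
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */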

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
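
/*
 * A minimal sketch of the usual plumbing that leads here (hypothetical driver
 * names; this assumes the stock GEM file operations): userspace mmap() goes
 * through drm_gem_mmap(), which dispatches to the .mmap callback in
 * drm_gem_shmem_funcs above:
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.fops = &foo_fops,
 *		...
 *	};
 */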

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
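
/*
 * A minimal usage sketch (hypothetical caller; foo_map_range() is an invented
 * placeholder): fetch the dma-mapped table and walk the DMA addresses, e.g. to
 * program device page tables:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_map_range(foo, sg_dma_address(sg), sg_dma_len(sg));
 */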

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");