// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
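
/*
 * A minimal wiring sketch (illustrative only; the "foo" names are made-up
 * placeholders, not part of this library):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create		   = drm_gem_shmem_dumb_create,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		.fops			   = &foo_fops,
 *	};
 *
 * Objects allocated through drm_gem_shmem_create() pick up
 * &drm_gem_shmem_funcs below automatically, unless the driver installs its
 * own &drm_gem_object_funcs from its &drm_driver.gem_create_object hook.
 */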

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
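
/*
 * A minimal allocation sketch (illustrative only; the surrounding ioctl
 * code is omitted):
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 * The returned object holds one reference; drop it with
 * drm_gem_object_put(&shmem->base) once a handle or another reference
 * keeps the object alive, as drm_gem_shmem_create_with_handle() below does.
 */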

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
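
/*
 * Calls to drm_gem_shmem_get_pages() must be balanced with
 * drm_gem_shmem_put_pages(). A hypothetical driver-internal helper might
 * look like this (sketch only; foo_do_something() is a placeholder):
 *
 *	int foo_access_pages(struct drm_gem_shmem_object *shmem)
 *	{
 *		int ret = drm_gem_shmem_get_pages(shmem);
 *
 *		if (ret)
 *			return ret;
 *		foo_do_something(shmem->pages);
 *		drm_gem_shmem_put_pages(shmem);
 *		return 0;
 *	}
 */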

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
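
/*
 * drm_gem_shmem_pin() and drm_gem_shmem_unpin() back the .pin and .unpin
 * callbacks in &drm_gem_shmem_funcs, so the DRM core keeps the backing
 * pages resident across a dma-buf attachment. Roughly (a sketch of the
 * core's behaviour, not the literal call chain):
 *
 *	ret = drm_gem_shmem_pin(shmem);		on dma-buf attach
 *	...					buffer stays resident
 *	drm_gem_shmem_unpin(shmem);		on dma-buf detach
 */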

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
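
/*
 * A typical vmap/vunmap pairing, e.g. for a CPU copy into the buffer
 * (illustrative sketch; src and len are placeholders):
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, src, len);
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */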

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/*
 * Update madvise status, returns true if not purged, else false.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
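
/*
 * madvise and purge are meant to be driven by a driver shrinker, roughly
 * as sketched below; the madvise ioctl and the shrinker scan loop are
 * driver-specific and only hinted at here:
 *
 *	userspace marks a buffer as unneeded:
 *		drm_gem_shmem_madvise(shmem, 1);
 *
 *	under memory pressure, for each candidate object:
 *		if (drm_gem_shmem_is_purgeable(shmem))
 *			drm_gem_shmem_purge(shmem);
 */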

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
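
/*
 * A worked example of the computation above (assuming 4 KiB pages): for
 * width = 101, bpp = 32 and height = 64, min_pitch =
 * DIV_ROUND_UP(101 * 32, 8) = 404 bytes, and the object size becomes
 * PAGE_ALIGN(404 * 64) = PAGE_ALIGN(25856) = 28672 bytes.
 */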

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!WARN_ON_ONCE(!shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
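
/*
 * Drivers normally don't call this directly. With file operations set up
 * via DEFINE_DRM_GEM_FOPS(), an mmap(2) on the object's fake offset reaches
 * this helper roughly along the following path (a sketch, not the literal
 * call chain):
 *
 *	userspace mmap(2)
 *	  -> drm_gem_mmap()
 *	    -> obj->funcs->mmap, i.e. drm_gem_shmem_object_mmap()
 *	      -> drm_gem_shmem_mmap()
 */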

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
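
/*
 * A typical call from a driver's job-submission or framebuffer-creation
 * path (sketch only):
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 * The table is cached in shmem->sgt and torn down in drm_gem_shmem_free(),
 * so there is no matching put for this call.
 */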

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
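
/*
 * On import the object is created as a private GEM object without a shmem
 * file of its own, and it takes over @sgt; drm_gem_shmem_free() later hands
 * the table back to PRIME via drm_prime_gem_destroy(). A rough sketch of
 * how the DRM core reaches this hook during dma-buf import:
 *
 *	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 */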

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");