// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
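 *
 * Drivers can wire up these helpers through the DRM_GEM_SHMEM_DRIVER_OPS
 * macro in their &drm_driver structure. A minimal sketch, with "foo"
 * standing in for a hypothetical driver::
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops = &foo_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};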
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
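 *
 * A minimal usage sketch (further driver-specific setup omitted)::
 *
 *	struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
 *
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);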
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
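 *
 * A minimal sketch of a short-lived CPU mapping; the memset() is purely
 * illustrative::
 *
 *	struct dma_buf_map map;
 *	int ret = drm_gem_shmem_vmap(shmem, &map);
 *
 *	if (ret)
 *		return ret;
 *	memset(map.vaddr, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);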
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID in the file's handle table, where the object is
	 * registered; the handle is the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/**
 * drm_gem_shmem_madvise - Update the madvise status of a shmem GEM object
 * @shmem: shmem GEM object
 * @madv: The new madvise status
 *
 * Returns:
 * True if the backing pages have not been purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
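
/*
 * A typical flow, sketched: a driver-specific ioctl forwards userspace
 * madvise requests to drm_gem_shmem_madvise(), and the driver's shrinker
 * later reclaims DONTNEED buffers (object tracking and locking are
 * driver-specific and omitted here):
 *
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 */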

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
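 *
 * For example, an 800x600 dumb buffer at 32 bpp gets a minimum pitch of
 * DIV_ROUND_UP(800 * 32, 8) = 3200 bytes and a size of
 * PAGE_ALIGN(3200 * 600) bytes.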
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
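 *
 * Drivers normally don't call this directly; it is wired up as the
 * &drm_gem_object_funcs.mmap callback through drm_gem_shmem_object_mmap(),
 * as in the default drm_gem_shmem_funcs table at the top of this file.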
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * the differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
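 *
 * A minimal sketch; on success the table is already dma-mapped for the device
 * and owned by @shmem, so the caller must not free it::
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);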
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");