// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
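
/*
 * A minimal usage sketch (illustrative only, not part of the helpers): a
 * driver allocates a shmem-backed object and later drops its reference, which
 * ends up in drm_gem_shmem_free() via the .free callback of drm_gem_shmem_funcs
 * below (drm_gem_shmem_object_free()). The "dev" and "size" variables are
 * assumed to be provided by the caller.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem->base like any other GEM object ...
 *
 *	drm_gem_object_put(&shmem->base);
 */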

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	drm_WARN_ON(obj->dev, shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
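
/*
 * Illustrative sketch (not part of the helpers): mapping a shmem GEM object
 * into the kernel address space, clearing it, and dropping the mapping again.
 * struct iosys_map and iosys_map_memset() come from <linux/iosys-map.h>; the
 * "shmem" variable is assumed to reference a valid object.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, shmem->base.size);
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */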

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
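
/*
 * Illustrative sketch (not part of the helpers): a driver typically lets
 * userspace mark a buffer as purgeable through drm_gem_shmem_madvise(), e.g.
 * from a driver-specific madvise ioctl:
 *
 *	drm_gem_shmem_madvise(shmem, 1);
 *
 * and later, under memory pressure (e.g. from a driver shrinker), reclaims the
 * backing pages after checking drm_gem_shmem_is_purgeable() from
 * <drm/drm_gem_shmem_helper.h>:
 *
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		purged = drm_gem_shmem_purge(shmem);
 *
 * The madv value of 1 and the shrinker context are assumptions made for the
 * example.
 */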

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the minimum pitch of the dumb buffer, rounded up to a
 * whole number of bytes per row. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
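
/*
 * Worked example (values assumed for illustration): for a 300x200 dumb buffer
 * at 32 bits per pixel, with args->pitch and args->size left at zero and a
 * 4 KiB page size, the helper computes
 *
 *	min_pitch   = DIV_ROUND_UP(300 * 32, 8) = 1200 bytes
 *	args->pitch = 1200
 *	args->size  = PAGE_ALIGN(1200 * 200) = PAGE_ALIGN(240000) = 241664 bytes
 */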

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		vma->vm_private_data = NULL;
		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
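
/*
 * Illustrative sketch (not part of the helpers): a driver fetching the
 * DMA-mapped scatter/gather table and programming each segment into its
 * hardware. example_hw_write_segment() and "priv" are made-up driver names;
 * the iteration macro comes from <linux/scatterlist.h>.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		example_hw_write_segment(priv, sg_dma_address(sg), sg_dma_len(sg));
 */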

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
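
/*
 * Illustrative sketch (not part of the helpers): wiring the shmem helpers into
 * a driver. The DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h>
 * points &drm_driver.gem_prime_import_sg_table and &drm_driver.dumb_create at
 * the helpers in this file; the "example" names are assumptions made for the
 * example.
 *
 *	DEFINE_DRM_GEM_FOPS(example_fops);
 *
 *	static const struct drm_driver example_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops = &example_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name = "example",
 *		.desc = "example shmem-backed DRM driver",
 *		.major = 1,
 *		.minor = 0,
 *	};
 */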

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");