// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
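 *
 * A minimal usage sketch (hypothetical driver code, shown for illustration
 * only): a driver without special placement or caching requirements can wire
 * the helpers in this file straight into its &drm_driver, for instance::
 *
 *	static const struct drm_driver hypothetical_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create		= drm_gem_shmem_dumb_create,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		... remaining fields (fops, name, ...) omitted ...
 *	};
 *
 * Objects created through drm_gem_shmem_create() use the drm_gem_shmem_funcs
 * table below unless the driver installs its own &drm_gem_object_funcs from
 * its &drm_driver.gem_create_object hook.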
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
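 *
 * A minimal sketch of a typical (hypothetical) caller, which checks the
 * result with IS_ERR()::
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return ERR_CAST(shmem);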
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
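 *
 * A minimal sketch of a direct call from (hypothetical) driver code::
 *
 *	struct dma_buf_map map;
 *	int ret = drm_gem_shmem_vmap(obj, &map);
 *
 *	if (ret)
 *		return ret;
 *	... access the buffer through map.vaddr ...
 *	drm_gem_shmem_vunmap(obj, &map);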
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it
 * can also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

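/**
 * drm_gem_shmem_create_with_handle - Create a shmem GEM object and return a GEM handle to it
 * @file_priv: DRM file structure to register the handle for
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the GEM handle
 *
 * This function creates a shmem GEM object and registers a handle for it in
 * @file_priv. The reference taken at creation is dropped before returning, so
 * the handle holds the reference on behalf of userspace.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */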
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle holds the ID that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/*
 * Update the madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
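/*
 * Typical (driver-specific, not enforced here) usage: call this from the
 * driver's madvise ioctl and, when drm_gem_shmem_is_purgeable() then reports
 * the object as purgeable, make it visible to the driver's shrinker so that
 * drm_gem_shmem_purge() can reclaim the backing pages under memory pressure.
 */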
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

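/**
 * drm_gem_shmem_purge_locked - Free the backing storage of a purgeable object
 * @obj: GEM object
 *
 * Releases the sg table, the backing pages and the mmap offset of an object
 * that has been marked as purgeable with drm_gem_shmem_madvise(), and drops
 * any cached pages from the shmem file. The caller must hold
 * &drm_gem_shmem_object.pages_lock; see drm_gem_shmem_purge() for the variant
 * that takes the lock itself.
 */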
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

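/**
 * drm_gem_shmem_purge - Free the backing storage of a purgeable object
 * @obj: GEM object
 *
 * Tries to take &drm_gem_shmem_object.pages_lock and, if successful, purges
 * the object with drm_gem_shmem_purge_locked().
 *
 * Returns:
 * True if the object was purged, false if the lock could not be taken.
 */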
bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
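 *
 * As a worked example with hypothetical values: a 640x480 buffer at 32 bpp
 * gets min_pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes, and if userspace
 * left pitch and size at zero the size becomes PAGE_ALIGN(2560 * 480) =
 * 1228800 bytes (already a multiple of the 4 KiB page size).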
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers that employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly; instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist yet, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
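 *
 * A minimal sketch of a driver-side call (hypothetical, error handling only)::
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);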
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);