// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
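
/*
 * A minimal wiring sketch (hypothetical driver code; the
 * DRM_GEM_SHMEM_DRIVER_OPS convenience macro is provided by
 * <drm/drm_gem_shmem_helper.h>): a driver typically hooks the helpers into
 * its &drm_driver and then allocates objects with drm_gem_shmem_create():
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */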

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
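
/*
 * Allocation sketch (illustrative; "dev" and "size" stand in for a caller's
 * own variables). The return value follows the usual ERR_PTR() convention:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */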

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
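
/*
 * Pairing sketch (illustrative): each successful drm_gem_shmem_get_pages()
 * call must eventually be balanced by drm_gem_shmem_put_pages(); the pages
 * array is only valid between the two:
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	... use shmem->pages ...
 *	drm_gem_shmem_put_pages(shmem);
 */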

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it
 * can also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
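
/*
 * Mapping sketch (illustrative): the kernel address comes back in a
 * struct dma_buf_map, and every successful drm_gem_shmem_vmap() must be
 * balanced by drm_gem_shmem_vunmap() with the same map:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.vaddr, 0, obj->size);
 *	drm_gem_shmem_vunmap(obj, &map);
 */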

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID from the IDR table where the object is registered;
	 * the handle holds the ID that userspace uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/*
 * Update the madvise status of the object. Returns true if the object's
 * backing storage has not been purged, false if it has.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
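
/*
 * Ioctl-side sketch (hypothetical handler; "args" mirrors the madvise ioctl
 * arguments a driver would define): the return value tells userspace whether
 * the backing storage is still present:
 *
 *	args->retained = drm_gem_shmem_madvise(obj, args->madv);
 */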

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
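
/*
 * Shrinker-side sketch (illustrative): a driver's shrinker scan callback can
 * pair drm_gem_shmem_is_purgeable() with the trylock-based
 * drm_gem_shmem_purge() to reclaim objects userspace marked DONTNEED:
 *
 *	if (drm_gem_shmem_is_purgeable(shmem) &&
 *	    drm_gem_shmem_purge(&shmem->base))
 *		freed += shmem->base.size >> PAGE_SHIFT;
 */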

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer by rounding the row
 * size up to an integer number of bytes. Drivers for hardware that doesn't
 * have any additional restrictions on the pitch can directly use this
 * function as their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
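
/*
 * Hookup sketch (hypothetical driver): hardware without extra pitch
 * constraints can plug this straight into its &drm_driver, either directly
 * or via the DRM_GEM_SHMEM_DRIVER_OPS macro shown in the overview:
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */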

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: gem object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR() on
 * failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides the differences between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be called directly by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
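
/*
 * Driver-side sketch (illustrative): this is typically called right before
 * handing the buffer to hardware, e.g. when mapping it into a GPU MMU:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	... program the hardware with sgt ...
 */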

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);