// SPDX-License-Identifier: GPL-2.0-or-later

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;

/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. The
 * respective buffer object is provided by &struct drm_gem_vram_object.
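 *
 * A typical sequence, sketched below with placeholder values for the VRAM
 * base address, size and buffer-object size, first sets up the memory
 * manager during device initialization and then creates buffer objects
 * from it. Error handling is omitted for brevity.
 *
 * .. code-block:: c
 *
 *	struct drm_vram_mm *vmm;
 *	struct drm_gem_vram_object *gbo;
 *
 *	vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
 *
 *	gbo = drm_gem_vram_create(dev, &vmm->bdev, size, 0, false);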
 */

/*
 * Buffer-objects helpers
 */

static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */

	WARN_ON(gbo->kmap_use_count);
	WARN_ON(gbo->kmap.virtual);

	drm_gem_object_release(&gbo->bo.base);
}

static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	unsigned int i;
	unsigned int c = 0;
	u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & TTM_PL_FLAG_VRAM)
		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM |
					     invariant_flags;

	if (pl_flag & TTM_PL_FLAG_SYSTEM)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM |
					     invariant_flags;

	if (!c)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM |
					     invariant_flags;

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

static int drm_gem_vram_init(struct drm_device *dev,
			     struct ttm_bo_device *bdev,
			     struct drm_gem_vram_object *gbo,
			     size_t size, unsigned long pg_align,
			     bool interruptible)
{
	int ret;
	size_t acc_size;

	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
	if (ret)
		return ret;

	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, interruptible, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		goto err_drm_gem_object_release;

	return 0;

err_drm_gem_object_release:
	drm_gem_object_release(&gbo->bo.base);
	return ret;
}

/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @bdev:		the TTM BO device backing the object
 * @size:		the buffer size in bytes
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
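 *
 * A minimal usage sketch, assuming a driver that stores its VRAM manager
 * in &struct drm_device.vram_mm, might look as follows;
 * drm_gem_vram_put() drops the reference when the buffer object is no
 * longer needed.
 *
 * .. code-block:: c
 *
 *	struct drm_gem_vram_object *gbo;
 *
 *	gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
 *	if (IS_ERR(gbo))
 *		return PTR_ERR(gbo);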
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						struct ttm_bo_device *bdev,
						size_t size,
						unsigned long pg_align,
						bool interruptible)
{
	struct drm_gem_vram_object *gbo;
	int ret;

	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
	if (!gbo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
	if (ret < 0)
		goto err_kfree;

	return gbo;

err_kfree:
	kfree(gbo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);

/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo:	the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->pin_count))
		return (s64)-ENODEV;
	return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);

static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	if (gbo->pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

out:
	++gbo->pin_count;

	return 0;
}

/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
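 *
 * For example, a cursor's buffer object could be pinned to the high end
 * of video memory as sketched below; the sketch assumes an already
 * created buffer object in gbo.
 *
 * .. code-block:: c
 *
 *	int ret;
 *
 *	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
 *				    DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
 *	if (ret)
 *		return ret;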
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_pin_locked(gbo, pl_flag);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	if (WARN_ON_ONCE(!gbo->pin_count))
		return 0;

	--gbo->pin_count;
	if (gbo->pin_count)
		return 0;

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_unpin_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);

static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				      bool map, bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (gbo->kmap_use_count > 0)
		goto out;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!kmap->virtual) {
		if (is_iomem)
			*is_iomem = false;
		return NULL; /* not mapped; don't increment ref */
	}
	++gbo->kmap_use_count;
	if (is_iomem)
		return ttm_kmap_obj_virtual(kmap, is_iomem);
	return kmap->virtual;
}

/**
 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
 * @gbo:	the GEM VRAM object
 * @map:	establish a mapping if necessary
 * @is_iomem:	returns true if the mapped memory is I/O memory, or false \
	otherwise; can be NULL
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the current mapping. If the parameter map is false, the
 * function only queries the current mapping, but does not establish a
 * new one.
 *
 * Returns:
 * The buffer's virtual address if mapped, or
 * NULL if not mapped, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
			bool *is_iomem)
{
	int ret;
	void *virtual;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ERR_PTR(ret);
	virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
	ttm_bo_unreserve(&gbo->bo);

	return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);

static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->kmap_use_count))
		return;
	if (--gbo->kmap_use_count > 0)
		return;

	/*
	 * Permanently mapping and unmapping buffers adds overhead from
	 * updating the page tables and creates debugging output. Therefore,
	 * we delay the actual unmap operation until the BO gets evicted
	 * from memory. See drm_gem_vram_bo_driver_move_notify().
	 */
}

/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo:	the GEM VRAM object
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;
	drm_gem_vram_kunmap_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);

/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 *                       space
 * @gbo:	The GEM VRAM object to map
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned objects cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * If you have special requirements for the pinning or mapping operations,
 * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
 *
 * Returns:
 * The buffer's virtual address on success, or
 * an ERR_PTR()-encoded error code otherwise.
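 *
 * A typical update sequence pairs both calls; the sketch below assumes
 * local variables src and len that describe the data to be copied into
 * the buffer.
 *
 * .. code-block:: c
 *
 *	void *vaddr;
 *
 *	vaddr = drm_gem_vram_vmap(gbo);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, src, len);
 *
 *	drm_gem_vram_vunmap(gbo, vaddr);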
 */
void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
{
	int ret;
	void *base;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ERR_PTR(ret);

	ret = drm_gem_vram_pin_locked(gbo, 0);
	if (ret)
		goto err_ttm_bo_unreserve;
	base = drm_gem_vram_kmap_locked(gbo, true, NULL);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err_drm_gem_vram_unpin_locked;
	}

	ttm_bo_unreserve(&gbo->bo);

	return base;

err_drm_gem_vram_unpin_locked:
	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_vmap);

/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo:	The GEM VRAM object to unmap
 * @vaddr:	The mapping's base address as returned by drm_gem_vram_vmap()
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;

	drm_gem_vram_kunmap_locked(gbo);
	drm_gem_vram_unpin_locked(gbo);

	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);

/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @bdev:		the TTM BO device managing the buffer object
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
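 *
 * A driver that requires a non-zero page alignment could forward its
 * arguments as sketched below; the function name and the alignment
 * constant are placeholders.
 *
 * .. code-block:: c
 *
 *	int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *			   struct drm_mode_create_dumb *args)
 *	{
 *		return drm_gem_vram_fill_create_dumb(file, dev,
 *						     &dev->vram_mm->bdev,
 *						     MY_PG_ALIGN, false,
 *						     args);
 *	}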
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  struct ttm_bo_device *bdev,
				  unsigned long pg_align,
				  bool interruptible,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	pitch = args->width * ((args->bpp + 7) / 8);
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
	if (ret)
		goto err_drm_gem_object_put_unlocked;

	drm_gem_object_put_unlocked(&gbo->bo.base);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put_unlocked:
	drm_gem_object_put_unlocked(&gbo->bo.base);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);

/*
 * Helpers for struct ttm_bo_driver
 */

static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}

static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
					       struct ttm_placement *pl)
{
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}

static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
					       bool evict,
					       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (WARN_ON_ONCE(gbo->kmap_use_count))
		return;

	if (!kmap->virtual)
		return;
	ttm_bo_kunmap(kmap);
	kmap->virtual = NULL;
}

/*
 * Helpers for struct drm_gem_object_funcs
 */

/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem:       GEM object. Refers to &struct drm_gem_vram_object.gem
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}

/*
 * Helpers for dumb buffers
 */

/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This function requires the driver to use &struct drm_device.vram_mm for
 * its instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
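 *
 * Drivers typically wire up this helper, together with
 * drm_gem_vram_driver_dumb_mmap_offset(), in their instance of
 * &struct drm_driver, as sketched here:
 *
 * .. code-block:: c
 *
 *	static struct drm_driver driver = {
 *		.dumb_create = drm_gem_vram_driver_dumb_create,
 *		.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
 *	};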
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
	Implements &struct drm_driver.dumb_mmap_offset
 * @file:	DRM file pointer.
 * @dev:	DRM device.
 * @handle:	GEM handle
 * @offset:	Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
					 struct drm_device *dev,
					 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct drm_gem_vram_object *gbo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return -ENOENT;

	gbo = drm_gem_vram_of_gem(gem);
	*offset = drm_gem_vram_mmap_offset(gbo);

	drm_gem_object_put_unlocked(gem);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);

/*
 * Helpers for struct drm_plane_helper_funcs
 */

/**
 * drm_gem_vram_plane_helper_prepare_fb() - \
 *	Implements &struct drm_plane_helper_funcs.prepare_fb
 * @plane:	a DRM plane
 * @new_state:	the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
 *
 * Returns:
 *	0 on success, or
 *	a negative errno code otherwise.
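 *
 * Drivers can use this function and its cleanup counterpart directly in
 * their plane helpers, as in this sketch with a placeholder name:
 *
 * .. code-block:: c
 *
 *	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
 *		.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
 *		.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
 *	};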
 */
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	size_t i;
	struct drm_gem_vram_object *gbo;
	int ret;

	if (!new_state->fb)
		return 0;

	for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
		if (!new_state->fb->obj[i])
			continue;
		gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
		if (ret)
			goto err_drm_gem_vram_unpin;
	}

	return 0;

err_drm_gem_vram_unpin:
	while (i) {
		--i;
		gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
		drm_gem_vram_unpin(gbo);
	}
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);

/**
 * drm_gem_vram_plane_helper_cleanup_fb() - \
 *	Implements &struct drm_plane_helper_funcs.cleanup_fb
 * @plane:	a DRM plane
 * @old_state:	the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_plane_helper_prepare_fb().
 */
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	size_t i;
	struct drm_gem_vram_object *gbo;

	if (!old_state->fb)
		return;

	for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
		if (!old_state->fb->obj[i])
			continue;
		gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
		drm_gem_vram_unpin(gbo);
	}
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);

/*
 * Helpers for struct drm_simple_display_pipe_funcs
 */

/**
 * drm_gem_vram_simple_display_pipe_prepare_fb() - \
 *	Implements &struct drm_simple_display_pipe_funcs.prepare_fb
 * @pipe:	a simple display pipe
 * @new_state:	the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
 *
 * Returns:
 *	0 on success, or
 *	a negative errno code otherwise.
 */
int drm_gem_vram_simple_display_pipe_prepare_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *new_state)
{
	return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);

/**
 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
 *	Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
 * @pipe:	a simple display pipe
 * @old_state:	the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_simple_display_pipe_prepare_fb().
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *old_state)
{
	drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);

/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem:	The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the primary use case for these
	 * PRIME helpers. It may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem:	The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() - \
	Implements &struct drm_gem_object_funcs.vmap
 * @gem:	The GEM object to map
 *
 * Returns:
 * The buffer's virtual address on success, or
 * NULL otherwise.
 */
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	void *base;

	base = drm_gem_vram_vmap(gbo);
	if (IS_ERR(base))
		return NULL;
	return base;
}

/**
 * drm_gem_vram_object_vunmap() - \
	Implements &struct drm_gem_object_funcs.vunmap
 * @gem:	The GEM object to unmap
 * @vaddr:	The mapping's base address
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_vunmap(gbo, vaddr);
}

/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free	= drm_gem_vram_object_free,
	.pin	= drm_gem_vram_object_pin,
	.unpin	= drm_gem_vram_object_unpin,
	.vmap	= drm_gem_vram_object_vmap,
	.vunmap	= drm_gem_vram_object_vunmap,
	.mmap   = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

/*
 * VRAM memory manager
 */

/*
 * TTM TT
 */

static void backend_func_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func backend_func = {
	.destroy = backend_func_destroy
};

/*
 * TTM BO device
 */

static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &backend_func;

	ret = ttm_tt_init(tt, bo, page_flags);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}

static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				   struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}

static void bo_driver_move_notify(struct ttm_buffer_object *bo,
				  bool evict,
				  struct ttm_mem_reg *new_mem)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
}

static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	mem->bus.addr = NULL;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do */
		mem->bus.offset = 0;
		mem->bus.base = 0;
		mem->bus.is_iomem = false;
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = vmm->vram_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{ }

static struct ttm_bo_driver bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bo_driver_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.move_notify = bo_driver_move_notify,
	.io_mem_reserve = bo_driver_io_mem_reserve,
	.io_mem_free = bo_driver_io_mem_free,
};

/*
 * struct drm_vram_mm
 */

#if defined(CONFIG_DEBUG_FS)
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
	struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
	struct drm_printer p = drm_seq_file_printer(m);

	spin_lock(&ttm_bo_glob.lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&ttm_bo_glob.lru_lock);
	return 0;
}

static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
	{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
#endif

/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
	int ret = 0;

#if defined(CONFIG_DEBUG_FS)
	ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
				       ARRAY_SIZE(drm_vram_mm_debugfs_list),
				       minor->debugfs_root, minor);
#endif
	return ret;
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);

static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
			    uint64_t vram_base, size_t vram_size)
{
	int ret;

	vmm->vram_base = vram_base;
	vmm->vram_size = vram_size;

	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
				 dev->anon_inode->i_mapping,
				 dev->vma_offset_manager,
				 true);
	if (ret)
		return ret;

	ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	return 0;
}

static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_bo_device_release(&vmm->bdev);
}

/*
 * Helpers for integration with struct drm_device
 */

/**
 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 *
 * Returns:
 * The new instance of &struct drm_vram_mm on success, or
 * an ERR_PTR()-encoded errno code otherwise.
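 *
 * Drivers typically call this function at initialization time and pair it
 * with drm_vram_helper_release_mm() at shutdown. The sketch below assumes
 * a PCI device pdev whose video memory is exposed through BAR 0:
 *
 * .. code-block:: c
 *
 *	struct drm_vram_mm *vmm;
 *
 *	vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
 *				       pci_resource_len(pdev, 0));
 *	if (IS_ERR(vmm))
 *		return PTR_ERR(vmm);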
 */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
	struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
	int ret;

	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

/**
 * drm_vram_helper_release_mm - Releases a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);