// SPDX-License-Identifier: GPL-2.0-or-later

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;

/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. The
 * respective buffer object is provided by &struct drm_gem_vram_object.
 */
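
/*
 * Example: a minimal sketch of how a driver might set up these helpers.
 * The mydrv_*() functions and the use of PCI BAR 0 as video memory are
 * illustrative assumptions, not requirements of this library.
 *
 *	static int mydrv_load(struct drm_device *dev)
 *	{
 *		struct pci_dev *pdev = dev->pdev;
 *		struct drm_vram_mm *vmm;
 *
 *		vmm = drm_vram_helper_alloc_mm(dev,
 *					       pci_resource_start(pdev, 0),
 *					       pci_resource_len(pdev, 0));
 *		if (IS_ERR(vmm))
 *			return PTR_ERR(vmm);
 *		return 0;
 *	}
 *
 *	static void mydrv_unload(struct drm_device *dev)
 *	{
 *		drm_vram_helper_release_mm(dev);
 *	}
 *
 * With the memory manager in place, buffer objects are created with
 * drm_gem_vram_create() or through the dumb-buffer helpers below.
 */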

/*
 * Buffer-objects helpers
 */

static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */

	WARN_ON(gbo->kmap_use_count);
	WARN_ON(gbo->kmap.virtual);

	drm_gem_object_release(&gbo->bo.base);
}

static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	unsigned int i;
	unsigned int c = 0;
	u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & TTM_PL_FLAG_VRAM)
		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM |
					     invariant_flags;

	if (pl_flag & TTM_PL_FLAG_SYSTEM)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM |
					     invariant_flags;

	if (!c)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM |
					     invariant_flags;

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

static int drm_gem_vram_init(struct drm_device *dev,
			     struct ttm_bo_device *bdev,
			     struct drm_gem_vram_object *gbo,
			     size_t size, unsigned long pg_align,
			     bool interruptible)
{
	int ret;
	size_t acc_size;

	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
	if (ret)
		return ret;

	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, interruptible, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		goto err_drm_gem_object_release;

	return 0;

err_drm_gem_object_release:
	drm_gem_object_release(&gbo->bo.base);
	return ret;
}

/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @bdev:		the TTM BO device backing the object
 * @size:		the buffer size in bytes
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						struct ttm_bo_device *bdev,
						size_t size,
						unsigned long pg_align,
						bool interruptible)
{
	struct drm_gem_vram_object *gbo;
	int ret;

	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
	if (!gbo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
	if (ret < 0)
		goto err_kfree;

	return gbo;

err_kfree:
	kfree(gbo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);

/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo:	the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->pin_count))
		return (s64)-ENODEV;
	return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);

static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	if (gbo->pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

out:
	++gbo->pin_count;

	return 0;
}

/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_pin_locked(gbo, pl_flag);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);
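
/*
 * Example: a driver might pin a cursor buffer at the high end of VRAM
 * and read back its scanout address. This is a hedged sketch, not a
 * reference implementation; mydrv_write_cursor_base(), which stands in
 * for the hardware register write, is made up for illustration.
 *
 *	static int mydrv_show_cursor(struct drm_gem_vram_object *gbo)
 *	{
 *		s64 off;
 *		int ret;
 *
 *		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
 *					    DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
 *		if (ret)
 *			return ret;
 *		off = drm_gem_vram_offset(gbo);
 *		if (off < 0) {
 *			drm_gem_vram_unpin(gbo);
 *			return (int)off;
 *		}
 *		mydrv_write_cursor_base(off);
 *		return 0;
 *	}
 *
 * The buffer stays in place until a matching drm_gem_vram_unpin().
 */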

static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	if (WARN_ON_ONCE(!gbo->pin_count))
		return 0;

	--gbo->pin_count;
	if (gbo->pin_count)
		return 0;

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_unpin_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);

static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				      bool map, bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (gbo->kmap_use_count > 0)
		goto out;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!kmap->virtual) {
		if (is_iomem)
			*is_iomem = false;
		return NULL; /* not mapped; don't increment ref */
	}
	++gbo->kmap_use_count;
	if (is_iomem)
		return ttm_kmap_obj_virtual(kmap, is_iomem);
	return kmap->virtual;
}

/**
 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
 * @gbo:	the GEM VRAM object
 * @map:	establish a mapping if necessary
 * @is_iomem:	returns true if the mapped memory is I/O memory, or false \
	otherwise; can be NULL
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the current mapping. If the parameter map is false, the
 * function only queries the current mapping, but does not establish a
 * new one.
 *
 * Returns:
 * The buffer's virtual address if mapped, or
 * NULL if not mapped, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
			bool *is_iomem)
{
	int ret;
	void *virtual;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ERR_PTR(ret);
	virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
	ttm_bo_unreserve(&gbo->bo);

	return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);

static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->kmap_use_count))
		return;
	if (--gbo->kmap_use_count > 0)
		return;

	/*
	 * Permanently mapping and unmapping buffers adds overhead from
	 * updating the page tables and creates debugging output. Therefore,
	 * we delay the actual unmap operation until the BO gets evicted
	 * from memory. See drm_gem_vram_bo_driver_move_notify().
	 */
}

/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo:	the GEM VRAM object
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;
	drm_gem_vram_kunmap_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
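
/*
 * Example: a sketch of the map/unmap pattern. Note that kmap by itself
 * does not pin the buffer; callers that need the mapping to stay valid
 * across evictions should pin the buffer first, or use
 * drm_gem_vram_vmap() below.
 *
 *	bool is_iomem;
 *	void *vaddr = drm_gem_vram_kmap(gbo, true, &is_iomem);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... access the buffer; honor is_iomem when writing ...
 *	drm_gem_vram_kunmap(gbo);
 */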

/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 *                       space
 * @gbo:	The GEM VRAM object to map
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned objects cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * If you have special requirements for the pinning or mapping operations,
 * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
 *
 * Returns:
 * The buffer's virtual address on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
{
	int ret;
	void *base;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ERR_PTR(ret);

	ret = drm_gem_vram_pin_locked(gbo, 0);
	if (ret)
		goto err_ttm_bo_unreserve;
	base = drm_gem_vram_kmap_locked(gbo, true, NULL);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err_drm_gem_vram_unpin_locked;
	}

	ttm_bo_unreserve(&gbo->bo);

	return base;

err_drm_gem_vram_unpin_locked:
	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_vmap);

/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo:	The GEM VRAM object to unmap
 * @vaddr:	The mapping's base address as returned by drm_gem_vram_vmap()
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;

	drm_gem_vram_kunmap_locked(gbo);
	drm_gem_vram_unpin_locked(gbo);

	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
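
/*
 * Example: a hedged sketch of updating device memory from a shadow
 * buffer, the typical vmap use case. The mydrv_flush_shadow() name is
 * hypothetical, and a real driver would use memcpy_toio() when the
 * mapping refers to I/O memory.
 *
 *	static int mydrv_flush_shadow(struct drm_gem_vram_object *gbo,
 *				      const void *shadow, size_t size)
 *	{
 *		void *vaddr = drm_gem_vram_vmap(gbo);
 *
 *		if (IS_ERR(vaddr))
 *			return PTR_ERR(vaddr);
 *		memcpy(vaddr, shadow, size);
 *		drm_gem_vram_vunmap(gbo, vaddr);
 *		return 0;
 *	}
 */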

/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @bdev:		the TTM BO device managing the buffer object
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  struct ttm_bo_device *bdev,
				  unsigned long pg_align,
				  bool interruptible,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	pitch = args->width * ((args->bpp + 7) / 8);
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
	if (ret)
		goto err_drm_gem_object_put_unlocked;

	drm_gem_object_put_unlocked(&gbo->bo.base);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put_unlocked:
	drm_gem_object_put_unlocked(&gbo->bo.base);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
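
/*
 * Example: a driver with special alignment requirements could implement
 * &struct drm_driver.dumb_create by forwarding to the helper above. The
 * MYDRV_PG_ALIGN constant is a hypothetical driver parameter.
 *
 *	static int mydrv_dumb_create(struct drm_file *file,
 *				     struct drm_device *dev,
 *				     struct drm_mode_create_dumb *args)
 *	{
 *		return drm_gem_vram_fill_create_dumb(file, dev,
 *						     &dev->vram_mm->bdev,
 *						     MYDRV_PG_ALIGN, false,
 *						     args);
 *	}
 */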

/*
 * Helpers for struct ttm_bo_driver
 */

static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}

static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
					       struct ttm_placement *pl)
{
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}

static int drm_gem_vram_bo_driver_verify_access(struct drm_gem_vram_object *gbo,
						struct file *filp)
{
	return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
					  filp->private_data);
}

static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
					       bool evict,
					       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (WARN_ON_ONCE(gbo->kmap_use_count))
		return;

	if (!kmap->virtual)
		return;
	ttm_bo_kunmap(kmap);
	kmap->virtual = NULL;
}

/*
 * Helpers for struct drm_gem_object_funcs
 */

/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem:       GEM object. Refers to &struct drm_gem_vram_object.gem
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}

/*
 * Helpers for dumb buffers
 */

/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This function requires the driver to use @drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
	Implements &struct drm_driver.dumb_mmap_offset
 * @file:	DRM file pointer.
 * @dev:	DRM device.
 * @handle:	GEM handle
 * @offset:	Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
					 struct drm_device *dev,
					 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct drm_gem_vram_object *gbo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return -ENOENT;

	gbo = drm_gem_vram_of_gem(gem);
	*offset = drm_gem_vram_mmap_offset(gbo);

	drm_gem_object_put_unlocked(gem);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
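
/*
 * Example: drivers without driver-specific requirements can plug these
 * helpers directly into &struct drm_driver. A sketch, with all other
 * fields omitted:
 *
 *	static struct drm_driver mydrv_driver = {
 *		...
 *		.debugfs_init    = drm_vram_mm_debugfs_init,
 *		.dumb_create     = drm_gem_vram_driver_dumb_create,
 *		.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
 *		...
 *	};
 */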

/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem:	The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the use case of these PRIME
	 * helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem:	The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() - \
	Implements &struct drm_gem_object_funcs.vmap
 * @gem:	The GEM object to map
 *
 * Returns:
 * The buffer's virtual address on success, or
 * NULL otherwise.
 */
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	void *base;

	base = drm_gem_vram_vmap(gbo);
	if (IS_ERR(base))
		return NULL;
	return base;
}

/**
 * drm_gem_vram_object_vunmap() - \
	Implements &struct drm_gem_object_funcs.vunmap
 * @gem:	The GEM object to unmap
 * @vaddr:	The mapping's base address
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_vunmap(gbo, vaddr);
}

/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free	= drm_gem_vram_object_free,
	.pin	= drm_gem_vram_object_pin,
	.unpin	= drm_gem_vram_object_unpin,
	.vmap	= drm_gem_vram_object_vmap,
	.vunmap	= drm_gem_vram_object_vunmap,
	.print_info = drm_gem_ttm_print_info,
};

/*
 * VRAM memory manager
 */

/*
 * TTM TT
 */

static void backend_func_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func backend_func = {
	.destroy = backend_func_destroy
};

/*
 * TTM BO device
 */

static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &backend_func;

	ret = ttm_tt_init(tt, bo, page_flags);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}

static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				   struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}

static int bo_driver_verify_access(struct ttm_buffer_object *bo,
				   struct file *filp)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return -EINVAL;

	gbo = drm_gem_vram_of_bo(bo);

	return drm_gem_vram_bo_driver_verify_access(gbo, filp);
}

static void bo_driver_move_notify(struct ttm_buffer_object *bo,
				  bool evict,
				  struct ttm_mem_reg *new_mem)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
}

static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	mem->bus.addr = NULL;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do */
		mem->bus.offset = 0;
		mem->bus.base = 0;
		mem->bus.is_iomem = false;
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = vmm->vram_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{ }

static struct ttm_bo_driver bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bo_driver_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.verify_access = bo_driver_verify_access,
	.move_notify = bo_driver_move_notify,
	.io_mem_reserve = bo_driver_io_mem_reserve,
	.io_mem_free = bo_driver_io_mem_free,
};

/*
 * struct drm_vram_mm
 */

#if defined(CONFIG_DEBUG_FS)
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
	struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
	struct ttm_bo_global *glob = vmm->bdev.glob;
	struct drm_printer p = drm_seq_file_printer(m);

	spin_lock(&glob->lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&glob->lru_lock);
	return 0;
}

static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
	{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
#endif

/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
	int ret = 0;

#if defined(CONFIG_DEBUG_FS)
	ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
				       ARRAY_SIZE(drm_vram_mm_debugfs_list),
				       minor->debugfs_root, minor);
#endif
	return ret;
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);

static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
			    uint64_t vram_base, size_t vram_size)
{
	int ret;

	vmm->vram_base = vram_base;
	vmm->vram_size = vram_size;

	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
				 dev->anon_inode->i_mapping,
				 dev->vma_offset_manager,
				 true);
	if (ret)
		return ret;

	ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	return 0;
}

static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_bo_device_release(&vmm->bdev);
}

static int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
			    struct drm_vram_mm *vmm)
{
	return ttm_bo_mmap(filp, vma, &vmm->bdev);
}

/*
 * Helpers for integration with struct drm_device
 */

/**
 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 *
 * Returns:
 * The new instance of &struct drm_vram_mm on success, or
 * an ERR_PTR()-encoded errno code otherwise.
 */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
	struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
	int ret;

	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

/**
 * drm_vram_helper_release_mm - Releases a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);

/*
 * Helpers for &struct file_operations
 */

/**
 * drm_vram_mm_file_operations_mmap() - \
	Implements &struct file_operations.mmap()
 * @filp:	the mapping's file structure
 * @vma:	the mapping's memory area
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_file_operations_mmap(
	struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
}
EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
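/*
 * Example: a sketch of hooking the mmap helper into a driver's file
 * operations. The other callbacks are DRM's stock implementations; the
 * generic GEM mmap cannot be used here, as mappings must go through TTM.
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.compat_ioctl	= drm_compat_ioctl,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.llseek		= no_llseek,
 *		.mmap		= drm_vram_mm_file_operations_mmap,
 *	};
 */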