1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <drm/drm_debugfs.h>
4 #include <drm/drm_device.h>
5 #include <drm/drm_file.h>
6 #include <drm/drm_gem_ttm_helper.h>
7 #include <drm/drm_gem_vram_helper.h>
8 #include <drm/drm_mode.h>
9 #include <drm/drm_prime.h>
10 #include <drm/ttm/ttm_page_alloc.h>
11 
12 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
13 
14 /**
15  * DOC: overview
16  *
17  * This library provides a GEM buffer object that is backed by video RAM
18  * (VRAM). It can be used for framebuffer devices with dedicated memory.
19  *
20  * The data structure &struct drm_vram_mm and its helpers implement a memory
21  * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. The
 * respective buffer object is provided by &struct drm_gem_vram_object.
24  */
25 
26 /*
27  * Buffer-objects helpers
28  */
29 
/* Releases the GEM object embedded in @gbo. Called from the TTM destroy
 * path, where the TTM buffer object itself has already been torn down;
 * the WARN_ONs catch leaked kernel mappings at release time.
 */
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */

	WARN_ON(gbo->kmap_use_count);
	WARN_ON(gbo->kmap.virtual);

	drm_gem_object_release(&gbo->bo.base);
}
42 
/* Cleans up and frees a GEM VRAM object allocated by drm_gem_vram_create(). */
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}
48 
/* TTM destroy callback; set on the BO in drm_gem_vram_init() and also
 * used by drm_is_gem_vram() to identify our buffer objects.
 */
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	drm_gem_vram_destroy(drm_gem_vram_of_bo(bo));
}
55 
56 static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
57 				   unsigned long pl_flag)
58 {
59 	unsigned int i;
60 	unsigned int c = 0;
61 
62 	gbo->placement.placement = gbo->placements;
63 	gbo->placement.busy_placement = gbo->placements;
64 
65 	if (pl_flag & TTM_PL_FLAG_VRAM)
66 		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
67 					     TTM_PL_FLAG_UNCACHED |
68 					     TTM_PL_FLAG_VRAM;
69 
70 	if (pl_flag & TTM_PL_FLAG_SYSTEM)
71 		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
72 					     TTM_PL_FLAG_SYSTEM;
73 
74 	if (!c)
75 		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
76 					     TTM_PL_FLAG_SYSTEM;
77 
78 	gbo->placement.num_placement = c;
79 	gbo->placement.num_busy_placement = c;
80 
81 	for (i = 0; i < c; ++i) {
82 		gbo->placements[i].fpfn = 0;
83 		gbo->placements[i].lpfn = 0;
84 	}
85 }
86 
/* Initializes @gbo as a GEM object of @size bytes on @dev, backed by the
 * TTM BO device @bdev. The default placement allows both VRAM and system
 * memory. On ttm_bo_init() failure the GEM object is released here;
 * on success, teardown happens through ttm_buffer_object_destroy().
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
static int drm_gem_vram_init(struct drm_device *dev,
			     struct ttm_bo_device *bdev,
			     struct drm_gem_vram_object *gbo,
			     size_t size, unsigned long pg_align,
			     bool interruptible)
{
	int ret;
	size_t acc_size;

	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
	if (ret)
		return ret;

	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, interruptible, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		goto err_drm_gem_object_release;

	return 0;

err_drm_gem_object_release:
	drm_gem_object_release(&gbo->bo.base);
	return ret;
}
119 
120 /**
121  * drm_gem_vram_create() - Creates a VRAM-backed GEM object
122  * @dev:		the DRM device
123  * @bdev:		the TTM BO device backing the object
124  * @size:		the buffer size in bytes
125  * @pg_align:		the buffer's alignment in multiples of the page size
126  * @interruptible:	sleep interruptible if waiting for memory
127  *
128  * Returns:
129  * A new instance of &struct drm_gem_vram_object on success, or
130  * an ERR_PTR()-encoded error code otherwise.
131  */
132 struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
133 						struct ttm_bo_device *bdev,
134 						size_t size,
135 						unsigned long pg_align,
136 						bool interruptible)
137 {
138 	struct drm_gem_vram_object *gbo;
139 	int ret;
140 
141 	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
142 	if (!gbo)
143 		return ERR_PTR(-ENOMEM);
144 
145 	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
146 	if (ret < 0)
147 		goto err_kfree;
148 
149 	return gbo;
150 
151 err_kfree:
152 	kfree(gbo);
153 	return ERR_PTR(ret);
154 }
155 EXPORT_SYMBOL(drm_gem_vram_create);
156 
/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 *
 * Dropping the final reference triggers ttm_buffer_object_destroy().
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
168 
/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo:	the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
184 
/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	/* Unpinned BOs may be moved at any time; their offset is unstable. */
	if (WARN_ON_ONCE(!gbo->pin_count))
		return (s64)-ENODEV;
	return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
204 
205 static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
206 				   unsigned long pl_flag)
207 {
208 	int i, ret;
209 	struct ttm_operation_ctx ctx = { false, false };
210 
211 	if (gbo->pin_count)
212 		goto out;
213 
214 	if (pl_flag)
215 		drm_gem_vram_placement(gbo, pl_flag);
216 
217 	for (i = 0; i < gbo->placement.num_placement; ++i)
218 		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
219 
220 	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
221 	if (ret < 0)
222 		return ret;
223 
224 out:
225 	++gbo->pin_count;
226 
227 	return 0;
228 }
229 
230 /**
231  * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
232  * @gbo:	the GEM VRAM object
233  * @pl_flag:	a bitmask of possible memory regions
234  *
235  * Pinning a buffer object ensures that it is not evicted from
236  * a memory region. A pinned buffer object has to be unpinned before
237  * it can be pinned to another region. If the pl_flag argument is 0,
238  * the buffer is pinned at its current location (video RAM or system
239  * memory).
240  *
241  * Returns:
242  * 0 on success, or
243  * a negative error code otherwise.
244  */
245 int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
246 {
247 	int ret;
248 
249 	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
250 	if (ret)
251 		return ret;
252 	ret = drm_gem_vram_pin_locked(gbo, pl_flag);
253 	ttm_bo_unreserve(&gbo->bo);
254 
255 	return ret;
256 }
257 EXPORT_SYMBOL(drm_gem_vram_pin);
258 
/* Unpin helper; caller must hold the BO reservation. Drops one pin
 * reference; when the last reference is gone, clears
 * TTM_PL_FLAG_NO_EVICT from all placements and re-validates so the BO
 * becomes evictable again.
 *
 * NOTE(review): pin_count is decremented before ttm_bo_validate(); if
 * validation fails, the count has already dropped and the NO_EVICT
 * flags stay cleared — confirm this is the intended error-path state.
 */
static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	if (WARN_ON_ONCE(!gbo->pin_count))
		return 0;

	--gbo->pin_count;
	if (gbo->pin_count)
		return 0;

	for (i = 0; i < gbo->placement.num_placement ; ++i)
		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

	return 0;
}
280 
281 /**
282  * drm_gem_vram_unpin() - Unpins a GEM VRAM object
283  * @gbo:	the GEM VRAM object
284  *
285  * Returns:
286  * 0 on success, or
287  * a negative error code otherwise.
288  */
289 int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
290 {
291 	int ret;
292 
293 	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
294 	if (ret)
295 		return ret;
296 	ret = drm_gem_vram_unpin_locked(gbo);
297 	ttm_bo_unreserve(&gbo->bo);
298 
299 	return ret;
300 }
301 EXPORT_SYMBOL(drm_gem_vram_unpin);
302 
/* Kernel-mapping helper; caller must hold the BO reservation. Returns
 * the existing mapping if one is present, establishes a new one when
 * @map is true, or returns NULL when not mapped and @map is false.
 * The use count is only incremented when a mapping is actually handed
 * out, so kunmap calls stay balanced.
 */
static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				      bool map, bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (gbo->kmap_use_count > 0)
		goto out;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!kmap->virtual) {
		if (is_iomem)
			*is_iomem = false;
		return NULL; /* not mapped; don't increment ref */
	}
	++gbo->kmap_use_count;
	if (is_iomem)
		return ttm_kmap_obj_virtual(kmap, is_iomem);
	return kmap->virtual;
}
330 
331 /**
332  * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
333  * @gbo:	the GEM VRAM object
334  * @map:	establish a mapping if necessary
335  * @is_iomem:	returns true if the mapped memory is I/O memory, or false \
336 	otherwise; can be NULL
337  *
338  * This function maps the buffer object into the kernel's address space
339  * or returns the current mapping. If the parameter map is false, the
340  * function only queries the current mapping, but does not establish a
341  * new one.
342  *
343  * Returns:
344  * The buffers virtual address if mapped, or
345  * NULL if not mapped, or
346  * an ERR_PTR()-encoded error code otherwise.
347  */
348 void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
349 			bool *is_iomem)
350 {
351 	int ret;
352 	void *virtual;
353 
354 	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
355 	if (ret)
356 		return ERR_PTR(ret);
357 	virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
358 	ttm_bo_unreserve(&gbo->bo);
359 
360 	return virtual;
361 }
362 EXPORT_SYMBOL(drm_gem_vram_kmap);
363 
/* Drops one kernel-mapping reference; caller must hold the BO
 * reservation. The mapping itself is intentionally kept alive.
 */
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->kmap_use_count))
		return;
	if (--gbo->kmap_use_count > 0)
		return;

	/*
	 * Permanently mapping and unmapping buffers adds overhead from
	 * updating the page tables and creates debugging output. Therefore,
	 * we delay the actual unmap operation until the BO gets evicted
	 * from memory. See drm_gem_vram_bo_driver_move_notify().
	 */
}
378 
/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Counterpart to drm_gem_vram_kmap(). The reservation is taken
 * non-interruptibly since unmap must not fail on a signal.
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;
	drm_gem_vram_kunmap_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
394 
395 /**
396  * drm_gem_vram_fill_create_dumb() - \
397 	Helper for implementing &struct drm_driver.dumb_create
398  * @file:		the DRM file
399  * @dev:		the DRM device
400  * @bdev:		the TTM BO device managing the buffer object
401  * @pg_align:		the buffer's alignment in multiples of the page size
402  * @interruptible:	sleep interruptible if waiting for memory
403  * @args:		the arguments as provided to \
404 				&struct drm_driver.dumb_create
405  *
406  * This helper function fills &struct drm_mode_create_dumb, which is used
407  * by &struct drm_driver.dumb_create. Implementations of this interface
408  * should forwards their arguments to this helper, plus the driver-specific
409  * parameters.
410  *
411  * Returns:
412  * 0 on success, or
413  * a negative error code otherwise.
414  */
415 int drm_gem_vram_fill_create_dumb(struct drm_file *file,
416 				  struct drm_device *dev,
417 				  struct ttm_bo_device *bdev,
418 				  unsigned long pg_align,
419 				  bool interruptible,
420 				  struct drm_mode_create_dumb *args)
421 {
422 	size_t pitch, size;
423 	struct drm_gem_vram_object *gbo;
424 	int ret;
425 	u32 handle;
426 
427 	pitch = args->width * ((args->bpp + 7) / 8);
428 	size = pitch * args->height;
429 
430 	size = roundup(size, PAGE_SIZE);
431 	if (!size)
432 		return -EINVAL;
433 
434 	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
435 	if (IS_ERR(gbo))
436 		return PTR_ERR(gbo);
437 
438 	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
439 	if (ret)
440 		goto err_drm_gem_object_put_unlocked;
441 
442 	drm_gem_object_put_unlocked(&gbo->bo.base);
443 
444 	args->pitch = pitch;
445 	args->size = size;
446 	args->handle = handle;
447 
448 	return 0;
449 
450 err_drm_gem_object_put_unlocked:
451 	drm_gem_object_put_unlocked(&gbo->bo.base);
452 	return ret;
453 }
454 EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
455 
456 /*
457  * Helpers for struct ttm_bo_driver
458  */
459 
/* Returns true if @bo is one of ours, identified by its destroy callback.
 * TTM may also pass us BOs owned by other drivers.
 */
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}
464 
/* On eviction, direct the BO to system memory. */
static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
					       struct ttm_placement *pl)
{
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}
471 
/* Verifies that the DRM file behind @filp may mmap this BO's VMA node. */
static int drm_gem_vram_bo_driver_verify_access(struct drm_gem_vram_object *gbo,
						struct file *filp)
{
	return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
					  filp->private_data);
}
478 
/* Called when TTM moves the BO. Performs the kernel unmap that
 * drm_gem_vram_kunmap_locked() deferred; warns if a mapping is still
 * in use at move time.
 */
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
					       bool evict,
					       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (WARN_ON_ONCE(gbo->kmap_use_count))
		return;

	if (!kmap->virtual)
		return;
	ttm_bo_kunmap(kmap);
	kmap->virtual = NULL;
}
493 
494 /*
495  * Helpers for struct drm_gem_object_funcs
496  */
497 
/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem:	GEM object embedded in a &struct drm_gem_vram_object
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}
509 
/*
 * Helpers for dumb buffers
 */
513 
/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This function requires the driver to use @drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
540 
541 /**
542  * drm_gem_vram_driver_dumb_mmap_offset() - \
543 	Implements &struct drm_driver.dumb_mmap_offset
544  * @file:	DRM file pointer.
545  * @dev:	DRM device.
546  * @handle:	GEM handle
547  * @offset:	Returns the mapping's memory offset on success
548  *
549  * Returns:
550  * 0 on success, or
551  * a negative errno code otherwise.
552  */
553 int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
554 					 struct drm_device *dev,
555 					 uint32_t handle, uint64_t *offset)
556 {
557 	struct drm_gem_object *gem;
558 	struct drm_gem_vram_object *gbo;
559 
560 	gem = drm_gem_object_lookup(file, handle);
561 	if (!gem)
562 		return -ENOENT;
563 
564 	gbo = drm_gem_vram_of_gem(gem);
565 	*offset = drm_gem_vram_mmap_offset(gbo);
566 
567 	drm_gem_object_put_unlocked(gem);
568 
569 	return 0;
570 }
571 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
572 
573 /*
574  * PRIME helpers
575  */
576 
/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem:	The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the use case of these PRIME
	 * helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}
600 
/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem:	The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}
612 
/**
 * drm_gem_vram_object_vmap() - \
	Implements &struct drm_gem_object_funcs.vmap
 * @gem:	The GEM object to map
 *
 * Pins the object at its current location before mapping it, so the
 * mapping stays valid until drm_gem_vram_object_vunmap().
 *
 * Returns:
 * The buffer's virtual address on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	int ret;
	void *base;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ERR_PTR(ret);

	ret = drm_gem_vram_pin_locked(gbo, 0);
	if (ret)
		goto err_ttm_bo_unreserve;
	base = drm_gem_vram_kmap_locked(gbo, true, NULL);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err_drm_gem_vram_unpin_locked;
	}

	ttm_bo_unreserve(&gbo->bo);

	return base;

err_drm_gem_vram_unpin_locked:
	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ERR_PTR(ret);
}
651 
/**
 * drm_gem_vram_object_vunmap() - \
	Implements &struct drm_gem_object_funcs.vunmap
 * @gem:	The GEM object to unmap
 * @vaddr:	The mapping's base address
 *
 * Counterpart to drm_gem_vram_object_vmap(); also drops the pin taken
 * there.
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;

	drm_gem_vram_kunmap_locked(gbo);
	drm_gem_vram_unpin_locked(gbo);

	ttm_bo_unreserve(&gbo->bo);
}
673 
674 /*
675  * GEM object funcs
676  */
677 
/* GEM object function table for VRAM-backed objects; installed on each
 * BO in drm_gem_vram_init() via bo.base.funcs.
 */
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free	= drm_gem_vram_object_free,
	.pin	= drm_gem_vram_object_pin,
	.unpin	= drm_gem_vram_object_unpin,
	.vmap	= drm_gem_vram_object_vmap,
	.vunmap	= drm_gem_vram_object_vunmap,
	.print_info = drm_gem_ttm_print_info,
};
686 
687 /*
688  * VRAM memory manager
689  */
690 
691 /*
692  * TTM TT
693  */
694 
/* TTM TT backend destroy: finalize and free the TT allocated in
 * bo_driver_ttm_tt_create().
 */
static void backend_func_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func backend_func = {
	.destroy = backend_func_destroy
};
704 
705 /*
706  * TTM BO device
707  */
708 
709 static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
710 					      uint32_t page_flags)
711 {
712 	struct ttm_tt *tt;
713 	int ret;
714 
715 	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
716 	if (!tt)
717 		return NULL;
718 
719 	tt->func = &backend_func;
720 
721 	ret = ttm_tt_init(tt, bo, page_flags);
722 	if (ret < 0)
723 		goto err_ttm_tt_init;
724 
725 	return tt;
726 
727 err_ttm_tt_init:
728 	kfree(tt);
729 	return NULL;
730 }
731 
/* Describes the supported memory types to TTM: cached system memory and
 * fixed, write-combined/uncached VRAM managed by the range manager.
 */
static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				   struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
754 
/* &struct ttm_bo_driver.evict_flags: forwards to the GEM VRAM helper for
 * our own BOs; leaves foreign BOs untouched.
 */
static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
768 
/* &struct ttm_bo_driver.verify_access: rejects foreign BOs, otherwise
 * defers to the GEM VMA-node access check.
 */
static int bo_driver_verify_access(struct ttm_buffer_object *bo,
				   struct file *filp)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return -EINVAL;

	gbo = drm_gem_vram_of_bo(bo);

	return drm_gem_vram_bo_driver_verify_access(gbo, filp);
}
782 
/* &struct ttm_bo_driver.move_notify: forwards moves of our BOs so the
 * deferred kernel mapping can be torn down.
 */
static void bo_driver_move_notify(struct ttm_buffer_object *bo,
				  bool evict,
				  struct ttm_mem_reg *new_mem)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
}
797 
/* &struct ttm_bo_driver.io_mem_reserve: fills in the bus addressing for
 * a memory region. VRAM regions map at vram_base plus the region start;
 * system memory needs no I/O mapping.
 */
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	mem->bus.addr = NULL;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do */
		mem->bus.offset = 0;
		mem->bus.base = 0;
		mem->bus.is_iomem = false;
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = vmm->vram_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
827 
/* &struct ttm_bo_driver.io_mem_free: nothing to release for this driver. */
static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{ }
831 
/* TTM driver callbacks for the VRAM helper's BO device. */
static struct ttm_bo_driver bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bo_driver_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.verify_access = bo_driver_verify_access,
	.move_notify = bo_driver_move_notify,
	.io_mem_reserve = bo_driver_io_mem_reserve,
	.io_mem_free = bo_driver_io_mem_free,
};
844 
845 /*
846  * struct drm_vram_mm
847  */
848 
#if defined(CONFIG_DEBUG_FS)
/* Dumps the VRAM range manager's allocation map into the seq file.
 * The LRU lock guards the drm_mm structure during printing.
 */
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
	struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
	struct ttm_bo_global *glob = vmm->bdev.glob;
	struct drm_printer p = drm_seq_file_printer(m);

	spin_lock(&glob->lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&glob->lru_lock);
	return 0;
}

static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
	{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
#endif
868 
/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 * Registers the "vram-mm" debugfs entry; a no-op returning 0 when
 * CONFIG_DEBUG_FS is disabled.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
	int ret = 0;

#if defined(CONFIG_DEBUG_FS)
	ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
				       ARRAY_SIZE(drm_vram_mm_debugfs_list),
				       minor->debugfs_root, minor);
#endif
	return ret;
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
890 
891 static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
892 			    uint64_t vram_base, size_t vram_size)
893 {
894 	int ret;
895 
896 	vmm->vram_base = vram_base;
897 	vmm->vram_size = vram_size;
898 
899 	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
900 				 dev->anon_inode->i_mapping,
901 				 dev->vma_offset_manager,
902 				 true);
903 	if (ret)
904 		return ret;
905 
906 	ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
907 	if (ret)
908 		return ret;
909 
910 	return 0;
911 }
912 
/* Counterpart to drm_vram_mm_init(): tears down the TTM BO device. */
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_bo_device_release(&vmm->bdev);
}
917 
/* Forwards a userspace mmap request to TTM for @vmm's BO device. */
static int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
			    struct drm_vram_mm *vmm)
{
	return ttm_bo_mmap(filp, vma, &vmm->bdev);
}
923 
924 /*
925  * Helpers for integration with struct drm_device
926  */
927 
/**
 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 *
 * The instance is stored in &drm_device.vram_mm; calling this twice for
 * the same device triggers a WARN and returns the existing instance.
 *
 * Returns:
 * The new instance of &struct drm_vram_mm on success, or
 * an ERR_PTR()-encoded errno code otherwise.
 */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
	struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
	int ret;

	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
963 
/**
 * drm_vram_helper_release_mm - Releases a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 *
 * Safe to call when no instance was allocated; clears
 * &drm_device.vram_mm afterwards.
 */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
979 
980 /*
981  * Helpers for &struct file_operations
982  */
983 
/**
 * drm_vram_mm_file_operations_mmap() - \
	Implements &struct file_operations.mmap()
 * @filp:	the mapping's file structure
 * @vma:	the mapping's memory area
 *
 * Requires the driver to have set up &drm_device.vram_mm via
 * drm_vram_helper_alloc_mm().
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_file_operations_mmap(
	struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
}
EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
1006