// SPDX-License-Identifier: GPL-2.0-or-later

#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>
#include <drm/drm_vram_mm_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 */
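/*
 * A minimal sketch of the typical buffer-object lifecycle, assuming the
 * driver has already set up a TTM BO device for its video memory; the
 * bdev pointer, buffer size and error handling below are placeholders
 * for illustration only.
 *
 *	struct drm_gem_vram_object *gbo;
 *	int ret;
 *
 *	gbo = drm_gem_vram_create(dev, bdev, size, 0, false);
 *	if (IS_ERR(gbo))
 *		return PTR_ERR(gbo);
 *
 *	ret = drm_gem_vram_pin(gbo, TTM_PL_FLAG_VRAM);
 *	if (ret)
 *		goto err_put;
 *
 *	... use the buffer, e.g. program it for scanout ...
 *
 *	drm_gem_vram_unpin(gbo);
 *	drm_gem_vram_put(gbo);
 */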

/*
 * Buffer-objects helpers
 */

static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */
	drm_gem_object_release(&gbo->gem);
}

static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	unsigned int i;
	unsigned int c = 0;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & TTM_PL_FLAG_VRAM)
		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (pl_flag & TTM_PL_FLAG_SYSTEM)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	if (!c)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

static int drm_gem_vram_init(struct drm_device *dev,
			     struct ttm_bo_device *bdev,
			     struct drm_gem_vram_object *gbo,
			     size_t size, unsigned long pg_align,
			     bool interruptible)
{
	int ret;
	size_t acc_size;

	ret = drm_gem_object_init(dev, &gbo->gem, size);
	if (ret)
		return ret;

	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, interruptible, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		goto err_drm_gem_object_release;

	return 0;

err_drm_gem_object_release:
	drm_gem_object_release(&gbo->gem);
	return ret;
}

/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @bdev:		the TTM BO device backing the object
 * @size:		the buffer size in bytes
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						struct ttm_bo_device *bdev,
						size_t size,
						unsigned long pg_align,
						bool interruptible)
{
	struct drm_gem_vram_object *gbo;
	int ret;

	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
	if (!gbo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
	if (ret < 0)
		goto err_kfree;

	return gbo;

err_kfree:
	kfree(gbo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);

/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo:	the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->pin_count))
		return (s64)-ENODEV;
	return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
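/*
 * A short sketch of querying the scanout address of a buffer; the
 * CRTC-programming step at the end is a placeholder for driver-specific
 * code. Pinning to VRAM first is required, as documented above.
 *
 *	s64 gpu_addr;
 *	int ret;
 *
 *	ret = drm_gem_vram_pin(gbo, TTM_PL_FLAG_VRAM);
 *	if (ret)
 *		return ret;
 *	gpu_addr = drm_gem_vram_offset(gbo);
 *	if (gpu_addr < 0) {
 *		drm_gem_vram_unpin(gbo);
 *		return (int)gpu_addr;
 *	}
 *	... program the hardware's scanout base with gpu_addr ...
 */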

/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret < 0)
		return ret;

	if (gbo->pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		goto err_ttm_bo_unreserve;

out:
	++gbo->pin_count;
	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(!gbo->pin_count))
		goto out;

	--gbo->pin_count;
	if (gbo->pin_count)
		goto out;

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		goto err_ttm_bo_unreserve;

out:
	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);

/**
 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
 * @gbo:	the GEM VRAM object
 * @map:	establish a mapping if necessary
 * @is_iomem:	returns true if the mapped memory is I/O memory, or false \
	otherwise; can be NULL
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the current mapping. If the parameter map is false, the
 * function only queries the current mapping, but does not establish a
 * new one.
 *
 * Returns:
 * The buffer's virtual address if mapped, or
 * NULL if not mapped, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
			bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!is_iomem)
		return kmap->virtual;
	if (!kmap->virtual) {
		*is_iomem = false;
		return NULL;
	}
	return ttm_kmap_obj_virtual(kmap, is_iomem);
}
EXPORT_SYMBOL(drm_gem_vram_kmap);

/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo:	the GEM VRAM object
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (!kmap->virtual)
		return;

	ttm_bo_kunmap(kmap);
	kmap->virtual = NULL;
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
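/*
 * A sketch of updating a buffer through a kernel mapping, assuming the
 * caller provides the source data; pinning with a pl_flag of 0 keeps
 * the buffer at its current location while it is mapped.
 *
 *	bool is_iomem;
 *	void *vmap;
 *	int ret;
 *
 *	ret = drm_gem_vram_pin(gbo, 0);
 *	if (ret)
 *		return ret;
 *	vmap = drm_gem_vram_kmap(gbo, true, &is_iomem);
 *	if (IS_ERR(vmap)) {
 *		drm_gem_vram_unpin(gbo);
 *		return PTR_ERR(vmap);
 *	}
 *	... copy the data; use memcpy_toio() if is_iomem is true ...
 *	drm_gem_vram_kunmap(gbo);
 *	drm_gem_vram_unpin(gbo);
 */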

/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @bdev:		the TTM BO device managing the buffer object
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  struct ttm_bo_device *bdev,
				  unsigned long pg_align,
				  bool interruptible,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	pitch = args->width * ((args->bpp + 7) / 8);
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->gem, &handle);
	if (ret)
		goto err_drm_gem_object_put_unlocked;

	drm_gem_object_put_unlocked(&gbo->gem);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put_unlocked:
	drm_gem_object_put_unlocked(&gbo->gem);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
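/*
 * A sketch of a driver's &struct drm_driver.dumb_create implementation
 * that forwards to this helper; foo_device and its bdev member are
 * hypothetical, driver-specific names used for illustration.
 *
 *	static int foo_dumb_create(struct drm_file *file,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_device *foo = dev->dev_private;
 *
 *		return drm_gem_vram_fill_create_dumb(file, dev, &foo->bdev,
 *						     0, false, args);
 *	}
 */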

/*
 * Helpers for struct ttm_bo_driver
 */

static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}

/**
 * drm_gem_vram_bo_driver_evict_flags() - \
	Implements &struct ttm_bo_driver.evict_flags
 * @bo:	TTM buffer object. Refers to &struct drm_gem_vram_object.bo
 * @pl:	TTM placement information.
 */
void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
					struct ttm_placement *pl)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);

/**
 * drm_gem_vram_bo_driver_verify_access() - \
	Implements &struct ttm_bo_driver.verify_access
 * @bo:		TTM buffer object. Refers to &struct drm_gem_vram_object.bo
 * @filp:	File pointer.
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
					 struct file *filp)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	return drm_vma_node_verify_access(&gbo->gem.vma_node,
					  filp->private_data);
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);

/*
 * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
 *
 * Most users of &struct drm_gem_vram_object will also use
 * &struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
 * can be used to connect both.
 */
const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
	.evict_flags = drm_gem_vram_bo_driver_evict_flags,
	.verify_access = drm_gem_vram_bo_driver_verify_access
};
EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
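/*
 * A sketch of connecting these helpers with a VRAM memory manager during
 * driver initialization, assuming the drm_vram_mm_helper interface and a
 * device whose video memory lives in PCI BAR 0; pdev and the error
 * handling are placeholders.
 *
 *	struct drm_vram_mm *vmm;
 *
 *	vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
 *				       pci_resource_len(pdev, 0),
 *				       &drm_gem_vram_mm_funcs);
 *	if (IS_ERR(vmm))
 *		return PTR_ERR(vmm);
 *
 *	... on teardown ...
 *	drm_vram_helper_release_mm(dev);
 */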

/*
 * Helpers for struct drm_driver
 */

/**
 * drm_gem_vram_driver_gem_free_object_unlocked() - \
	Implements &struct drm_driver.gem_free_object_unlocked
 * @gem:	GEM object. Refers to &struct drm_gem_vram_object.gem
 */
void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_free_object_unlocked);

/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @args:		the arguments as provided to \
				&struct drm_driver.dumb_create
 *
 * This function requires the driver to use &struct drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
	Implements &struct drm_driver.dumb_map_offset
 * @file:	DRM file pointer.
 * @dev:	DRM device.
 * @handle:	GEM handle
 * @offset:	Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
					 struct drm_device *dev,
					 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct drm_gem_vram_object *gbo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return -ENOENT;

	gbo = drm_gem_vram_of_gem(gem);
	*offset = drm_gem_vram_mmap_offset(gbo);

	drm_gem_object_put_unlocked(gem);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);

/*
 * PRIME helpers for struct drm_driver
 */

/**
 * drm_gem_vram_driver_gem_prime_pin() - \
	Implements &struct drm_driver.gem_prime_pin
 * @gem:	The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the use case of these PRIME
	 * helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_pin);

/**
 * drm_gem_vram_driver_gem_prime_unpin() - \
	Implements &struct drm_driver.gem_prime_unpin
 * @gem:	The GEM object to unpin
 */
void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_unpin);

/**
 * drm_gem_vram_driver_gem_prime_vmap() - \
	Implements &struct drm_driver.gem_prime_vmap
 * @gem:	The GEM object to map
 *
 * Returns:
 * The buffer's virtual address on success, or
 * NULL otherwise.
 */
void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	int ret;
	void *base;

	ret = drm_gem_vram_pin(gbo, 0);
	if (ret)
		return NULL;
	base = drm_gem_vram_kmap(gbo, true, NULL);
	if (IS_ERR(base)) {
		drm_gem_vram_unpin(gbo);
		return NULL;
	}
	return base;
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vmap);

/**
 * drm_gem_vram_driver_gem_prime_vunmap() - \
	Implements &struct drm_driver.gem_prime_vunmap
 * @gem:	The GEM object to unmap
 * @vaddr:	The mapping's base address
 */
void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *gem,
					  void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_kunmap(gbo);
	drm_gem_vram_unpin(gbo);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap);

/**
 * drm_gem_vram_driver_gem_prime_mmap() - \
	Implements &struct drm_driver.gem_prime_mmap
 * @gem:	The GEM object to map
 * @vma:	The VMA describing the mapping
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem,
				       struct vm_area_struct *vma)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Use the TTM BO's mmap offset, as expected by TTM's mmap code. */
	gbo->gem.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start;
	return drm_gem_prime_mmap(gem, vma);
}
EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap);
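/*
 * A sketch of wiring these helpers into a driver's &struct drm_driver,
 * assuming the driver relies on &struct drm_device.vram_mm; the foo_
 * prefix and the remaining fields are placeholders.
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.gem_free_object_unlocked =
 *			drm_gem_vram_driver_gem_free_object_unlocked,
 *		.dumb_create	  = drm_gem_vram_driver_dumb_create,
 *		.dumb_map_offset  = drm_gem_vram_driver_dumb_mmap_offset,
 *		.gem_prime_pin	  = drm_gem_vram_driver_gem_prime_pin,
 *		.gem_prime_unpin  = drm_gem_vram_driver_gem_prime_unpin,
 *		.gem_prime_vmap	  = drm_gem_vram_driver_gem_prime_vmap,
 *		.gem_prime_vunmap = drm_gem_vram_driver_gem_prime_vunmap,
 *		.gem_prime_mmap	  = drm_gem_vram_driver_gem_prime_mmap,
 *		...
 *	};
 */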