/*
 *  Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
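
/* A rough sketch of the cache (summarizing the code below): each
 * cached BO sits on two lists at once.  A per-size bucket
 * (bo_cache.size_list[n], where bucket n holds BOs of n + 1 pages)
 * gives vc4_bo_create() O(1) reuse, while a global LRU list
 * (bo_cache.time_list) is walked by a one-second timer to destroy
 * entries that have been idle for more than a second.
 */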

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	DRM_INFO("num bos allocated: %d\n",
		 vc4->bo_stats.num_allocated);
	DRM_INFO("size bos allocated: %dkb\n",
		 vc4->bo_stats.size_allocated / 1024);
	DRM_INFO("num bos used: %d\n",
		 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
	DRM_INFO("size bos used: %dkb\n",
		 (vc4->bo_stats.size_allocated -
		  vc4->bo_stats.size_cached) / 1024);
	DRM_INFO("num bos cached: %d\n",
		 vc4->bo_stats.num_cached);
	DRM_INFO("size bos cached: %dkb\n",
		 vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo_stats stats;

	/* Take a snapshot of the current stats with the lock held. */
	mutex_lock(&vc4->bo_lock);
	stats = vc4->bo_stats;
	mutex_unlock(&vc4->bo_lock);

	seq_printf(m, "num bos allocated: %d\n",
		   stats.num_allocated);
	seq_printf(m, "size bos allocated: %dkb\n",
		   stats.size_allocated / 1024);
	seq_printf(m, "num bos used: %d\n",
		   stats.num_allocated - stats.num_cached);
	seq_printf(m, "size bos used: %dkb\n",
		   (stats.size_allocated - stats.size_cached) / 1024);
	seq_printf(m, "num bos cached: %d\n",
		   stats.num_cached);
	seq_printf(m, "size bos cached: %dkb\n",
		   stats.size_cached / 1024);

	return 0;
}
#endif

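/* Maps a (page-aligned) BO size to its cache bucket: bucket n holds
 * BOs of exactly n + 1 pages, so e.g. a 16384-byte BO on a 4K-page
 * system lands in bucket 3.
 */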
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	vc4->bo_stats.num_allocated--;
	vc4->bo_stats.size_allocated -= obj->size;

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	vc4->bo_stats.num_cached--;
	vc4->bo_stats.size_cached -= obj->size;

	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

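/* Returns the cache list head for BOs of the given size, growing the
 * size_list array on demand.  The array at least doubles each time,
 * so repeated growth is amortized: e.g. with 4 buckets and a request
 * for bucket index 9, it grows to max(4 * 2, 9 + 1) = 10 entries.
 * Called with bo_lock held (see vc4_free_object()).
 */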
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

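/* Try to reuse a cached BO of exactly the requested (page-aligned)
 * size.  On a hit the BO is removed from both cache lists and its
 * refcount is re-initialized to 1; on a miss, returns NULL.
 */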
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	vc4->bo_stats.num_allocated++;
	vc4->bo_stats.size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

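/**
 * vc4_bo_create() - Allocates a VC4 BO, preferring the BO cache.
 * @dev: DRM device
 * @unaligned_size: Requested size, rounded up to a multiple of the
 *	page size
 * @allow_unzeroed: If true, a cached BO may be returned with its old
 *	contents intact; only safe for buffers the kernel will fully
 *	overwrite before handing out.
 */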
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}
	return to_vc4_bo(&cma_obj->base);
}

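/* Implements DRM_IOCTL_MODE_CREATE_DUMB.  The pitch is the number of
 * bytes per scanline, rounded up from bits: e.g. a 1920-pixel-wide,
 * 32bpp buffer needs at least DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes
 * per line.
 */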
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

/* Must be called with bo_lock held.
 *
 * Frees cached BOs that have been idle for more than a second, oldest
 * first.  If a newer cached BO remains, the timer is re-armed to try
 * again in roughly a second.
 */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it. */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4->bo_stats.num_cached++;
	vc4->bo_stats.size_cached += gem_bo->size;

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

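/* Timer callbacks run in softirq context, where a mutex like bo_lock
 * must not be taken, so the actual freeing is punted to the
 * workqueue above.
 */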
static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

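/* dma-buf import path: wrap the foreign sg_table in a CMA GEM object
 * and share the exporter's reservation object, so fencing is
 * coordinated with the producer instead of using our own embedded
 * _resv.
 */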
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

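/* For illustration only (not part of the driver): a hypothetical
 * userspace caller might allocate and map a BO roughly like below,
 * assuming libdrm's drmIoctl(), the structs and ioctl numbers from
 * uapi/drm/vc4_drm.h, an open DRM fd, and a void *ptr; error
 * checking is elided:
 *
 *	struct drm_vc4_create_bo create = { .size = 65536 };
 *	struct drm_vc4_mmap_bo map = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */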
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * The BO we return must not leak another user's data, so pass
	 * allow_unzeroed = false: vc4_bo_create() then zeroes any BO
	 * it hands back from the BO cache.
	 */
	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference_unlocked(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, in case the BO came from the
	 * BO cache with stale contents.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users could do things like mmap the shader BO
	 * before validation completes.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

 fail:
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb
 * when no explicit modifier was set by userspace, as well as the
 * return value of vc4_get_tiling_ioctl(), so that userspace can treat
 * a BO it received over dmabuf as having the same tiling format the
 * producer used.
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_unreference_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_unreference_unlocked(gem_obj);

	return 0;
}

void vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);
}

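/* Tear down the cache on driver unload: stop the timer and the
 * worker, then destroy everything still cached.  Any BO still
 * allocated at this point has been leaked by other code, so dump the
 * stats to help track it down.
 */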
void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	if (vc4->bo_stats.num_allocated) {
		DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
		vc4_bo_stats_dump(vc4);
	}
}