xref: /openbmc/linux/drivers/gpu/drm/i915/gvt/dmabuf.c (revision e15a5365)
/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

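/*
 * A gen8+ GGTT PTE carries the page address in bits 63:12; masking off the
 * low bits (present flag, cache attributes, etc.) recovers the DMA address
 * of the guest page the entry points at.
 */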
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
				unsigned long size,
				dma_addr_t dma_addr)
{
	int ret = 0;

	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
		ret = -EINVAL;

	return ret;
}

static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
				   dma_addr_t dma_addr)
{
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

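/*
 * Build the backing sg_table for a proxy GEM object: walk the framebuffer's
 * range of the GGTT, decode each PTE into a DMA address, and pin the
 * corresponding guest page. On any failure, every page pinned so far is
 * released again below the "out" label.
 */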
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
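	/*
	 * gsm is the CPU mapping of the GGTT page table; index it by the
	 * framebuffer's GGTT page offset to reach the PTEs that describe
	 * the guest framebuffer.
	 */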
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
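	/*
	 * On failure, only the first i entries (0..i-1) were pinned; walk
	 * just those and unpin them. Entries never reached keep the zero
	 * dma_addr left by sg_alloc_table(), hence the check below.
	 */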
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				vgpu_unpin_dma_address(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *dmabuf_obj = fb_info->obj;
		struct intel_vgpu *vgpu = dmabuf_obj->vgpu;
		int i;

		/*
		 * fb_info->size is in bytes, not sg entries; walk exactly
		 * the one-entry-per-page table that vgpu_gem_get_pages()
		 * allocated and pinned.
		 */
		for_each_sg(pages->sgl, sg, obj->base.size >> PAGE_SHIFT, i)
			vgpu_unpin_dma_address(vgpu,
					       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

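/*
 * kref release callback for a dmabuf_obj. While the vGPU is still active,
 * the object is also unlinked from the per-vGPU list and its id removed
 * from the idr; otherwise intel_vgpu_dmabuf_cleanup() has already detached
 * it, and only the orphaned memory is released here.
 */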
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

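/*
 * Wrap the decoded framebuffer in a proxy GEM object. The object has no
 * shmem backing of its own; its pages come from vgpu_gem_get_pages(), and
 * the tiling mode is reconstructed from the plane's format modifier so the
 * importer sees a layout matching what the guest programmed.
 */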
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

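/* The cursor hotspot must lie within the cursor plane's bounds. */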
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("unaligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

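/*
 * Look up an already-exposed dmabuf_obj whose decoded framebuffer matches
 * the latest plane state, so that QUERY_GFX_PLANE can hand back an existing
 * id instead of exposing a duplicate.
 */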
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
			   struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

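/**
 * intel_vgpu_query_plane - handle VFIO_DEVICE_QUERY_GFX_PLANE for a vGPU
 * @vgpu: the vGPU whose plane is being queried
 * @args: a struct vfio_device_gfx_plane_info passed in from the ioctl
 *
 * Decodes the requested plane, reuses a matching dmabuf_obj if one is
 * already exposed, or allocates and registers a new one. The returned
 * dmabuf_id can then be passed to VFIO_DEVICE_GET_GFX_DMABUF.
 *
 * A typical userspace sequence looks roughly like the sketch below (a
 * hypothetical consumer, not copied from any particular one):
 *
 *	struct vfio_device_gfx_plane_info info = {
 *		.argsz = sizeof(info),
 *		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *	};
 *
 *	ioctl(vfio_device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &info);
 *	ioctl(vfio_device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &info.dmabuf_id);
 *
 * Returns 0 on success, negative error code on failure.
 */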
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
					gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If one already exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/*
		 * This buffer may be released between the query_plane and
		 * get_dmabuf ioctls; take a reference so that it cannot
		 * disappear in between.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

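	/*
	 * initref keeps the object alive between the QUERY_GFX_PLANE and
	 * GET_GFX_DMABUF ioctls; it is dropped on the first successful
	 * intel_vgpu_get_dmabuf() or in intel_vgpu_dmabuf_cleanup().
	 */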
	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		/*
		 * ret still holds the positive id from idr_alloc(); set a
		 * real error so userspace does not see this as success.
		 * -ENXIO is a choice made here; anything negative other
		 * than -ENODEV (which "out" maps to 0) would do.
		 */
		ret = -ENXIO;
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/**
 * intel_vgpu_get_dmabuf - associate an exposed dma-buf with a dmabuf_obj
 * @vgpu: the vGPU that owns the framebuffer
 * @dmabuf_id: id returned earlier by intel_vgpu_query_plane()
 *
 * Creates a proxy GEM object for the framebuffer described by @dmabuf_id,
 * exports it as a dma-buf and installs a file descriptor for it.
 *
 * Returns the new dma-buf fd on success, negative error code on failure.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

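	/*
	 * The exported fd now pins dmabuf_obj through the GEM object, so
	 * take a reference on its behalf (dropped in vgpu_gem_release())
	 * and release the initial reference taken by query_plane.
	 */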
	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

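/**
 * intel_vgpu_dmabuf_cleanup - detach all dmabuf_objs from a dying vGPU
 * @vgpu: the vGPU being destroyed
 *
 * Clears each object's vgpu back-pointer, removes it from the idr and the
 * per-vGPU list, and drops any outstanding initial reference. Objects still
 * held by an exported dma-buf are freed later, as orphans, when their last
 * reference goes away in dmabuf_gem_object_free().
 */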
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}
620