xref: /openbmc/linux/drivers/gpu/drm/i915/gvt/dmabuf.c (revision ad10c920)
/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

#include "gem/i915_gem_dmabuf.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"

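/*
 * A gen8+ GGTT PTE keeps the address of the backing page in bits 63:12;
 * mask off the low flag bits to recover it.
 */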
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

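/*
 * Back the proxy GEM object with the guest framebuffer pages: read the
 * guest's GGTT entries covering the framebuffer, pin each guest page for
 * DMA and collect the resulting DMA addresses in a scatterlist. On a pin
 * failure, every page mapped so far is unmapped again.
 */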
static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

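/*
 * Undo vgpu_gem_get_pages(): unpin the guest pages while the object is
 * still exported as a dma-buf, then release the scatterlist. Note that
 * the local 'obj' shadows the GEM object parameter.
 */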
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			intel_gvt_dma_unmap_guest_page(vgpu,
					       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

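/*
 * kref release callback: drop the dmabuf_obj from its vgpu's list and IDR
 * while the vgpu is still alive; otherwise free the orphaned object
 * directly.
 */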
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
	    !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free orphaned dmabuf_objs whose vgpu is already gone */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

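/*
 * GEM release hook: break the link to the dma-buf and drop the reference
 * held for it, taking the vgpu's dmabuf_lock when the vgpu still exists.
 */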
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

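/*
 * Wrap a decoded guest framebuffer in a read-only proxy GEM object. On
 * gen9+ the tiling mode and stride are derived from the DRM format
 * modifier so that the importer sees the correct layout.
 */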
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (GRAPHICS_VER(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

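/* The cursor hotspot must lie within the cursor image itself. */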
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

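/*
 * Decode the guest's primary or cursor plane into an intel_vgpu_fb_info
 * and sanity-check the result: non-zero size, page-aligned base address
 * and a framebuffer range that lies inside valid GGTT space. An invalid
 * cursor hotspot is reported as UINT_MAX rather than treated as fatal.
 */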
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

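/*
 * Look up an already exposed dmabuf_obj whose decoded framebuffer
 * parameters match @latest_info. Caller holds vgpu->dmabuf_lock.
 */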
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (!dmabuf_obj->info)
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

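/*
 * Look up an exposed dmabuf_obj by its dmabuf id. Caller holds
 * vgpu->dmabuf_lock.
 */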
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

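/*
 * Copy the decoded framebuffer parameters into the userspace-visible
 * plane info structure.
 */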
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

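/*
 * Handle the VFIO_DEVICE_QUERY_GFX_PLANE ioctl for a vGPU: decode the
 * requested plane, then either re-use a matching, already exposed
 * dmabuf_obj or create and publish a new one.
 *
 * A minimal sketch of the two-step flow expected from userspace
 * (hypothetical fd name, error handling omitted):
 *
 *	struct vfio_device_gfx_plane_info plane = {
 *		.argsz = sizeof(plane),
 *		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *	};
 *	__u32 id;
 *	int fd;
 *
 *	ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *	id = plane.dmabuf_id;
 *	fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &id);
 */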
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
					gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If one already exists, re-use the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between the query_plane ioctl
		 * and the get_dmabuf ioctl. Take a reference to make sure it
		 * won't be released in between.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means the plane isn't ready, which can be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/* Associate an exposed dma-buf with its dmabuf_obj and return a new fd */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	/* The initial reference from query_plane has served its purpose */
	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

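/*
 * Called on vgpu teardown: detach every dmabuf_obj from the vgpu and drop
 * the initial reference taken in intel_vgpu_query_plane(). Objects still
 * referenced by exported dma-bufs live on as orphans until their last
 * reference is dropped.
 */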
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		list_del(pos);

		/* dmabuf_obj may be freed by dmabuf_obj_put() */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}