/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

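/*
 * Called by the DRM core once the last reference to a GEM object is
 * gone; drops the driver's reference on the underlying
 * virtio_gpu_object.
 */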
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);

	if (obj)
		virtio_gpu_object_unref(&obj);
}

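/*
 * Allocate a virtio_gpu_object backed by a host resource.  Returns the
 * new object on success and an ERR_PTR() on failure.  A non-NULL @fence
 * is passed on to virtio_gpu_object_create() to fence the creation
 * command.
 */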
struct virtio_gpu_object *
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;

	ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
	if (ret)
		return ERR_PTR(ret);

	return obj;
}

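/*
 * Create a GEM object and a userspace handle for it.  On success the
 * handle holds the only reference; the new object and handle are
 * returned through @obj_p and @handle_p.
 */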
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, params, NULL);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
	if (ret) {
		/* drop the allocation reference; this frees the object */
		drm_gem_object_put_unlocked(&obj->gem_base);
		return ret;
	}

	*obj_p = &obj->gem_base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->gem_base);

	*handle_p = handle;
	return 0;
}

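/*
 * DRM dumb-buffer creation: only 32 bpp buffers are supported, stored
 * as XRGB8888 with a 4-bytes-per-pixel pitch and a page-aligned size.
 */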
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}

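/*
 * DRM dumb-buffer mmap: look up the handle and return the fake offset
 * that userspace passes to mmap() to map the buffer.
 */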
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	obj = gem_to_virtio_gpu_obj(gobj);
	*offset_p = virtio_gpu_object_mmap_offset(obj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

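/*
 * Called whenever a handle to this GEM object is created in a DRM file.
 * With virgl enabled, attach the backing resource to the file's
 * rendering context on the host.
 */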
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return 0;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	return 0;
}

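/*
 * Counterpart of virtio_gpu_gem_object_open(): detach the resource from
 * the file's rendering context when its handle goes away.
 */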
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
}

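/*
 * Allocate an object array with room for @nents GEM objects.  The array
 * starts empty; entries are added with virtio_gpu_array_add_obj().
 */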
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;

	/* struct_size() guards the size calculation against overflow */
	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

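/* Free the array itself; object references must already be dropped. */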
static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

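/*
 * Build an object array from an array of userspace handles.  Each
 * successful lookup takes a reference; if any lookup fails, all
 * references taken so far are dropped and NULL is returned.
 */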
struct virtio_gpu_object_array *
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles,
			      u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

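/*
 * Append @obj to the array and take a reference on it.  Exceeding the
 * capacity the array was allocated with is a driver bug and triggers a
 * one-time warning.
 */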
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

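/*
 * Lock the reservation objects of all entries.  A single object is
 * locked directly; multiple objects go through the ww-mutex acquire
 * ticket to avoid deadlocks.
 */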
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}

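/* Undo virtio_gpu_array_lock_resv(). */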
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

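/* Install @fence as the exclusive fence of every object in the array. */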
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

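/*
 * Drop the references held by the array and free it.  Also used on
 * error paths for partially populated arrays.
 */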
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}