xref: /openbmc/linux/drivers/gpu/drm/qxl/qxl_ioctl.c (revision 80ecbd24)
1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Dave Airlie
23  *          Alon Levy
24  */
25 
26 #include "qxl_drv.h"
27 #include "qxl_object.h"
28 
29 /*
30  * TODO: allocating a new gem(in qxl_bo) for each request.
31  * This is wasteful since bo's are page aligned.
32  */
33 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 			   struct drm_file *file_priv)
35 {
36 	struct qxl_device *qdev = dev->dev_private;
37 	struct drm_qxl_alloc *qxl_alloc = data;
38 	int ret;
39 	struct qxl_bo *qobj;
40 	uint32_t handle;
41 	u32 domain = QXL_GEM_DOMAIN_VRAM;
42 
43 	if (qxl_alloc->size == 0) {
44 		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45 		return -EINVAL;
46 	}
47 	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48 						domain,
49 						qxl_alloc->size,
50 						NULL,
51 						&qobj, &handle);
52 	if (ret) {
53 		DRM_ERROR("%s: failed to create gem ret=%d\n",
54 			  __func__, ret);
55 		return -ENOMEM;
56 	}
57 	qxl_alloc->handle = handle;
58 	return 0;
59 }
60 
61 static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 			 struct drm_file *file_priv)
63 {
64 	struct qxl_device *qdev = dev->dev_private;
65 	struct drm_qxl_map *qxl_map = data;
66 
67 	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68 				  &qxl_map->offset);
69 }
70 
/* One parsed relocation entry from a drm_qxl_reloc userspace request. */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo whose contents get patched */
	uint32_t dst_offset;	/* byte offset inside dst_bo to patch */
	struct qxl_bo *src_bo;	/* bo the relocation points at (may be NULL) */
	int src_offset;		/* byte offset inside src_bo */
};
78 
79 /*
80  * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
81  * are on vram).
82  * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
83  */
84 static void
85 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
86 {
87 	void *reloc_page;
88 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
89 	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
90 											      info->src_bo,
91 											      info->src_offset);
92 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
93 }
94 
95 static void
96 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
97 {
98 	uint32_t id = 0;
99 	void *reloc_page;
100 
101 	if (info->src_bo && !info->src_bo->is_primary)
102 		id = info->src_bo->surface_id;
103 
104 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
105 	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
106 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
107 }
108 
109 /* return holding the reference to this object */
110 static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
111 					 struct drm_file *file_priv, uint64_t handle,
112 					 struct qxl_release *release)
113 {
114 	struct drm_gem_object *gobj;
115 	struct qxl_bo *qobj;
116 	int ret;
117 
118 	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
119 	if (!gobj)
120 		return NULL;
121 
122 	qobj = gem_to_qxl_bo(gobj);
123 
124 	ret = qxl_release_list_add(release, qobj);
125 	if (ret)
126 		return NULL;
127 
128 	return qobj;
129 }
130 
131 /*
132  * Usage of execbuffer:
133  * Relocations need to take into account the full QXLDrawable size.
134  * However, the command as passed from user space must *not* contain the initial
135  * QXLReleaseInfo struct (first XXX bytes)
136  */
137 static int qxl_process_single_command(struct qxl_device *qdev,
138 				      struct drm_qxl_command *cmd,
139 				      struct drm_file *file_priv)
140 {
141 	struct qxl_reloc_info *reloc_info;
142 	int release_type;
143 	struct qxl_release *release;
144 	struct qxl_bo *cmd_bo;
145 	void *fb_cmd;
146 	int i, j, ret, num_relocs;
147 	int unwritten;
148 
149 	switch (cmd->type) {
150 	case QXL_CMD_DRAW:
151 		release_type = QXL_RELEASE_DRAWABLE;
152 		break;
153 	case QXL_CMD_SURFACE:
154 	case QXL_CMD_CURSOR:
155 	default:
156 		DRM_DEBUG("Only draw commands in execbuffers\n");
157 		return -EINVAL;
158 		break;
159 	}
160 
161 	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
162 		return -EINVAL;
163 
164 	if (!access_ok(VERIFY_READ,
165 		       (void *)(unsigned long)cmd->command,
166 		       cmd->command_size))
167 		return -EFAULT;
168 
169 	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
170 	if (!reloc_info)
171 		return -ENOMEM;
172 
173 	ret = qxl_alloc_release_reserved(qdev,
174 					 sizeof(union qxl_release_info) +
175 					 cmd->command_size,
176 					 release_type,
177 					 &release,
178 					 &cmd_bo);
179 	if (ret)
180 		goto out_free_reloc;
181 
182 	/* TODO copy slow path code from i915 */
183 	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
184 	unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
185 
186 	{
187 		struct qxl_drawable *draw = fb_cmd;
188 		draw->mm_time = qdev->rom->mm_clock;
189 	}
190 
191 	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
192 	if (unwritten) {
193 		DRM_ERROR("got unwritten %d\n", unwritten);
194 		ret = -EFAULT;
195 		goto out_free_release;
196 	}
197 
198 	/* fill out reloc info structs */
199 	num_relocs = 0;
200 	for (i = 0; i < cmd->relocs_num; ++i) {
201 		struct drm_qxl_reloc reloc;
202 
203 		if (DRM_COPY_FROM_USER(&reloc,
204 				       &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
205 				       sizeof(reloc))) {
206 			ret = -EFAULT;
207 			goto out_free_bos;
208 		}
209 
210 		/* add the bos to the list of bos to validate -
211 		   need to validate first then process relocs? */
212 		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
213 			DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
214 
215 			ret = -EINVAL;
216 			goto out_free_bos;
217 		}
218 		reloc_info[i].type = reloc.reloc_type;
219 
220 		if (reloc.dst_handle) {
221 			reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
222 								  reloc.dst_handle, release);
223 			if (!reloc_info[i].dst_bo) {
224 				ret = -EINVAL;
225 				reloc_info[i].src_bo = NULL;
226 				goto out_free_bos;
227 			}
228 			reloc_info[i].dst_offset = reloc.dst_offset;
229 		} else {
230 			reloc_info[i].dst_bo = cmd_bo;
231 			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
232 		}
233 		num_relocs++;
234 
235 		/* reserve and validate the reloc dst bo */
236 		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
237 			reloc_info[i].src_bo =
238 				qxlhw_handle_to_bo(qdev, file_priv,
239 						   reloc.src_handle, release);
240 			if (!reloc_info[i].src_bo) {
241 				if (reloc_info[i].dst_bo != cmd_bo)
242 					drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
243 				ret = -EINVAL;
244 				goto out_free_bos;
245 			}
246 			reloc_info[i].src_offset = reloc.src_offset;
247 		} else {
248 			reloc_info[i].src_bo = NULL;
249 			reloc_info[i].src_offset = 0;
250 		}
251 	}
252 
253 	/* validate all buffers */
254 	ret = qxl_release_reserve_list(release, false);
255 	if (ret)
256 		goto out_free_bos;
257 
258 	for (i = 0; i < cmd->relocs_num; ++i) {
259 		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
260 			apply_reloc(qdev, &reloc_info[i]);
261 		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
262 			apply_surf_reloc(qdev, &reloc_info[i]);
263 	}
264 
265 	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
266 	if (ret)
267 		qxl_release_backoff_reserve_list(release);
268 	else
269 		qxl_release_fence_buffer_objects(release);
270 
271 out_free_bos:
272 	for (j = 0; j < num_relocs; j++) {
273 		if (reloc_info[j].dst_bo != cmd_bo)
274 			drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
275 		if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
276 			drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
277 	}
278 out_free_release:
279 	if (ret)
280 		qxl_release_free(qdev, release);
281 out_free_reloc:
282 	kfree(reloc_info);
283 	return ret;
284 }
285 
286 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
287 				struct drm_file *file_priv)
288 {
289 	struct qxl_device *qdev = dev->dev_private;
290 	struct drm_qxl_execbuffer *execbuffer = data;
291 	struct drm_qxl_command user_cmd;
292 	int cmd_num;
293 	int ret;
294 
295 	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
296 
297 		struct drm_qxl_command *commands =
298 			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
299 
300 		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
301 				       sizeof(user_cmd)))
302 			return -EFAULT;
303 
304 		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
305 		if (ret)
306 			return ret;
307 	}
308 	return 0;
309 }
310 
311 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
312 				 struct drm_file *file)
313 {
314 	struct qxl_device *qdev = dev->dev_private;
315 	struct drm_qxl_update_area *update_area = data;
316 	struct qxl_rect area = {.left = update_area->left,
317 				.top = update_area->top,
318 				.right = update_area->right,
319 				.bottom = update_area->bottom};
320 	int ret;
321 	struct drm_gem_object *gobj = NULL;
322 	struct qxl_bo *qobj = NULL;
323 
324 	if (update_area->left >= update_area->right ||
325 	    update_area->top >= update_area->bottom)
326 		return -EINVAL;
327 
328 	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
329 	if (gobj == NULL)
330 		return -ENOENT;
331 
332 	qobj = gem_to_qxl_bo(gobj);
333 
334 	ret = qxl_bo_reserve(qobj, false);
335 	if (ret)
336 		goto out;
337 
338 	if (!qobj->pin_count) {
339 		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
340 		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
341 				      true, false);
342 		if (unlikely(ret))
343 			goto out;
344 	}
345 
346 	ret = qxl_bo_check_id(qdev, qobj);
347 	if (ret)
348 		goto out2;
349 	if (!qobj->surface_id)
350 		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
351 	ret = qxl_io_update_area(qdev, qobj, &area);
352 
353 out2:
354 	qxl_bo_unreserve(qobj);
355 
356 out:
357 	drm_gem_object_unreference_unlocked(gobj);
358 	return ret;
359 }
360 
361 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
362 		       struct drm_file *file_priv)
363 {
364 	struct qxl_device *qdev = dev->dev_private;
365 	struct drm_qxl_getparam *param = data;
366 
367 	switch (param->param) {
368 	case QXL_PARAM_NUM_SURFACES:
369 		param->value = qdev->rom->n_surfaces;
370 		break;
371 	case QXL_PARAM_MAX_RELOCS:
372 		param->value = QXL_MAX_RES;
373 		break;
374 	default:
375 		return -EINVAL;
376 	}
377 	return 0;
378 }
379 
380 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
381 				  struct drm_file *file_priv)
382 {
383 	struct qxl_device *qdev = dev->dev_private;
384 	struct drm_qxl_clientcap *param = data;
385 	int byte, idx;
386 
387 	byte = param->index / 8;
388 	idx = param->index % 8;
389 
390 	if (qdev->pdev->revision < 4)
391 		return -ENOSYS;
392 
393 	if (byte >= 58)
394 		return -ENOSYS;
395 
396 	if (qdev->rom->client_capabilities[byte] & (1 << idx))
397 		return 0;
398 	return -ENOSYS;
399 }
400 
401 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
402 				struct drm_file *file)
403 {
404 	struct qxl_device *qdev = dev->dev_private;
405 	struct drm_qxl_alloc_surf *param = data;
406 	struct qxl_bo *qobj;
407 	int handle;
408 	int ret;
409 	int size, actual_stride;
410 	struct qxl_surface surf;
411 
412 	/* work out size allocate bo with handle */
413 	actual_stride = param->stride < 0 ? -param->stride : param->stride;
414 	size = actual_stride * param->height + actual_stride;
415 
416 	surf.format = param->format;
417 	surf.width = param->width;
418 	surf.height = param->height;
419 	surf.stride = param->stride;
420 	surf.data = 0;
421 
422 	ret = qxl_gem_object_create_with_handle(qdev, file,
423 						QXL_GEM_DOMAIN_SURFACE,
424 						size,
425 						&surf,
426 						&qobj, &handle);
427 	if (ret) {
428 		DRM_ERROR("%s: failed to create gem ret=%d\n",
429 			  __func__, ret);
430 		return -ENOMEM;
431 	} else
432 		param->handle = handle;
433 	return ret;
434 }
435 
/*
 * Driver ioctl table.  Every entry requires an authenticated client
 * (DRM_AUTH) and runs without the big DRM lock (DRM_UNLOCKED).
 */
struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
};

/* number of entries, exported for the drm_driver num_ioctls field */
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
455