1 /*
2  * Copyright (C) 2017 Samsung Electronics Co.Ltd
3  * Authors:
4  *	Marek Szyprowski <m.szyprowski@samsung.com>
5  *
6  * Exynos DRM Image Post Processing (IPP) related functions
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  */
18 
19 #include <drm/drmP.h>
20 #include <drm/drm_mode.h>
21 #include <drm/exynos_drm.h>
22 
23 #include "exynos_drm_drv.h"
24 #include "exynos_drm_gem.h"
25 #include "exynos_drm_ipp.h"
26 
27 static int num_ipp;
28 static LIST_HEAD(ipp_list);
29 
30 /**
31  * exynos_drm_ipp_register - Register a new picture processor hardware module
32  * @dev: DRM device
33  * @ipp: ipp module to init
34  * @funcs: callbacks for the new ipp object
35  * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
36  * @formats: array of supported formats
37  * @num_formats: size of the supported formats array
38  * @name: name (for debugging purposes)
39  *
40  * Initializes a ipp module.
41  *
42  * Returns:
43  * Zero on success, error code on failure.
44  */
45 int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
46 		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
47 		const struct exynos_drm_ipp_formats *formats,
48 		unsigned int num_formats, const char *name)
49 {
50 	WARN_ON(!ipp);
51 	WARN_ON(!funcs);
52 	WARN_ON(!formats);
53 	WARN_ON(!num_formats);
54 
55 	spin_lock_init(&ipp->lock);
56 	INIT_LIST_HEAD(&ipp->todo_list);
57 	init_waitqueue_head(&ipp->done_wq);
58 	ipp->dev = dev;
59 	ipp->funcs = funcs;
60 	ipp->capabilities = caps;
61 	ipp->name = name;
62 	ipp->formats = formats;
63 	ipp->num_formats = num_formats;
64 
65 	/* ipp_list modification is serialized by component framework */
66 	list_add_tail(&ipp->head, &ipp_list);
67 	ipp->id = num_ipp++;
68 
69 	DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);
70 
71 	return 0;
72 }
73 
74 /**
75  * exynos_drm_ipp_unregister - Unregister the picture processor module
76  * @dev: DRM device
77  * @ipp: ipp module
78  */
79 void exynos_drm_ipp_unregister(struct device *dev,
80 			       struct exynos_drm_ipp *ipp)
81 {
82 	WARN_ON(ipp->task);
83 	WARN_ON(!list_empty(&ipp->todo_list));
84 	list_del(&ipp->head);
85 }
86 
87 /**
88  * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
89  * @dev: DRM device
90  * @data: ioctl data
91  * @file_priv: DRM file info
92  *
93  * Construct a list of ipp ids.
94  *
95  * Called by the user via ioctl.
96  *
97  * Returns:
98  * Zero on success, negative errno on failure.
99  */
100 int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
101 				 struct drm_file *file_priv)
102 {
103 	struct drm_exynos_ioctl_ipp_get_res *resp = data;
104 	struct exynos_drm_ipp *ipp;
105 	uint32_t __user *ipp_ptr = (uint32_t __user *)
106 						(unsigned long)resp->ipp_id_ptr;
107 	unsigned int count = num_ipp, copied = 0;
108 
109 	/*
110 	 * This ioctl is called twice, once to determine how much space is
111 	 * needed, and the 2nd time to fill it.
112 	 */
113 	if (count && resp->count_ipps >= count) {
114 		list_for_each_entry(ipp, &ipp_list, head) {
115 			if (put_user(ipp->id, ipp_ptr + copied))
116 				return -EFAULT;
117 			copied++;
118 		}
119 	}
120 	resp->count_ipps = count;
121 
122 	return 0;
123 }
124 
125 static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
126 {
127 	struct exynos_drm_ipp *ipp;
128 
129 	list_for_each_entry(ipp, &ipp_list, head)
130 		if (ipp->id == id)
131 			return ipp;
132 	return NULL;
133 }
134 
135 /**
136  * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
137  * @dev: DRM device
138  * @data: ioctl data
139  * @file_priv: DRM file info
140  *
141  * Construct a structure describing ipp module capabilities.
142  *
143  * Called by the user via ioctl.
144  *
145  * Returns:
146  * Zero on success, negative errno on failure.
147  */
148 int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
149 				  struct drm_file *file_priv)
150 {
151 	struct drm_exynos_ioctl_ipp_get_caps *resp = data;
152 	void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
153 	struct exynos_drm_ipp *ipp;
154 	int i;
155 
156 	ipp = __ipp_get(resp->ipp_id);
157 	if (!ipp)
158 		return -ENOENT;
159 
160 	resp->ipp_id = ipp->id;
161 	resp->capabilities = ipp->capabilities;
162 
163 	/*
164 	 * This ioctl is called twice, once to determine how much space is
165 	 * needed, and the 2nd time to fill it.
166 	 */
167 	if (resp->formats_count >= ipp->num_formats) {
168 		for (i = 0; i < ipp->num_formats; i++) {
169 			struct drm_exynos_ipp_format tmp = {
170 				.fourcc = ipp->formats[i].fourcc,
171 				.type = ipp->formats[i].type,
172 				.modifier = ipp->formats[i].modifier,
173 			};
174 
175 			if (copy_to_user(ptr, &tmp, sizeof(tmp)))
176 				return -EFAULT;
177 			ptr += sizeof(tmp);
178 		}
179 	}
180 	resp->formats_count = ipp->num_formats;
181 
182 	return 0;
183 }
184 
185 static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
186 				struct exynos_drm_ipp *ipp, uint32_t fourcc,
187 				uint64_t mod, unsigned int type)
188 {
189 	int i;
190 
191 	for (i = 0; i < ipp->num_formats; i++) {
192 		if ((ipp->formats[i].type & type) &&
193 		    ipp->formats[i].fourcc == fourcc &&
194 		    ipp->formats[i].modifier == mod)
195 			return &ipp->formats[i];
196 	}
197 	return NULL;
198 }
199 
200 /**
201  * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
202  * @dev: DRM device
203  * @data: ioctl data
204  * @file_priv: DRM file info
205  *
206  * Construct a structure describing ipp module limitations for provided
207  * picture format.
208  *
209  * Called by the user via ioctl.
210  *
211  * Returns:
212  * Zero on success, negative errno on failure.
213  */
214 int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
215 				    struct drm_file *file_priv)
216 {
217 	struct drm_exynos_ioctl_ipp_get_limits *resp = data;
218 	void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
219 	const struct exynos_drm_ipp_formats *format;
220 	struct exynos_drm_ipp *ipp;
221 
222 	if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
223 	    resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
224 		return -EINVAL;
225 
226 	ipp = __ipp_get(resp->ipp_id);
227 	if (!ipp)
228 		return -ENOENT;
229 
230 	format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
231 				  resp->type);
232 	if (!format)
233 		return -EINVAL;
234 
235 	/*
236 	 * This ioctl is called twice, once to determine how much space is
237 	 * needed, and the 2nd time to fill it.
238 	 */
239 	if (format->num_limits && resp->limits_count >= format->num_limits)
240 		if (copy_to_user((void __user *)ptr, format->limits,
241 				 sizeof(*format->limits) * format->num_limits))
242 			return -EFAULT;
243 	resp->limits_count = format->num_limits;
244 
245 	return 0;
246 }
247 
/* DRM pending event wrapper carrying the exynos ipp completion payload */
struct drm_pending_exynos_ipp_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};
252 
253 static inline struct exynos_drm_ipp_task *
254 			exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
255 {
256 	struct exynos_drm_ipp_task *task;
257 
258 	task = kzalloc(sizeof(*task), GFP_KERNEL);
259 	if (!task)
260 		return NULL;
261 
262 	task->dev = ipp->dev;
263 	task->ipp = ipp;
264 
265 	/* some defaults */
266 	task->src.rect.w = task->dst.rect.w = UINT_MAX;
267 	task->src.rect.h = task->dst.rect.h = UINT_MAX;
268 	task->transform.rotation = DRM_MODE_ROTATE_0;
269 
270 	DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
271 
272 	return task;
273 }
274 
/*
 * Mapping of userspace parameter-structure ids to their size and the
 * offset of the corresponding field inside struct exynos_drm_ipp_task.
 * Used when parsing the packed parameter stream from the commit ioctl.
 */
static const struct exynos_drm_param_map {
	unsigned int id;	/* DRM_EXYNOS_IPP_TASK_* identifier */
	unsigned int size;	/* size of the userspace structure */
	unsigned int offset;	/* destination offset within the task */
} exynos_drm_ipp_params_maps[] = {
	{
		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, src.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_BUFFER |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, dst.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, src.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, dst.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_TRANSFORM,
		sizeof(struct drm_exynos_ipp_task_transform),
		offsetof(struct exynos_drm_ipp_task, transform),
	}, {
		DRM_EXYNOS_IPP_TASK_ALPHA,
		sizeof(struct drm_exynos_ipp_task_alpha),
		offsetof(struct exynos_drm_ipp_task, alpha),
	},
};
308 
/*
 * Parse the packed stream of parameter structures provided by userspace
 * and copy each recognized structure into the matching task field, as
 * described by exynos_drm_ipp_params_maps[].
 */
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
				   struct drm_exynos_ioctl_ipp_commit *arg)
{
	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
	unsigned int size = arg->params_size;
	uint32_t id;
	int i;

	while (size) {
		/* the first u32 of every parameter structure is its id */
		if (get_user(id, (uint32_t __user *)params))
			return -EFAULT;

		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
			if (map[i].id == id)
				break;
		/* reject unknown ids and truncated parameter structures */
		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
		    map[i].size > size)
			return -EINVAL;

		if (copy_from_user((void *)task + map[i].offset, params,
				   map[i].size))
			return -EFAULT;

		params += map[i].size;
		size -= map[i].size;
	}

	DRM_DEV_DEBUG_DRIVER(task->dev,
			     "Got task %pK configuration from userspace\n",
			     task);
	return 0;
}
342 
/*
 * Resolve the GEM handles of all planes of @buf into DMA addresses.
 * Takes a reference on each GEM object; on failure, all references
 * acquired so far are dropped again.
 */
static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
					    struct drm_file *filp)
{
	int ret = 0;
	int i;

	/* get GEM buffers and check their size */
	for (i = 0; i < buf->format->num_planes; i++) {
		/* non-first planes are vertically subsampled by vsub */
		unsigned int height = (i == 0) ? buf->buf.height :
			     DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
		unsigned long size = height * buf->buf.pitch[i];
		struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
							    buf->buf.gem_id[i]);
		if (!gem) {
			ret = -ENOENT;
			goto gem_free;
		}
		buf->exynos_gem[i] = gem;

		/* plane data plus start offset must fit in the GEM object */
		if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
			/* bump i so cleanup also releases the current gem */
			i++;
			ret = -EINVAL;
			goto gem_free;
		}
		buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
				   buf->buf.offset[i];
	}

	return 0;
gem_free:
	/* drop the references taken for planes [0, i) */
	while (i--) {
		exynos_drm_gem_put(buf->exynos_gem[i]);
		buf->exynos_gem[i] = NULL;
	}
	return ret;
}
379 
380 static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
381 {
382 	int i;
383 
384 	if (!buf->exynos_gem[0])
385 		return;
386 	for (i = 0; i < buf->format->num_planes; i++)
387 		exynos_drm_gem_put(buf->exynos_gem[i]);
388 }
389 
/*
 * Release everything a task owns: GEM references, a still-pending
 * completion event (if any) and the task structure itself.
 */
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
				 struct exynos_drm_ipp_task *task)
{
	DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);

	exynos_drm_ipp_task_release_buf(&task->src);
	exynos_drm_ipp_task_release_buf(&task->dst);
	/* an unsent event must be canceled, not leaked */
	if (task->event)
		drm_event_cancel_free(ipp->drm_dev, &task->event->base);
	kfree(task);
}
401 
/* resolved horizontal and vertical size limits for one limit check */
struct drm_ipp_limit {
	struct drm_exynos_ipp_limit_val h;
	struct drm_exynos_ipp_limit_val v;
};
406 
/* size-limit categories; IPP_LIMIT_MAX is the number of categories */
enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
410 
/*
 * For each size-limit category: the ordered list of limit types to apply,
 * most specific first, with more generic types as fallback. The remaining
 * zero-initialized entries terminate each list (assumes the
 * DRM_EXYNOS_IPP_LIMIT_SIZE_* constants are non-zero).
 */
static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};
419 
/* set *ptr only if it is still unset, so earlier (specific) limits win */
static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
{
	if (*ptr == 0)
		*ptr = val;
}
425 
426 static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
427 			     unsigned int num_limits, enum drm_ipp_size_id id,
428 			     struct drm_ipp_limit *res)
429 {
430 	const struct drm_exynos_ipp_limit *l = limits;
431 	int i = 0;
432 
433 	memset(res, 0, sizeof(*res));
434 	for (i = 0; limit_id_fallback[id][i]; i++)
435 		for (l = limits; l - limits < num_limits; l++) {
436 			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
437 			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
438 			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
439 						     limit_id_fallback[id][i]))
440 				continue;
441 			__limit_set_val(&res->h.min, l->h.min);
442 			__limit_set_val(&res->h.max, l->h.max);
443 			__limit_set_val(&res->h.align, l->h.align);
444 			__limit_set_val(&res->v.min, l->v.min);
445 			__limit_set_val(&res->v.max, l->v.max);
446 			__limit_set_val(&res->v.align, l->v.align);
447 		}
448 }
449 
450 static inline bool __align_check(unsigned int val, unsigned int align)
451 {
452 	if (align && (val & (align - 1))) {
453 		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
454 				 val, align);
455 		return false;
456 	}
457 	return true;
458 }
459 
460 static inline bool __size_limit_check(unsigned int val,
461 				 struct drm_exynos_ipp_limit_val *l)
462 {
463 	if ((l->min && val < l->min) || (l->max && val > l->max)) {
464 		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
465 				 val, l->min, l->max);
466 		return false;
467 	}
468 	return __align_check(val, l->align);
469 }
470 
/*
 * Validate the buffer dimensions and the processed rectangle against the
 * driver-provided size limits, honoring rotation (different limit set)
 * and axis swap (90/270-degree rotation).
 */
static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
	bool rotate, bool swap)
{
	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
	struct drm_ipp_limit l;
	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
	/* width as the hardware sees it, derived from the plane pitch */
	int real_width = buf->buf.pitch[0] / buf->format->cpp[0];

	/* no limits provided by the driver: accept everything */
	if (!limits)
		return 0;

	/* first check the whole-buffer dimensions */
	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
	if (!__size_limit_check(real_width, &l.h) ||
	    !__size_limit_check(buf->buf.height, &l.v))
		return -EINVAL;

	/* with swapped axes, h-limits apply to the v-axis and vice versa */
	if (swap) {
		lv = &l.h;
		lh = &l.v;
	}
	/* then check the processed rectangle against area/rotated limits */
	__get_size_limit(limits, num_limits, id, &l);
	if (!__size_limit_check(buf->rect.w, lh) ||
	    !__align_check(buf->rect.x, lh->align) ||
	    !__size_limit_check(buf->rect.h, lv) ||
	    !__align_check(buf->rect.y, lv->align))
		return -EINVAL;

	return 0;
}
501 
502 static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
503 				       unsigned int min, unsigned int max)
504 {
505 	if ((max && (dst << 16) > src * max) ||
506 	    (min && (dst << 16) < src * min)) {
507 		DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
508 			 src, dst,
509 			 min >> 16, 100000 * (min & 0xffff) / (1 << 16),
510 			 max >> 16, 100000 * (max & 0xffff) / (1 << 16));
511 		return false;
512 	}
513 	return true;
514 }
515 
516 static int exynos_drm_ipp_check_scale_limits(
517 				struct drm_exynos_ipp_task_rect *src,
518 				struct drm_exynos_ipp_task_rect *dst,
519 				const struct drm_exynos_ipp_limit *limits,
520 				unsigned int num_limits, bool swap)
521 {
522 	const struct drm_exynos_ipp_limit_val *lh, *lv;
523 	int dw, dh;
524 
525 	for (; num_limits; limits++, num_limits--)
526 		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
527 		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
528 			break;
529 	if (!num_limits)
530 		return 0;
531 
532 	lh = (!swap) ? &limits->h : &limits->v;
533 	lv = (!swap) ? &limits->v : &limits->h;
534 	dw = (!swap) ? dst->w : dst->h;
535 	dh = (!swap) ? dst->h : dst->w;
536 
537 	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
538 	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
539 		return -EINVAL;
540 
541 	return 0;
542 }
543 
/*
 * Validate one buffer (@buf is either @src or @dst) against the module's
 * supported formats and driver limits. Also fills in buf->format and any
 * zero pitches with their defaults.
 */
static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
				       struct exynos_drm_ipp_buffer *buf,
				       struct exynos_drm_ipp_buffer *src,
				       struct exynos_drm_ipp_buffer *dst,
				       bool rotate, bool swap)
{
	const struct exynos_drm_ipp_formats *fmt;
	int ret, i;

	/* the module must support the fourcc+modifier in the given role */
	fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
			       buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
					    DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!fmt) {
		DRM_DEV_DEBUG_DRIVER(task->dev,
				     "Task %pK: %s format not supported\n",
				     task, buf == src ? "src" : "dst");
		return -EINVAL;
	}

	/* basic checks */
	if (buf->buf.width == 0 || buf->buf.height == 0)
		return -EINVAL;

	buf->format = drm_format_info(buf->buf.fourcc);
	for (i = 0; i < buf->format->num_planes; i++) {
		/* non-first planes are horizontally subsampled by hsub */
		unsigned int width = (i == 0) ? buf->buf.width :
			     DIV_ROUND_UP(buf->buf.width, buf->format->hsub);

		/* a zero pitch from userspace means "use the default" */
		if (buf->buf.pitch[i] == 0)
			buf->buf.pitch[i] = width * buf->format->cpp[i];
		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
			return -EINVAL;
		if (!buf->buf.gem_id[i])
			return -ENOENT;
	}

	/* pitch for additional planes must match */
	if (buf->format->num_planes > 2 &&
	    buf->buf.pitch[1] != buf->buf.pitch[2])
		return -EINVAL;

	/* check driver limits */
	ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
					       fmt->num_limits,
					       rotate,
					       buf == dst ? swap : false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						fmt->limits,
						fmt->num_limits, swap);
	return ret;
}
597 
/*
 * Validate a fully-configured task: resolve default rectangles, check
 * that rectangles fit their buffers, that the requested operation is
 * within the module's capabilities and that both formats are supported.
 */
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
	bool swap = drm_rotation_90_or_270(rotation);
	bool rotate = (rotation != DRM_MODE_ROTATE_0);
	bool scale = false;

	DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);

	/* UINT_MAX markers from task_alloc mean "use the whole buffer" */
	if (src->rect.w == UINT_MAX)
		src->rect.w = src->buf.width;
	if (src->rect.h == UINT_MAX)
		src->rect.h = src->buf.height;
	if (dst->rect.w == UINT_MAX)
		dst->rect.w = dst->buf.width;
	if (dst->rect.h == UINT_MAX)
		dst->rect.h = dst->buf.height;

	/* both rectangles must lie entirely within their buffers */
	if (src->rect.x + src->rect.w > (src->buf.width) ||
	    src->rect.y + src->rect.h > (src->buf.height) ||
	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
		DRM_DEV_DEBUG_DRIVER(task->dev,
				     "Task %pK: defined area is outside provided buffers\n",
				     task);
		return -EINVAL;
	}

	/* 90/270-degree rotation swaps the axes of the destination */
	if ((!swap && (src->rect.w != dst->rect.w ||
		       src->rect.h != dst->rect.h)) ||
	    (swap && (src->rect.w != dst->rect.h ||
		      src->rect.h != dst->rect.w)))
		scale = true;

	/* refuse operations this hardware module cannot perform */
	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
	     src->buf.fourcc != dst->buf.fourcc)) {
		DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
				     task);
		return -EINVAL;
	}

	ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
	if (ret)
		return ret;

	ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
			     task);

	return ret;
}
659 
660 static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
661 				     struct drm_file *filp)
662 {
663 	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
664 	int ret = 0;
665 
666 	DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
667 			     task);
668 
669 	ret = exynos_drm_ipp_task_setup_buffer(src, filp);
670 	if (ret) {
671 		DRM_DEV_DEBUG_DRIVER(task->dev,
672 				     "Task %pK: src buffer setup failed\n",
673 				     task);
674 		return ret;
675 	}
676 	ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
677 	if (ret) {
678 		DRM_DEV_DEBUG_DRIVER(task->dev,
679 				     "Task %pK: dst buffer setup failed\n",
680 				     task);
681 		return ret;
682 	}
683 
684 	DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
685 			     task);
686 
687 	return ret;
688 }
689 
690 
691 static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
692 				 struct drm_file *file_priv, uint64_t user_data)
693 {
694 	struct drm_pending_exynos_ipp_event *e = NULL;
695 	int ret;
696 
697 	e = kzalloc(sizeof(*e), GFP_KERNEL);
698 	if (!e)
699 		return -ENOMEM;
700 
701 	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
702 	e->event.base.length = sizeof(e->event);
703 	e->event.user_data = user_data;
704 
705 	ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
706 				     &e->event.base);
707 	if (ret)
708 		goto free;
709 
710 	task->event = e;
711 	return 0;
712 free:
713 	kfree(e);
714 	return ret;
715 }
716 
/* timestamp, sequence-number and deliver the task's completion event */
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
	struct timespec64 now;

	ktime_get_ts64(&now);
	task->event->event.tv_sec = now.tv_sec;
	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	/* per-module monotonically increasing sequence counter */
	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);

	drm_send_event(task->ipp->drm_dev, &task->event->base);
}
728 
729 static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
730 {
731 	int ret = task->ret;
732 
733 	if (ret == 0 && task->event) {
734 		exynos_drm_ipp_event_send(task);
735 		/* ensure event won't be canceled on task free */
736 		task->event = NULL;
737 	}
738 
739 	exynos_drm_ipp_task_free(task->ipp, task);
740 	return ret;
741 }
742 
/* deferred cleanup for asynchronous tasks (scheduled from task_done) */
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
	struct exynos_drm_ipp_task *task = container_of(work,
				      struct exynos_drm_ipp_task, cleanup_work);

	exynos_drm_ipp_task_cleanup(task);
}
750 
751 static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);
752 
753 /**
754  * exynos_drm_ipp_task_done - finish given task and set return code
755  * @task: ipp task to finish
756  * @ret: error code or 0 if operation has been performed successfully
757  */
758 void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
759 {
760 	struct exynos_drm_ipp *ipp = task->ipp;
761 	unsigned long flags;
762 
763 	DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
764 			     ipp->id, task, ret);
765 
766 	spin_lock_irqsave(&ipp->lock, flags);
767 	if (ipp->task == task)
768 		ipp->task = NULL;
769 	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
770 	task->ret = ret;
771 	spin_unlock_irqrestore(&ipp->lock, flags);
772 
773 	exynos_drm_ipp_next_task(ipp);
774 	wake_up(&ipp->done_wq);
775 
776 	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
777 		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
778 		schedule_work(&task->cleanup_work);
779 	}
780 }
781 
/*
 * If the module is idle and the todo list is not empty, dequeue the next
 * task and commit it to the hardware.
 */
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;
	unsigned long flags;
	int ret;

	DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
			     ipp->id);

	spin_lock_irqsave(&ipp->lock, flags);

	/* nothing to do while a task is running or the queue is empty */
	if (ipp->task || list_empty(&ipp->todo_list)) {
		spin_unlock_irqrestore(&ipp->lock, flags);
		return;
	}

	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
				head);
	list_del_init(&task->head);
	ipp->task = task;

	spin_unlock_irqrestore(&ipp->lock, flags);

	DRM_DEV_DEBUG_DRIVER(ipp->dev,
			     "ipp: %d, selected task %pK to run\n", ipp->id,
			     task);

	/* commit() is called without the lock held; report failure as done */
	ret = ipp->funcs->commit(ipp, task);
	if (ret)
		exynos_drm_ipp_task_done(task, ret);
}
813 
/* queue a task for execution and start it if the hardware is idle */
static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
					 struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	list_add(&task->head, &ipp->todo_list);
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
}
825 
/*
 * Abort a task whose synchronous waiter was interrupted. The task may be
 * already done, still queued, or currently running on the hardware; each
 * case needs different cleanup, decided under the module lock.
 */
static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
				      struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
		/* already completed task */
		exynos_drm_ipp_task_cleanup(task);
	} else if (ipp->task != task) {
		/* task has not been scheduled for execution yet */
		list_del_init(&task->head);
		exynos_drm_ipp_task_cleanup(task);
	} else {
		/*
		 * currently processed task, call abort() and perform
		 * cleanup with async worker
		 */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		spin_unlock_irqrestore(&ipp->lock, flags);
		if (ipp->funcs->abort)
			ipp->funcs->abort(ipp, task);
		return;
	}
	spin_unlock_irqrestore(&ipp->lock, flags);
}
852 
853 /**
854  * exynos_drm_ipp_commit_ioctl - perform image processing operation
855  * @dev: DRM device
856  * @data: ioctl data
857  * @file_priv: DRM file info
858  *
859  * Construct a ipp task from the set of properties provided from the user
860  * and try to schedule it to framebuffer processor hardware.
861  *
862  * Called by the user via ioctl.
863  *
864  * Returns:
865  * Zero on success, negative errno on failure.
866  */
867 int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
868 				struct drm_file *file_priv)
869 {
870 	struct drm_exynos_ioctl_ipp_commit *arg = data;
871 	struct exynos_drm_ipp *ipp;
872 	struct exynos_drm_ipp_task *task;
873 	int ret = 0;
874 
875 	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
876 		return -EINVAL;
877 
878 	/* can't test and expect an event at the same time */
879 	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
880 			(arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
881 		return -EINVAL;
882 
883 	ipp = __ipp_get(arg->ipp_id);
884 	if (!ipp)
885 		return -ENOENT;
886 
887 	task = exynos_drm_ipp_task_alloc(ipp);
888 	if (!task)
889 		return -ENOMEM;
890 
891 	ret = exynos_drm_ipp_task_set(task, arg);
892 	if (ret)
893 		goto free;
894 
895 	ret = exynos_drm_ipp_task_check(task);
896 	if (ret)
897 		goto free;
898 
899 	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
900 	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
901 		goto free;
902 
903 	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
904 		ret = exynos_drm_ipp_event_create(task, file_priv,
905 						 arg->user_data);
906 		if (ret)
907 			goto free;
908 	}
909 
910 	/*
911 	 * Queue task for processing on the hardware. task object will be
912 	 * then freed after exynos_drm_ipp_task_done()
913 	 */
914 	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
915 		DRM_DEV_DEBUG_DRIVER(ipp->dev,
916 				     "ipp: %d, nonblocking processing task %pK\n",
917 				     ipp->id, task);
918 
919 		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
920 		exynos_drm_ipp_schedule_task(task->ipp, task);
921 		ret = 0;
922 	} else {
923 		DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
924 				     ipp->id, task);
925 		exynos_drm_ipp_schedule_task(ipp, task);
926 		ret = wait_event_interruptible(ipp->done_wq,
927 					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
928 		if (ret)
929 			exynos_drm_ipp_task_abort(ipp, task);
930 		else
931 			ret = exynos_drm_ipp_task_cleanup(task);
932 	}
933 	return ret;
934 free:
935 	exynos_drm_ipp_task_free(ipp, task);
936 
937 	return ret;
938 }
939