/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaling,
 * rotation and input/output DMA operations using hardware blocks
 * such as FIMC, GSC and Rotator. IPP is the integration driver for
 * this family of hardware blocks with common attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multiple opens.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

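/*
 * idr helpers: the ipp core tracks registered drivers (ipp_idr) and
 * property command nodes (prop_idr) in idr tables, each guarded by its
 * own mutex. ipp_create_id() returns the newly allocated id (>= 1) on
 * success or a negative error code.
 */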
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return ret;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}

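/*
 * A driver is usable for a given property if it is not dedicated to
 * another task, is idle (runtime suspended) unless the command is M2M,
 * and accepts the property via its optional check_property() callback.
 */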
static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
			    struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				  !pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}

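/*
 * Resolve a property to an ipp driver: a non-zero ipp_id selects that
 * specific driver from the idr, otherwise the first registered driver
 * that passes ipp_check_driver() is used.
 */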
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * This looks up an ipp driver through the prop_id handle.
	 * The ipp subsystem sometimes needs to find the driver by
	 * prop_id, e.g. for PAUSE state, queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * This exposes the ippdrv list count to user space:
		 * as a first step the application queries the ippdrv
		 * count, and as a second step it queries each ippdrv
		 * capability using its ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get the ippdrv capability by ipp_id.
		 * Some devices do not support the writeback or output
		 * interface, so the application uses this ioctl to
		 * detect the proper ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&event_work->work, ipp_sched_event);

	return event_work;
}

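/*
 * SET_PROPERTY ioctl: with a non-zero prop_id this updates an existing,
 * stopped command node in place. Otherwise it picks a matching driver,
 * allocates a new command node together with its start/stop/event works
 * and queues, and links the node into the driver's cmd_list.
 */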
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	prop_id = property->prop_id;

	/*
	 * Log the property settings coming from the user application;
	 * applications configure a variety of properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * In case prop_id is not zero try to set existing property.
	 */
	if (prop_id) {
		c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);

		if (!c_node || c_node->filp != file) {
			DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
			return -EINVAL;
		}

		if (c_node->state != IPP_STATE_STOP) {
			DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
			return -EINVAL;
		}

		c_node->property = *property;

		return 0;
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
	if (ret < 0) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}
	property->prop_id = ret;

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
		property->prop_id, property->cmd, ippdrv);

	/* store property information and ippdrv in private data */
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;
	c_node->filp = file;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}

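/*
 * Drop a memory node: put the dma address of each planar gem handle
 * back, unlink the node from its memory list and free it. The caller
 * must hold c_node->mem_lock when the node is on a memory list.
 */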
static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[%p]\n", m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							c_node->filp);
	}

	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}

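/*
 * Build a memory node from a queue buffer: translate each planar gem
 * handle into a dma address and append the node to the matching source
 * or destination memory list of the command node.
 */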
static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;
	INIT_LIST_HEAD(&m_node->list);

	DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], c_node->filp);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				ipp_put_mem_node(drm_dev, c_node, m_node);
				return ERR_PTR(-EFAULT);
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
				      &buf_info->base[i], buf_info->handles[i]);
		}
	}

	mutex_lock(&c_node->mem_lock);
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;
}

static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
			       struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct list_head *head = &c_node->mem_list[ops];

	mutex_lock(&c_node->mem_lock);

	list_for_each_entry_safe(m_node, tm_node, head, list) {
		int ret;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
	}

	mutex_unlock(&c_node->mem_lock);
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		c_node->filp->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = c_node->filp;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);

		/*
		 * A NULL qbuf means delete all events: the stop
		 * operation wants to flush the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
	return;
}

static void ipp_clean_cmd_node(struct ipp_context *ctx,
				struct drm_exynos_ipp_cmd_node *c_node)
{
	int i;

	/* cancel works */
	cancel_work_sync(&c_node->start_work->work);
	cancel_work_sync(&c_node->stop_work->work);
	cancel_work_sync(&c_node->event_work->work);

	/* put event */
	ipp_put_event(c_node, NULL);

	for_each_ipp_ops(i)
		ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);

	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

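/*
 * A command node is ready to run when the memory lists required by its
 * command type are non-empty: dst for writeback, src for output, and
 * both src and dst for M2M.
 */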
static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	case IPP_CMD_M2M:
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[%p]\n", m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		return -EFAULT;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			return ret;
		}
	}

	return ret;
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, &cmd_work->work);
}

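/*
 * Called on buffer enqueue while a command is running: for M2M the
 * start work is queued again to process the new buffer, for writeback
 * and output the buffer address is programmed directly.
 */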
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If a destination buffer is set and the clocks are enabled,
	 * the m2m operation needs to be started here in queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}

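/*
 * QUEUE_BUF ioctl: enqueue translates the buffer into a memory node
 * and, for destination buffers, arms a completion event and kicks the
 * hardware if needed; dequeue drops the event and the memory node.
 */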
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * As a first step, get an event for the destination
		 * buffer. As a second step, in the M2M case, run with
		 * the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature; other cases just set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

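/*
 * Validate a control request against the current node state, e.g.
 * only an idle node may be played and only a stopped node resumed.
 * Everything except PLAY also requires the device to be powered.
 */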
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

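/*
 * CMD_CTRL ioctl: drive the command node state machine. PLAY and
 * RESUME queue the start work; STOP and PAUSE queue the stop work and
 * wait (with a timeout) for the stop completion. STOP additionally
 * cleans up the command node and may drop the runtime PM reference.
 */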
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

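/*
 * Program the hardware for a property: reset the block, then set
 * format, transform (rotation/flip) and size for both the source and
 * destination operations via the driver's ops callbacks.
 */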
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev))
		return -EINVAL;

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret)
				return ret;
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret)
				return ret;
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret)
				return ret;
		}
	}

	return 0;
}

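/*
 * Commit a command node to the hardware: program the property, set the
 * buffer addresses required by the command type and call the driver's
 * start() callback.
 */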
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[%p]\n", m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	int i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i)
			ipp_clean_mem_nodes(drm_dev, c_node, i);
		break;
	case IPP_CMD_WB:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
		break;
	case IPP_CMD_OUTPUT:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	return 0;
}

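/*
 * Command work handler, run on the ipp_cmd workqueue: starts or stops
 * the property under the command node lock. For M2M a start waits for
 * the transfer completion signalled by the event work.
 */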
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		container_of(work, struct drm_exynos_ipp_cmd_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * it performs a single unit of operation with multiple
		 * queued buffers, so it must wait until the data
		 * transfer finishes.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}

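/*
 * Complete the finished buffers for one transfer: put the consumed
 * memory nodes back, fill the oldest pending event with the buffer ids
 * and a timestamp, and move it to the file's event list, waking any
 * reader.
 */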
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for the destination
	 * buffer. When a destination buffer is enqueued to the memory
	 * list an event is created and linked to the tail of the event
	 * list, so the first event always belongs to the first
	 * enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}

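/*
 * Event work handler, run on the ipp_event workqueue: forwards the
 * finished buffer ids reported by the driver to user space and
 * completes the start completion for M2M commands.
 */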
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		container_of(work, struct drm_exynos_ipp_event_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP synchronizes the command and event threads. If user
	 * space closes the device immediately, this completes the
	 * start completion so the command thread can synchronize
	 * and back out of the running operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
			count++, ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* get ipp driver entry */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[%p]\n", dev);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
				count++, ippdrv);

			if (c_node->filp == file) {
				/*
				 * User space went into an abnormal
				 * state: the process was killed and
				 * the file closed, so IPP never got a
				 * stop cmd ctrl. Perform the stop
				 * operation here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single-threaded workqueue for ipp events.
	 * IPP provides an event thread for the IPP drivers: a driver
	 * sends its event_work to this thread, and the event thread
	 * delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single-threaded workqueue for ipp commands.
	 * IPP provides a command thread for user processes: a process
	 * creates a command node with the set property ioctl and sends
	 * its start_work to this thread, which then starts the
	 * property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
	},
};