/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using FIMC, GSC, Rotator, and so on.
 * IPP is an integration device driver for hardware blocks that share
 * these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open handling.
 * 7. implement power and sysmmu control in power_on.
 */

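/*
 * get_ipp_context() returns the ipp_context stored as platform driver
 * data in ipp_probe(). ipp_is_m2m_cmd() distinguishes memory-to-memory
 * jobs from writeback/output jobs, which keep a device dedicated.
 */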
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

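/*
 * Global state shared by all ipp sub-drivers (FIMC, GSC, Rotator):
 * the registered driver list, its lock, and a notifier chain used by
 * exynos_drm_ippnb_send_event() to broadcast IPP events.
 */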
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

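/*
 * Register/unregister the virtual "exynos-drm-ipp" platform device;
 * this is expected to be called once by the Exynos DRM core.
 */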
int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

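/*
 * IDs are allocated starting from 1, so user space can pass 0 to mean
 * "unassigned": ipp_find_driver() treats ipp_id == 0 as "search all
 * drivers" and set property treats prop_id == 0 as "allocate a new id".
 */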
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd	cmd)
{
	/*
	 * Check the dedicated flag, and for WB/OUTPUT commands also
	 * whether the device is powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("failed to find ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the set property ioctl marks the device as dedicated.
		 * When the ipp driver finishes its operations, it clears
		 * the dedicated flag again.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("chosen device is already in use.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device among the
		 * ipp drivers. The drivers have different abilities,
		 * so the property needs to be checked.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not supported property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id in this case,
		 * so the ipp subsystem searches the whole driver list for
		 * a driver that can handle the property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("%s:used device.\n", __func__);
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("%s:not supported property.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports the operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search for the ipp driver by the prop_id handle.
	 * The ipp subsystem sometimes needs to find a driver by prop_id,
	 * e.g. in PAUSE state, queue buf, command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
			count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

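/*
 * Ioctl handler for property discovery. User space first calls with
 * ipp_id == 0 to learn how many drivers are registered, then queries
 * each driver's capabilities by ipp_id.
 */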
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Supply the ippdrv list count to the user application.
		 * In the first step the application gets the ippdrv count,
		 * and in the second step it gets each ippdrv's capability
		 * using its ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get an ippdrv's capability by ipp_id.
		 * Some devices do not support the wb or output interface,
		 * so the user application detects the correct ipp driver
		 * using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("failed to find ipp%d driver.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node using the command list in ippdrv.
	 * When the command node matching prop_id is found, store the
	 * new property information in that command node.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
				__func__, property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/*
	 * Log the property handed in by the user application;
	 * applications set various properties here.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id,
	 * but a prop_id may already have been assigned by an earlier
	 * set property call, e.g. in PAUSE state. In that case find
	 * the current prop_id and use it instead of allocating one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate command node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		__func__, property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
				i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired source and destination buffers,
	 * so check the minimum of the src and dst counts. Other commands
	 * do not use paired memory, so use the maximum count instead.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
			__func__, count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not supported ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
		(int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
		qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
			i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
				__func__, i, buf_info.base[i],
				(int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

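/*
 * Allocate a pending DRM event for a destination buffer; it is linked
 * to the command node and moved to the file's event list by
 * ipp_send_event() once the operation completes.
 */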
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		/*
		 * qbuf == NULL means delete all events.
		 * Stop operations want the whole event list deleted;
		 * otherwise only the event with the same buf id is removed.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

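/*
 * Queue a command work item on the single-threaded command workqueue,
 * so start/stop requests for a node are serialized in ipp_sched_cmd().
 */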
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started here at queue_buf time.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step gets an event for the destination buffer;
		 * the second step, in the M2M case, runs the command with
		 * the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs play control for the streaming
			 * feature; other cases set the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

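/*
 * Program one property into the hardware: reset the block, then set
 * format, transform (rotation/flip) and size for both the source and
 * destination operations via the driver's ops callbacks.
 */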
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops) {
			DRM_ERROR("not supported ops.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not supported format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not supported transform.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not supported size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				return -EFAULT;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
					__func__);
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

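/*
 * Command worker, run on the "ipp_cmd" workqueue. The cast from
 * work_struct relies on drm_exynos_ipp_cmd_work embedding its
 * work_struct as the first member (see the matching INIT_WORK casts).
 */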
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * M2M performs a single unit operation with multiple
		 * queues, so it has to wait until the data transfer
		 * is complete.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

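/*
 * Complete finished buffers and deliver the IPP event to user space:
 * dequeue the buffers for this command, timestamp the first pending
 * event, move it to the file's event list and wake up any reader.
 */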
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers.
	 * When a destination buffer is enqueued to the mem list, an event
	 * is created and linked to the tail of the event list, so the
	 * first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
		__func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
		event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	mutex_lock(&c_node->event_lock);

	/*
	 * IPP synchronizes the command thread and the event thread.
	 * If user land closes IPP immediately, the event thread must
	 * still complete the start_complete completion so the command
	 * thread can finish, instead of carrying out the operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
			__func__, c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

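/*
 * Sub-driver callbacks: probe binds every registered ippdrv to the drm
 * device (assigning an ipp_id and attaching the iommu), and open/close
 * manage per-file state, stopping any work the file left running.
 */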
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	DRM_DEBUG_KMS("%s\n", __func__);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * User land has entered an abnormal state:
				 * the process was killed and the file was
				 * closed, so the stop cmd ctrl was never
				 * issued to IPP. Perform the stop operation
				 * here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP provides an event thread for the IPP drivers:
	 * a driver sends its event_work to this thread, and the
	 * event thread delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP provides a command thread for the user process:
	 * the process creates a command node via the set property ioctl
	 * and queues start_work to this thread, which then starts the
	 * property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

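/*
 * Power control is still a stub; see TODO item 7 about implementing
 * power and sysmmu control.
 */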
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};