/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaler,
 * rotator and input/output DMA operations using hardware blocks such
 * as FIMC, GSC and Rotator. IPP is an integration driver for hardware
 * that shares these attributes.
 */
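
/*
 * Illustrative userspace flow (a sketch only; the ioctl and structure
 * names are assumed to match the exynos_drm uapi header):
 *
 *	struct drm_exynos_ipp_property prop = { .cmd = IPP_CMD_M2M, ... };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id	= prop.prop_id,
 *		.ops_id		= EXYNOS_DRM_OPS_DST,
 *		.buf_type	= IPP_BUF_ENQUEUE,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id	= prop.prop_id,
 *		.ctrl		= IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *
 * Completion is reported as a DRM_EXYNOS_IPP_EVENT on the drm fd.
 */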

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate	property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open handling.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

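/*
 * Allocate an id for @obj in @id_idr under @lock. idr allocation starts
 * at 1, so an ipp_id/prop_id of 0 from userspace means "not set".
 */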
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("id[%d]\n", id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd	cmd)
{
	/*
	 * Check the dedicated flag, and for WB/OUTPUT commands whether
	 * the hardware is still powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the driver is marked dedicated in the set property
		 * ioctl. When the ipp driver finishes its operations, the
		 * dedicated flag is cleared again.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("chosen device is already in use.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device among the
		 * ipp drivers. The drivers have different abilities, so
		 * the requested property has to be checked.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id, so search
		 * the whole driver list for a driver that is both free
		 * and able to handle the requested property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not support property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supporting the operations found.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search the ipp drivers for the one owning the prop_id handle.
	 * The ipp subsystem sometimes needs to find a driver by prop_id,
	 * e.g. for PAUSE state, queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Report the ippdrv count to the user application:
		 * in a first step the application queries the ippdrv
		 * count, and in a second step it queries each driver's
		 * capability using the ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report the capability of the ippdrv selected by ipp_id.
		 * Some devices do not support the wb or output interface,
		 * so the user application detects the correct ipp driver
		 * using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		/* copy the capability back into the ioctl data */
		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in ippdrv's command list. When the node
	 * matching prop_id is found and it is stopped, store the new
	 * property information in that command node.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
				property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

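/* allocate a command work item that runs ipp_sched_cmd() when queued */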
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

431 
432 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
433 		struct drm_file *file)
434 {
435 	struct drm_exynos_file_private *file_priv = file->driver_priv;
436 	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
437 	struct device *dev = priv->dev;
438 	struct ipp_context *ctx = get_ipp_context(dev);
439 	struct drm_exynos_ipp_property *property = data;
440 	struct exynos_drm_ippdrv *ippdrv;
441 	struct drm_exynos_ipp_cmd_node *c_node;
442 	int ret, i;
443 
444 	if (!ctx) {
445 		DRM_ERROR("invalid context.\n");
446 		return -EINVAL;
447 	}
448 
449 	if (!property) {
450 		DRM_ERROR("invalid property parameter.\n");
451 		return -EINVAL;
452 	}
453 
454 	/*
455 	 * This is log print for user application property.
456 	 * user application set various property.
457 	 */
458 	for_each_ipp_ops(i)
459 		ipp_print_property(property, i);
460 
461 	/*
462 	 * set property ioctl generated new prop_id.
463 	 * but in this case already asigned prop_id using old set property.
464 	 * e.g PAUSE state. this case supports find current prop_id and use it
465 	 * instead of allocation.
466 	 */
467 	if (property->prop_id) {
468 		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
469 		return ipp_find_and_set_property(property);
470 	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

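/*
 * Count the queued buffers and return how many operations are ready:
 * min(src, dst) for paired M2M transfers, otherwise the maximum count.
 */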
static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("min[%d]max[%d]\n",
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired src/dst memory addresses, so use
	 * the minimum of the src and dst counts. The other commands do
	 * not use paired memory, so use the maximum count instead.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

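/*
 * Translate the gem handles in @qbuf to dma addresses and add a new
 * memory node to the command node's src/dst queue.
 */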
static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		goto err_unlock;

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
				i, buf_info.base[i], (int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event_list is empty.\n");
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * qbuf == NULL means delete all events: the stop
		 * operation wants to flush the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

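/*
 * Fill in a command work item and queue it on the single-threaded
 * command workqueue; ipp_sched_cmd() then runs it asynchronously.
 */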
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * With the destination buffer set and the clock enabled,
	 * m2m operations are started right here at queue_buf time;
	 * the other commands only program the buffer address.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * First get an event for the destination buffer; then,
		 * in the M2M case, run the command with the destination
		 * buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature; the other cases only set
			 * the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

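/*
 * Validate a control request against the current state: PLAY needs
 * IDLE, STOP needs a non-STOP state, PAUSE needs START and RESUME
 * needs STOP. Everything except PLAY also requires the device not to
 * be runtime suspended.
 */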
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("unsupported control type.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source/destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transform.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}

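/*
 * Program the driver with the stored property, feed it the queued
 * memory nodes for the command type and kick off its start callback.
 */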
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("mem_list is empty.\n");
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

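/* command worker: starts or stops a property on the command workqueue */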
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case transfers a single unit per run with
		 * multiple queued buffers, so wait here for the
		 * completion of the data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

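/*
 * Dequeue the buffers finished for @buf_id, stamp the oldest pending
 * event with the result and move it to the file's event list so that
 * userspace is woken up.
 */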
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers.
	 * When a destination buffer is enqueued to the mem list, an
	 * event is created and linked to the tail of the event list, so
	 * the first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	mutex_lock(&c_node->event_lock);

	/*
	 * IPP synchronizes the command and event threads. If userland
	 * closes the IPP node immediately, the command thread waits on
	 * the completion, so signal it even when operations are
	 * bypassed. Take event_lock before the state check so that the
	 * unlock in the error path is always balanced.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

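/*
 * Bind every registered ippdrv to the drm device: allocate its ipp_id,
 * hook up the event workqueue and attach the iommu if supported.
 */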
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userland terminated abnormally, e.g.
				 * the process was killed and the file
				 * closed, so the stop cmd ctrl was never
				 * issued. Perform the stop operation for
				 * it here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP provides an event thread for the IPP drivers: a driver
	 * sends its event_work to this thread, and the event thread
	 * delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP provides a command thread for user processes: a user
	 * process creates a command node with the set property ioctl
	 * and sends a start_work to the command thread, which then
	 * starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* destroy ipp and property idrs */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};