/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using hardware blocks such as FIMC, GSC
 * and Rotator. IPP is an integration driver for hardware blocks that share
 * these attributes.
 */
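
/*
 * Illustrative sketch (not part of the driver): a typical userland session
 * drives this subsystem through the set property, queue buf and cmd ctrl
 * ioctls, assuming the uapi names from exynos_drm.h. The descriptor "fd"
 * is hypothetical and error handling is omitted.
 *
 *	struct drm_exynos_ipp_property prop = { ... };
 *	struct drm_exynos_ipp_queue_buf qbuf = { ... };
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = { ... };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	qbuf.prop_id = prop.prop_id;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	ctrl.prop_id = prop.prop_id;
 *	ctrl.ctrl = IPP_CTRL_PLAY;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */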

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open handling.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem handles, dma addresses and size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: sub driver used for registration with the exynos drm core.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	/* do the allocation under our mutex */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("id[%d]\n", id);

	/* find object using handle */
	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		return ERR_PTR(-ENODEV);
	}

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd cmd)
{
	/*
	 * check the dedicated flag and, for WB/OUTPUT operations,
	 * the power state.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the set property ioctl marks the driver dedicated.
		 * The flag is cleared when the driver finishes its
		 * operations.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already dedicated, choose another device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device, because
		 * ipp drivers have different abilities, so each driver
		 * needs to check the property against its capabilities.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not supported property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * In this case the user application did not set an ipp_id,
		 * so the ipp subsystem searches the whole driver list for
		 * a driver that can handle the property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not supported property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports the requested operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search the ipp driver list by prop_id handle.
	 * Sometimes the ipp subsystem needs to find a driver by prop_id,
	 * e.g. in PAUSE state, queue buf or command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Report the ippdrv list count to the user application.
		 * In the first step the application reads the ippdrv count,
		 * and in the second step it reads each ippdrv capability
		 * using the ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report the ippdrv capability for the given ipp_id.
		 * Some devices do not support the wb or output interface,
		 * so the user application detects the correct ipp driver
		 * using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		/* copy the capability into the ioctl data */
		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}
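
/*
 * Illustrative sketch (not part of the driver) of the two-step query the
 * handler above implements: pass ipp_id == 0 to learn how many drivers are
 * registered, then query each id for its capabilities. "fd" is a
 * hypothetical DRM file descriptor, ids are assumed to be the contiguous
 * 1..n values allocated at probe time, and error handling is omitted.
 *
 *	struct drm_exynos_ipp_prop_list plist;
 *	__u32 i, n;
 *
 *	memset(&plist, 0, sizeof(plist));
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	n = plist.count;
 *	for (i = 1; i <= n; i++) {
 *		plist.ipp_id = i;
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	}
 */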

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node using the command list in ippdrv.
	 * When the command node matching prop_id is found, store the
	 * new property information in that command node.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[%p]\n",
				property->cmd, ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/*
	 * Log the property the user application set.
	 * The user application sets various properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id.
	 * But a prop_id may already be assigned by an earlier set property
	 * call, e.g. in PAUSE state. In that case find the current prop_id
	 * and use it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate command node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
		property->prop_id, property->cmd, ippdrv);

	/* store property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}
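
/*
 * Illustrative sketch (not part of the driver): filling out a property for
 * an M2M operation before calling the ioctl handled above. The format and
 * geometry values are hypothetical, only fields this file itself consumes
 * are shown, and error handling is omitted.
 *
 *	struct drm_exynos_ipp_property prop;
 *
 *	memset(&prop, 0, sizeof(prop));
 *	prop.cmd = IPP_CMD_M2M;
 *	prop.ipp_id = 0;	(let ipp_find_driver() pick a driver)
 *	prop.config[EXYNOS_DRM_OPS_SRC].ops_id = EXYNOS_DRM_OPS_SRC;
 *	prop.config[EXYNOS_DRM_OPS_SRC].fmt = DRM_FORMAT_XRGB8888;
 *	prop.config[EXYNOS_DRM_OPS_SRC].sz.hsize = 1280;
 *	prop.config[EXYNOS_DRM_OPS_SRC].sz.vsize = 720;
 *	prop.config[EXYNOS_DRM_OPS_DST].ops_id = EXYNOS_DRM_OPS_DST;
 *	prop.config[EXYNOS_DRM_OPS_DST].fmt = DRM_FORMAT_XRGB8888;
 *	prop.config[EXYNOS_DRM_OPS_DST].sz.hsize = 640;
 *	prop.config[EXYNOS_DRM_OPS_DST].sz.vsize = 360;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	(prop.prop_id now holds the id created by ipp_create_id())
 */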

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s,count[%d]m_node[%p]\n",
				i ? "dst" : "src", count[i], m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("min[%d]max[%d]\n",
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired src/dst memory addresses, so check
	 * the minimum count of src and dst. Other commands do not use
	 * paired memory, so use the maximum count.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}
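
/*
 * Worked example of the counting above: with 3 buffers queued on the src
 * list and 2 on the dst list, an M2M command reports min(3, 2) = 2
 * runnable frames, while a WB or OUTPUT command uses only one side and
 * reports max(3, 2) = 3.
 */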

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[%p]\n", m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not supported ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n",
				i, buf_info.base[i], buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[%p]\n", m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event_list is empty.\n");
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);

		/*
		 * qbuf == NULL means delete all events.
		 * Stop operations want to delete the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * then m2m operations need to be started here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step is to get an event for the destination
		 * buffer. The second step, in the M2M case, is to run
		 * with the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature. The other cases set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
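
/*
 * Illustrative sketch (not part of the driver): enqueueing one destination
 * buffer for a running property, then dequeueing it after the completion
 * event arrives. "fd", "prop_id" and "gem_handle" are hypothetical, only
 * the first plane handle is set, and error handling is omitted.
 *
 *	struct drm_exynos_ipp_queue_buf qbuf;
 *
 *	memset(&qbuf, 0, sizeof(qbuf));
 *	qbuf.prop_id = prop_id;
 *	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
 *	qbuf.buf_id = 0;
 *	qbuf.buf_type = IPP_BUF_ENQUEUE;
 *	qbuf.handle[0] = gem_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	(wait for a DRM_EXYNOS_IPP_EVENT on fd, then ...)
 *
 *	qbuf.buf_type = IPP_BUF_DEQUEUE;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 */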

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
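
/*
 * Illustrative sketch (not part of the driver): pausing and resuming a
 * running property with the ioctl handled above. "fd" and "prop_id" are
 * hypothetical and error handling is omitted.
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl;
 *
 *	ctrl.prop_id = prop_id;
 *	ctrl.ctrl = IPP_CTRL_PAUSE;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *
 *	ctrl.ctrl = IPP_CTRL_RESUME;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */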

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source and destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not supported ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not supported format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not supported transform.\n");
				return ret;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not supported size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/* list_first_entry() is not valid on an empty list */
			if (list_empty(head)) {
				DRM_ERROR("failed to get node.\n");
				return -EFAULT;
			}

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[%p]\n", m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("mem_list is empty.\n");
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer,
		 * because M2M is a single-shot operation fed from
		 * multiple queues, so it needs to wait until the data
		 * transfer finishes.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node holds the event list for the destination buffer.
	 * When a destination buffer is enqueued to the mem list, an event
	 * is created and linked to the tail of the event list, so the
	 * first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id for source and destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}
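
/*
 * Illustrative sketch (not part of the driver): how a userland client might
 * receive the event queued above, via the standard DRM event read path.
 * "fd" is a hypothetical DRM file descriptor; a real client would poll()
 * first and handle short reads.
 *
 *	char buf[4096];
 *	int len = read(fd, buf, sizeof(buf));
 *	int off = 0;
 *
 *	while (off < len) {
 *		struct drm_event *ev = (struct drm_event *)&buf[off];
 *
 *		if (ev->type == DRM_EXYNOS_IPP_EVENT) {
 *			struct drm_exynos_ipp_event *ipp_ev =
 *				(struct drm_exynos_ipp_event *)ev;
 *			(ipp_ev->prop_id and ipp_ev->buf_id[] identify the
 *			 completed buffer to dequeue)
 *		}
 *		off += ev->length;
 *	}
 */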

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP synchronizes the command thread and the event thread.
	 * If IPP is closed from user land immediately, then IPP
	 * synchronizes with the command thread, so complete the event
	 * instead of going on with the operations.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	mutex_lock(&c_node->event_lock);

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret)
		DRM_ERROR("failed to send event.\n");

	/* only unlock on the path that actually took the lock */
	mutex_unlock(&c_node->event_lock);

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
			count++, ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("done priv[%p]\n", priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[%p]\n", priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
				count++, ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userland may be in an abnormal state:
				 * the process was killed and the file was
				 * closed, so the stop cmd ctrl was never
				 * called. Perform the stop operation here
				 * in that case.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * create a single thread for ipp events.
	 * IPP supports an event thread for the IPP drivers.
	 * An IPP driver sends event_work to this thread,
	 * and the IPP event thread sends the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * create a single thread for ipp commands.
	 * IPP supports a command thread for the user process.
	 * The user process makes a command node using the set property
	 * ioctl, makes start_work, sends this work to the command
	 * thread, and the command thread then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove and destroy ipp idrs */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command and event work queues */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};