/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image
 * scaler/rotator and input/output DMA operations using FIMC, GSC,
 * Rotator, and so on. IPP is an integrated device driver for
 * hardware blocks that share these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open.
 * 7. implement power and sysmmu control in power_on.
 */
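
/*
 * Typical userspace flow (a hedged sketch, not part of this driver;
 * only the ioctl and enum names come from the exynos_drm uapi, the
 * rest is illustrative):
 *
 *	struct drm_exynos_ipp_property prop = { ... };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);  // src and dst
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id, .ctrl = IPP_CTRL_PLAY };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *
 * Completion is reported as a DRM_EXYNOS_IPP_EVENT read from the DRM fd.
 */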

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("id[%d]\n", id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd	cmd)
{
	/*
	 * Check the dedicated flag and, for WB/OUTPUT commands,
	 * whether the hardware is already powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the driver is marked dedicated in the set property
		 * ioctl. The dedicated flag is cleared when the ipp driver
		 * finishes its operations.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("chosen device is already in use.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device among the
		 * ipp drivers. The drivers have different abilities,
		 * so the property has to be checked.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("property not supported.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id in this case,
		 * so the ipp subsystem searches the whole driver list for
		 * a suitable driver.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("property not supported.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports these operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search for the ipp driver that owns the prop_id handle.
	 * The ipp subsystem sometimes needs to find a driver by prop_id,
	 * e.g. for the PAUSE state, queue buf or command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Report the ippdrv list count to the user application.
		 * In the first step the application queries the ippdrv
		 * count; in the second step it queries each ippdrv's
		 * capability using its ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get an ippdrv's capability by ipp_id.
		 * Some devices do not support the wb or output interface,
		 * so the user application detects the correct ipp driver
		 * using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		/* ipp_find_obj() returns ERR_PTR, not NULL, on failure */
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		/* copy the capability back into the ioctl data */
		memcpy(prop_list, ippdrv->prop_list, sizeof(*prop_list));
	}

	return 0;
}
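
/*
 * Userspace probe sequence for the ioctl above (a hedged sketch; only
 * the ioctl and struct names come from the exynos_drm uapi, the
 * 1..count id range is an assumption based on ipp_create_id() above):
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	for (u32 id = 1; id <= plist.count; id++) {
 *		struct drm_exynos_ipp_prop_list cap = { .ipp_id = id };
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &cap);
 *		// inspect cap to pick the driver that fits the job
 *	}
 */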

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list using
	 * prop_id, and update it with the property information
	 * passed in.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
				property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/*
	 * Log the property handed in by the user application;
	 * applications set various properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id, but a
	 * prop_id may already have been assigned by an earlier set
	 * property call, e.g. in the PAUSE state. In that case find the
	 * current prop_id and reuse it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate command node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and state in the command node */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}
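
/*
 * Filling drm_exynos_ipp_property for the ioctl above (a hedged
 * userspace sketch; the field and enum names are the ones this file
 * dereferences, the concrete sizes and some_fourcc are only assumed
 * example values):
 *
 *	struct drm_exynos_ipp_property prop = {
 *		.cmd = IPP_CMD_M2M,
 *		.ipp_id = 0,	// 0 lets ipp_find_driver() pick a driver
 *	};
 *	prop.config[EXYNOS_DRM_OPS_SRC].fmt = some_fourcc;
 *	prop.config[EXYNOS_DRM_OPS_SRC].sz =
 *		(struct drm_exynos_sz){ .hsize = 1280, .vsize = 720 };
 *	prop.config[EXYNOS_DRM_OPS_DST].fmt = some_fourcc;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	// on success the kernel writes the new id into prop.prop_id
 */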

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("min[%d]max[%d]\n",
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired source/destination memory addresses,
	 * so the minimum of the src and dst counts is what matters.
	 * The other commands do not use paired memory, so the maximum
	 * count is used instead.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
				i, buf_info.base[i], (int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event_list is empty.\n");
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * qbuf == NULL means delete all events: the stop
		 * operation wants the whole event list removed.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations are started right here in queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	/* ipp_find_obj() returns ERR_PTR, not NULL, on failure */
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step is to get an event for the destination
		 * buffer. The second step, in the M2M case, is to run
		 * with the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature. The other cases set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
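
/*
 * Enqueueing buffers for the ioctl above (a hedged userspace sketch;
 * src_gem_handle/dst_gem_handle are GEM handles the application
 * already owns, single-planar layout is assumed, and the field names
 * are the ones this function dereferences):
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_SRC,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id = 0,
 *		.handle = { src_gem_handle },	// plane 0 only
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
 *	qbuf.handle[0] = dst_gem_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 */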

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	/* ipp_find_obj() returns ERR_PTR, not NULL, on failure */
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("unsupported ctrl type.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
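
/*
 * Driving a queued job with the ioctl above (a hedged userspace
 * sketch; the event read loop follows the generic DRM event ABI and
 * the payload layout is the one ipp_get_event()/ipp_send_event()
 * built, everything else is illustrative):
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *
 *	char buf[128];
 *	read(fd, buf, sizeof(buf));	// blocks until the event fires
 *	struct drm_exynos_ipp_event *ev = (void *)buf;
 *	// ev->base.type == DRM_EXYNOS_IPP_EVENT;
 *	// ev->buf_id[EXYNOS_DRM_OPS_DST] names the finished buffer
 *
 *	ctrl.ctrl = IPP_CTRL_STOP;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */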

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
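
/*
 * In-kernel use of the notifier chain above (a hedged sketch; the
 * callback name and the event value are illustrative, only the
 * register/send helpers are defined in this file):
 *
 *	static int my_ipp_notifier(struct notifier_block *nb,
 *			unsigned long val, void *data)
 *	{
 *		// react to the broadcast ipp event here
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ipp_notifier,
 *	};
 *
 *	exynos_drm_ippnb_register(&my_nb);
 *	exynos_drm_ippnb_send_event(some_event_val, some_data);
 */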

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source/destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transform.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/*
			 * list_first_entry() is only valid on a non-empty
			 * list, so check emptiness first.
			 */
			if (list_empty(head)) {
				DRM_ERROR("failed to get node.\n");
				return -EFAULT;
			}

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("mem_list is empty.\n");
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * it performs a single unit of operation per run while
		 * serving multiple queued buffers, so each run must
		 * wait until its data transfer completes.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers.
	 * When a destination buffer is enqueued to the mem list, an
	 * event is created and linked to the tail of the event list,
	 * so the first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	mutex_lock(&c_node->event_lock);

	/*
	 * IPP synchronizes the command and event threads. If userland
	 * closes the device immediately, the event thread still
	 * completes start_complete so the command thread is not left
	 * waiting; otherwise it goes on and sends the event.
	 * The lock is taken before the state check so that the unlock
	 * in the completion path below is always balanced.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userland went into an abnormal state:
				 * the process was killed and the file
				 * closed, so the stop cmd ctrl was never
				 * issued. Perform the stop operation here
				 * instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP provides an event thread for the IPP drivers:
	 * an IPP driver sends event_work to this thread,
	 * and the event thread sends the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP provides a command thread for user processes:
	 * a user process makes a command node using the set property
	 * ioctl, builds start_work, and sends that work to the command
	 * thread, which then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* destroy ipp and property idrs */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};